1 //===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the function verifier interface, that can be used for some
11 // sanity checking of input to the system.
12 //
13 // Note that this does not provide full `Java style' security and verifications,
14 // instead it just tries to ensure that code is well-formed.
15 //
16 //  * Both of a binary operator's parameters are of the same type
17 //  * Verify that the indices of mem access instructions match other operands
18 //  * Verify that arithmetic and other things are only performed on first-class
19 //    types.  Verify that shifts & logicals only happen on integrals f.e.
20 //  * All of the constants in a switch statement are of the correct type
21 //  * The code is in valid SSA form
22 //  * It should be illegal to put a label into any other type (like a structure)
23 //    or to return one. [except constant arrays!]
24 //  * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
25 //  * PHI nodes must have an entry for each predecessor, with no extras.
26 //  * PHI nodes must be the first thing in a basic block, all grouped together
27 //  * PHI nodes must have at least one entry
28 //  * All basic blocks should only end with terminator insts, not contain them
29 //  * The entry node to a function must not have predecessors
30 //  * All Instructions must be embedded into a basic block
31 //  * Functions cannot take a void-typed parameter
32 //  * Verify that a function's argument list agrees with it's declared type.
33 //  * It is illegal to specify a name for a void value.
34 //  * It is illegal to have a internal global value with no initializer
35 //  * It is illegal to have a ret instruction that returns a value that does not
36 //    agree with the function return value type.
37 //  * Function call argument types match the function prototype
38 //  * A landing pad is defined by a landingpad instruction, and can be jumped to
39 //    only by the unwind edge of an invoke instruction.
40 //  * A landingpad instruction must be the first non-PHI instruction in the
41 //    block.
42 //  * Landingpad instructions must be in a function with a personality function.
43 //  * All other things that are tested by asserts spread about the code...
44 //
45 //===----------------------------------------------------------------------===//
46 
47 #include "llvm/IR/Verifier.h"
48 #include "llvm/ADT/APFloat.h"
49 #include "llvm/ADT/APInt.h"
50 #include "llvm/ADT/ArrayRef.h"
51 #include "llvm/ADT/DenseMap.h"
52 #include "llvm/ADT/MapVector.h"
53 #include "llvm/ADT/Optional.h"
54 #include "llvm/ADT/STLExtras.h"
55 #include "llvm/ADT/SmallPtrSet.h"
56 #include "llvm/ADT/SmallSet.h"
57 #include "llvm/ADT/SmallVector.h"
58 #include "llvm/ADT/StringExtras.h"
59 #include "llvm/ADT/StringMap.h"
60 #include "llvm/ADT/StringRef.h"
61 #include "llvm/ADT/Twine.h"
62 #include "llvm/ADT/ilist.h"
63 #include "llvm/BinaryFormat/Dwarf.h"
64 #include "llvm/IR/Argument.h"
65 #include "llvm/IR/Attributes.h"
66 #include "llvm/IR/BasicBlock.h"
67 #include "llvm/IR/CFG.h"
68 #include "llvm/IR/CallSite.h"
69 #include "llvm/IR/CallingConv.h"
70 #include "llvm/IR/Comdat.h"
71 #include "llvm/IR/Constant.h"
72 #include "llvm/IR/ConstantRange.h"
73 #include "llvm/IR/Constants.h"
74 #include "llvm/IR/DataLayout.h"
75 #include "llvm/IR/DebugInfo.h"
76 #include "llvm/IR/DebugInfoMetadata.h"
77 #include "llvm/IR/DebugLoc.h"
78 #include "llvm/IR/DerivedTypes.h"
79 #include "llvm/IR/Dominators.h"
80 #include "llvm/IR/Function.h"
81 #include "llvm/IR/GlobalAlias.h"
82 #include "llvm/IR/GlobalValue.h"
83 #include "llvm/IR/GlobalVariable.h"
84 #include "llvm/IR/InlineAsm.h"
85 #include "llvm/IR/InstVisitor.h"
86 #include "llvm/IR/InstrTypes.h"
87 #include "llvm/IR/Instruction.h"
88 #include "llvm/IR/Instructions.h"
89 #include "llvm/IR/IntrinsicInst.h"
90 #include "llvm/IR/Intrinsics.h"
91 #include "llvm/IR/LLVMContext.h"
92 #include "llvm/IR/Metadata.h"
93 #include "llvm/IR/Module.h"
94 #include "llvm/IR/ModuleSlotTracker.h"
95 #include "llvm/IR/PassManager.h"
96 #include "llvm/IR/Statepoint.h"
97 #include "llvm/IR/Type.h"
98 #include "llvm/IR/Use.h"
99 #include "llvm/IR/User.h"
100 #include "llvm/IR/Value.h"
101 #include "llvm/Pass.h"
102 #include "llvm/Support/AtomicOrdering.h"
103 #include "llvm/Support/Casting.h"
104 #include "llvm/Support/CommandLine.h"
105 #include "llvm/Support/Debug.h"
106 #include "llvm/Support/ErrorHandling.h"
107 #include "llvm/Support/MathExtras.h"
108 #include "llvm/Support/raw_ostream.h"
109 #include <algorithm>
110 #include <cassert>
111 #include <cstdint>
112 #include <memory>
113 #include <string>
114 #include <utility>
115 
116 using namespace llvm;
117 
118 namespace llvm {
119 
120 struct VerifierSupport {
121   raw_ostream *OS;
122   const Module &M;
123   ModuleSlotTracker MST;
124   const DataLayout &DL;
125   LLVMContext &Context;
126 
127   /// Track the brokenness of the module while recursively visiting.
128   bool Broken = false;
129   /// Broken debug info can be "recovered" from by stripping the debug info.
130   bool BrokenDebugInfo = false;
131   /// Whether to treat broken debug info as an error.
132   bool TreatBrokenDebugInfoAsError = true;
133 
134   explicit VerifierSupport(raw_ostream *OS, const Module &M)
135       : OS(OS), M(M), MST(&M), DL(M.getDataLayout()), Context(M.getContext()) {}
136 
137 private:
138   void Write(const Module *M) {
139     *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
140   }
141 
142   void Write(const Value *V) {
143     if (!V)
144       return;
145     if (isa<Instruction>(V)) {
146       V->print(*OS, MST);
147       *OS << '\n';
148     } else {
149       V->printAsOperand(*OS, true, MST);
150       *OS << '\n';
151     }
152   }
153 
154   void Write(ImmutableCallSite CS) {
155     Write(CS.getInstruction());
156   }
157 
158   void Write(const Metadata *MD) {
159     if (!MD)
160       return;
161     MD->print(*OS, MST, &M);
162     *OS << '\n';
163   }
164 
165   template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
166     Write(MD.get());
167   }
168 
169   void Write(const NamedMDNode *NMD) {
170     if (!NMD)
171       return;
172     NMD->print(*OS, MST);
173     *OS << '\n';
174   }
175 
176   void Write(Type *T) {
177     if (!T)
178       return;
179     *OS << ' ' << *T;
180   }
181 
182   void Write(const Comdat *C) {
183     if (!C)
184       return;
185     *OS << *C;
186   }
187 
188   void Write(const APInt *AI) {
189     if (!AI)
190       return;
191     *OS << *AI << '\n';
192   }
193 
194   void Write(const unsigned i) { *OS << i << '\n'; }
195 
196   template <typename T> void Write(ArrayRef<T> Vs) {
197     for (const T &V : Vs)
198       Write(V);
199   }
200 
201   template <typename T1, typename... Ts>
202   void WriteTs(const T1 &V1, const Ts &... Vs) {
203     Write(V1);
204     WriteTs(Vs...);
205   }
206 
207   template <typename... Ts> void WriteTs() {}
208 
209 public:
210   /// A check failed, so printout out the condition and the message.
211   ///
212   /// This provides a nice place to put a breakpoint if you want to see why
213   /// something is not correct.
214   void CheckFailed(const Twine &Message) {
215     if (OS)
216       *OS << Message << '\n';
217     Broken = true;
218   }
219 
220   /// A check failed (with values to print).
221   ///
222   /// This calls the Message-only version so that the above is easier to set a
223   /// breakpoint on.
224   template <typename T1, typename... Ts>
225   void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
226     CheckFailed(Message);
227     if (OS)
228       WriteTs(V1, Vs...);
229   }
230 
231   /// A debug info check failed.
232   void DebugInfoCheckFailed(const Twine &Message) {
233     if (OS)
234       *OS << Message << '\n';
235     Broken |= TreatBrokenDebugInfoAsError;
236     BrokenDebugInfo = true;
237   }
238 
239   /// A debug info check failed (with values to print).
240   template <typename T1, typename... Ts>
241   void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
242                             const Ts &... Vs) {
243     DebugInfoCheckFailed(Message);
244     if (OS)
245       WriteTs(V1, Vs...);
246   }
247 };
248 
249 } // namespace llvm
250 
251 namespace {
252 
253 class Verifier : public InstVisitor<Verifier>, VerifierSupport {
254   friend class InstVisitor<Verifier>;
255 
256   DominatorTree DT;
257 
258   /// When verifying a basic block, keep track of all of the
259   /// instructions we have seen so far.
260   ///
261   /// This allows us to do efficient dominance checks for the case when an
262   /// instruction has an operand that is an instruction in the same block.
263   SmallPtrSet<Instruction *, 16> InstsInThisBlock;
264 
265   /// Keep track of the metadata nodes that have been checked already.
266   SmallPtrSet<const Metadata *, 32> MDNodes;
267 
268   /// Keep track which DISubprogram is attached to which function.
269   DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;
270 
271   /// Track all DICompileUnits visited.
272   SmallPtrSet<const Metadata *, 2> CUVisited;
273 
274   /// The result type for a landingpad.
275   Type *LandingPadResultTy;
276 
277   /// Whether we've seen a call to @llvm.localescape in this function
278   /// already.
279   bool SawFrameEscape;
280 
281   /// Whether the current function has a DISubprogram attached to it.
282   bool HasDebugInfo = false;
283 
284   /// Whether source was present on the first DIFile encountered in each CU.
285   DenseMap<const DICompileUnit *, bool> HasSourceDebugInfo;
286 
287   /// Stores the count of how many objects were passed to llvm.localescape for a
288   /// given function and the largest index passed to llvm.localrecover.
289   DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
290 
291   // Maps catchswitches and cleanuppads that unwind to siblings to the
292   // terminators that indicate the unwind, used to detect cycles therein.
293   MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
294 
295   /// Cache of constants visited in search of ConstantExprs.
296   SmallPtrSet<const Constant *, 32> ConstantExprVisited;
297 
298   /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
299   SmallVector<const Function *, 4> DeoptimizeDeclarations;
300 
301   // Verify that this GlobalValue is only used in this module.
302   // This map is used to avoid visiting uses twice. We can arrive at a user
303   // twice, if they have multiple operands. In particular for very large
304   // constant expressions, we can arrive at a particular user many times.
305   SmallPtrSet<const Value *, 32> GlobalValueVisited;
306 
307   // Keeps track of duplicate function argument debug info.
308   SmallVector<const DILocalVariable *, 16> DebugFnArgs;
309 
310   TBAAVerifier TBAAVerifyHelper;
311 
312   void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
313 
314 public:
315   explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
316                     const Module &M)
317       : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
318         SawFrameEscape(false), TBAAVerifyHelper(this) {
319     TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
320   }
321 
322   bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
323 
324   bool verify(const Function &F) {
325     assert(F.getParent() == &M &&
326            "An instance of this class only works with a specific module!");
327 
328     // First ensure the function is well-enough formed to compute dominance
329     // information, and directly compute a dominance tree. We don't rely on the
330     // pass manager to provide this as it isolates us from a potentially
331     // out-of-date dominator tree and makes it significantly more complex to run
332     // this code outside of a pass manager.
333     // FIXME: It's really gross that we have to cast away constness here.
334     if (!F.empty())
335       DT.recalculate(const_cast<Function &>(F));
336 
337     for (const BasicBlock &BB : F) {
338       if (!BB.empty() && BB.back().isTerminator())
339         continue;
340 
341       if (OS) {
342         *OS << "Basic Block in function '" << F.getName()
343             << "' does not have terminator!\n";
344         BB.printAsOperand(*OS, true, MST);
345         *OS << "\n";
346       }
347       return false;
348     }
349 
350     Broken = false;
351     // FIXME: We strip const here because the inst visitor strips const.
352     visit(const_cast<Function &>(F));
353     verifySiblingFuncletUnwinds();
354     InstsInThisBlock.clear();
355     DebugFnArgs.clear();
356     LandingPadResultTy = nullptr;
357     SawFrameEscape = false;
358     SiblingFuncletInfo.clear();
359 
360     return !Broken;
361   }
362 
363   /// Verify the module that this instance of \c Verifier was initialized with.
364   bool verify() {
365     Broken = false;
366 
367     // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
368     for (const Function &F : M)
369       if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
370         DeoptimizeDeclarations.push_back(&F);
371 
372     // Now that we've visited every function, verify that we never asked to
373     // recover a frame index that wasn't escaped.
374     verifyFrameRecoverIndices();
375     for (const GlobalVariable &GV : M.globals())
376       visitGlobalVariable(GV);
377 
378     for (const GlobalAlias &GA : M.aliases())
379       visitGlobalAlias(GA);
380 
381     for (const NamedMDNode &NMD : M.named_metadata())
382       visitNamedMDNode(NMD);
383 
384     for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
385       visitComdat(SMEC.getValue());
386 
387     visitModuleFlags(M);
388     visitModuleIdents(M);
389     visitModuleCommandLines(M);
390 
391     verifyCompileUnits();
392 
393     verifyDeoptimizeCallingConvs();
394     DISubprogramAttachments.clear();
395     return !Broken;
396   }
397 
398 private:
399   // Verification methods...
400   void visitGlobalValue(const GlobalValue &GV);
401   void visitGlobalVariable(const GlobalVariable &GV);
402   void visitGlobalAlias(const GlobalAlias &GA);
403   void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
404   void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
405                            const GlobalAlias &A, const Constant &C);
406   void visitNamedMDNode(const NamedMDNode &NMD);
407   void visitMDNode(const MDNode &MD);
408   void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
409   void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
410   void visitComdat(const Comdat &C);
411   void visitModuleIdents(const Module &M);
412   void visitModuleCommandLines(const Module &M);
413   void visitModuleFlags(const Module &M);
414   void visitModuleFlag(const MDNode *Op,
415                        DenseMap<const MDString *, const MDNode *> &SeenIDs,
416                        SmallVectorImpl<const MDNode *> &Requirements);
417   void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
418   void visitFunction(const Function &F);
419   void visitBasicBlock(BasicBlock &BB);
420   void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
421   void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
422 
423   template <class Ty> bool isValidMetadataArray(const MDTuple &N);
424 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
425 #include "llvm/IR/Metadata.def"
426   void visitDIScope(const DIScope &N);
427   void visitDIVariable(const DIVariable &N);
428   void visitDILexicalBlockBase(const DILexicalBlockBase &N);
429   void visitDITemplateParameter(const DITemplateParameter &N);
430 
431   void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
432 
433   // InstVisitor overrides...
434   using InstVisitor<Verifier>::visit;
435   void visit(Instruction &I);
436 
437   void visitTruncInst(TruncInst &I);
438   void visitZExtInst(ZExtInst &I);
439   void visitSExtInst(SExtInst &I);
440   void visitFPTruncInst(FPTruncInst &I);
441   void visitFPExtInst(FPExtInst &I);
442   void visitFPToUIInst(FPToUIInst &I);
443   void visitFPToSIInst(FPToSIInst &I);
444   void visitUIToFPInst(UIToFPInst &I);
445   void visitSIToFPInst(SIToFPInst &I);
446   void visitIntToPtrInst(IntToPtrInst &I);
447   void visitPtrToIntInst(PtrToIntInst &I);
448   void visitBitCastInst(BitCastInst &I);
449   void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
450   void visitPHINode(PHINode &PN);
451   void visitUnaryOperator(UnaryOperator &U);
452   void visitBinaryOperator(BinaryOperator &B);
453   void visitICmpInst(ICmpInst &IC);
454   void visitFCmpInst(FCmpInst &FC);
455   void visitExtractElementInst(ExtractElementInst &EI);
456   void visitInsertElementInst(InsertElementInst &EI);
457   void visitShuffleVectorInst(ShuffleVectorInst &EI);
458   void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
459   void visitCallInst(CallInst &CI);
460   void visitInvokeInst(InvokeInst &II);
461   void visitGetElementPtrInst(GetElementPtrInst &GEP);
462   void visitLoadInst(LoadInst &LI);
463   void visitStoreInst(StoreInst &SI);
464   void verifyDominatesUse(Instruction &I, unsigned i);
465   void visitInstruction(Instruction &I);
466   void visitTerminator(Instruction &I);
467   void visitBranchInst(BranchInst &BI);
468   void visitReturnInst(ReturnInst &RI);
469   void visitSwitchInst(SwitchInst &SI);
470   void visitIndirectBrInst(IndirectBrInst &BI);
471   void visitSelectInst(SelectInst &SI);
472   void visitUserOp1(Instruction &I);
473   void visitUserOp2(Instruction &I) { visitUserOp1(I); }
474   void visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS);
475   void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
476   void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
477   void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
478   void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
479   void visitAtomicRMWInst(AtomicRMWInst &RMWI);
480   void visitFenceInst(FenceInst &FI);
481   void visitAllocaInst(AllocaInst &AI);
482   void visitExtractValueInst(ExtractValueInst &EVI);
483   void visitInsertValueInst(InsertValueInst &IVI);
484   void visitEHPadPredecessors(Instruction &I);
485   void visitLandingPadInst(LandingPadInst &LPI);
486   void visitResumeInst(ResumeInst &RI);
487   void visitCatchPadInst(CatchPadInst &CPI);
488   void visitCatchReturnInst(CatchReturnInst &CatchReturn);
489   void visitCleanupPadInst(CleanupPadInst &CPI);
490   void visitFuncletPadInst(FuncletPadInst &FPI);
491   void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
492   void visitCleanupReturnInst(CleanupReturnInst &CRI);
493 
494   void verifyCallSite(CallSite CS);
495   void verifySwiftErrorCallSite(CallSite CS, const Value *SwiftErrorVal);
496   void verifySwiftErrorValue(const Value *SwiftErrorVal);
497   void verifyMustTailCall(CallInst &CI);
498   bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
499                         unsigned ArgNo, std::string &Suffix);
500   bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
501   void verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
502                             const Value *V);
503   void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
504   void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
505                            const Value *V);
506   void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
507 
508   void visitConstantExprsRecursively(const Constant *EntryC);
509   void visitConstantExpr(const ConstantExpr *CE);
510   void verifyStatepoint(ImmutableCallSite CS);
511   void verifyFrameRecoverIndices();
512   void verifySiblingFuncletUnwinds();
513 
514   void verifyFragmentExpression(const DbgVariableIntrinsic &I);
515   template <typename ValueOrMetadata>
516   void verifyFragmentExpression(const DIVariable &V,
517                                 DIExpression::FragmentInfo Fragment,
518                                 ValueOrMetadata *Desc);
519   void verifyFnArgs(const DbgVariableIntrinsic &I);
520 
521   /// Module-level debug info verification...
522   void verifyCompileUnits();
523 
524   /// Module-level verification that all @llvm.experimental.deoptimize
525   /// declarations share the same calling convention.
526   void verifyDeoptimizeCallingConvs();
527 
528   /// Verify all-or-nothing property of DIFile source attribute within a CU.
529   void verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F);
530 };
531 
532 } // end anonymous namespace
533 
534 /// We know that cond should be true, if not print an error message.
535 #define Assert(C, ...) \
536   do { if (!(C)) { CheckFailed(__VA_ARGS__); return; } } while (false)
537 
538 /// We know that a debug info condition should be true, if not print
539 /// an error message.
540 #define AssertDI(C, ...) \
541   do { if (!(C)) { DebugInfoCheckFailed(__VA_ARGS__); return; } } while (false)
542 
543 void Verifier::visit(Instruction &I) {
544   for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
545     Assert(I.getOperand(i) != nullptr, "Operand is null", &I);
546   InstVisitor<Verifier>::visit(I);
547 }
548 
549 // Helper to recursively iterate over indirect users. By
550 // returning false, the callback can ask to stop recursing
551 // further.
552 static void forEachUser(const Value *User,
553                         SmallPtrSet<const Value *, 32> &Visited,
554                         llvm::function_ref<bool(const Value *)> Callback) {
555   if (!Visited.insert(User).second)
556     return;
557   for (const Value *TheNextUser : User->materialized_users())
558     if (Callback(TheNextUser))
559       forEachUser(TheNextUser, Visited, Callback);
560 }
561 
562 void Verifier::visitGlobalValue(const GlobalValue &GV) {
563   Assert(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(),
564          "Global is external, but doesn't have external or weak linkage!", &GV);
565 
566   Assert(GV.getAlignment() <= Value::MaximumAlignment,
567          "huge alignment values are unsupported", &GV);
568   Assert(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
569          "Only global variables can have appending linkage!", &GV);
570 
571   if (GV.hasAppendingLinkage()) {
572     const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
573     Assert(GVar && GVar->getValueType()->isArrayTy(),
574            "Only global arrays can have appending linkage!", GVar);
575   }
576 
577   if (GV.isDeclarationForLinker())
578     Assert(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
579 
580   if (GV.hasDLLImportStorageClass()) {
581     Assert(!GV.isDSOLocal(),
582            "GlobalValue with DLLImport Storage is dso_local!", &GV);
583 
584     Assert((GV.isDeclaration() && GV.hasExternalLinkage()) ||
585                GV.hasAvailableExternallyLinkage(),
586            "Global is marked as dllimport, but not external", &GV);
587   }
588 
589   if (GV.hasLocalLinkage())
590     Assert(GV.isDSOLocal(),
591            "GlobalValue with private or internal linkage must be dso_local!",
592            &GV);
593 
594   if (!GV.hasDefaultVisibility() && !GV.hasExternalWeakLinkage())
595     Assert(GV.isDSOLocal(),
596            "GlobalValue with non default visibility must be dso_local!", &GV);
597 
598   forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
599     if (const Instruction *I = dyn_cast<Instruction>(V)) {
600       if (!I->getParent() || !I->getParent()->getParent())
601         CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
602                     I);
603       else if (I->getParent()->getParent()->getParent() != &M)
604         CheckFailed("Global is referenced in a different module!", &GV, &M, I,
605                     I->getParent()->getParent(),
606                     I->getParent()->getParent()->getParent());
607       return false;
608     } else if (const Function *F = dyn_cast<Function>(V)) {
609       if (F->getParent() != &M)
610         CheckFailed("Global is used by function in a different module", &GV, &M,
611                     F, F->getParent());
612       return false;
613     }
614     return true;
615   });
616 }
617 
618 void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
619   if (GV.hasInitializer()) {
620     Assert(GV.getInitializer()->getType() == GV.getValueType(),
621            "Global variable initializer type does not match global "
622            "variable type!",
623            &GV);
624     // If the global has common linkage, it must have a zero initializer and
625     // cannot be constant.
626     if (GV.hasCommonLinkage()) {
627       Assert(GV.getInitializer()->isNullValue(),
628              "'common' global must have a zero initializer!", &GV);
629       Assert(!GV.isConstant(), "'common' global may not be marked constant!",
630              &GV);
631       Assert(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
632     }
633   }
634 
635   if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
636                        GV.getName() == "llvm.global_dtors")) {
637     Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
638            "invalid linkage for intrinsic global variable", &GV);
639     // Don't worry about emitting an error for it not being an array,
640     // visitGlobalValue will complain on appending non-array.
641     if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
642       StructType *STy = dyn_cast<StructType>(ATy->getElementType());
643       PointerType *FuncPtrTy =
644           FunctionType::get(Type::getVoidTy(Context), false)->
645           getPointerTo(DL.getProgramAddressSpace());
646       // FIXME: Reject the 2-field form in LLVM 4.0.
647       Assert(STy &&
648                  (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
649                  STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
650                  STy->getTypeAtIndex(1) == FuncPtrTy,
651              "wrong type for intrinsic global variable", &GV);
652       if (STy->getNumElements() == 3) {
653         Type *ETy = STy->getTypeAtIndex(2);
654         Assert(ETy->isPointerTy() &&
655                    cast<PointerType>(ETy)->getElementType()->isIntegerTy(8),
656                "wrong type for intrinsic global variable", &GV);
657       }
658     }
659   }
660 
661   if (GV.hasName() && (GV.getName() == "llvm.used" ||
662                        GV.getName() == "llvm.compiler.used")) {
663     Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
664            "invalid linkage for intrinsic global variable", &GV);
665     Type *GVType = GV.getValueType();
666     if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
667       PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
668       Assert(PTy, "wrong type for intrinsic global variable", &GV);
669       if (GV.hasInitializer()) {
670         const Constant *Init = GV.getInitializer();
671         const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
672         Assert(InitArray, "wrong initalizer for intrinsic global variable",
673                Init);
674         for (Value *Op : InitArray->operands()) {
675           Value *V = Op->stripPointerCastsNoFollowAliases();
676           Assert(isa<GlobalVariable>(V) || isa<Function>(V) ||
677                      isa<GlobalAlias>(V),
678                  "invalid llvm.used member", V);
679           Assert(V->hasName(), "members of llvm.used must be named", V);
680         }
681       }
682     }
683   }
684 
685   // Visit any debug info attachments.
686   SmallVector<MDNode *, 1> MDs;
687   GV.getMetadata(LLVMContext::MD_dbg, MDs);
688   for (auto *MD : MDs) {
689     if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
690       visitDIGlobalVariableExpression(*GVE);
691     else
692       AssertDI(false, "!dbg attachment of global variable must be a "
693                       "DIGlobalVariableExpression");
694   }
695 
696   if (!GV.hasInitializer()) {
697     visitGlobalValue(GV);
698     return;
699   }
700 
701   // Walk any aggregate initializers looking for bitcasts between address spaces
702   visitConstantExprsRecursively(GV.getInitializer());
703 
704   visitGlobalValue(GV);
705 }
706 
707 void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
708   SmallPtrSet<const GlobalAlias*, 4> Visited;
709   Visited.insert(&GA);
710   visitAliaseeSubExpr(Visited, GA, C);
711 }
712 
713 void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
714                                    const GlobalAlias &GA, const Constant &C) {
715   if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
716     Assert(!GV->isDeclarationForLinker(), "Alias must point to a definition",
717            &GA);
718 
719     if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
720       Assert(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
721 
722       Assert(!GA2->isInterposable(), "Alias cannot point to an interposable alias",
723              &GA);
724     } else {
725       // Only continue verifying subexpressions of GlobalAliases.
726       // Do not recurse into global initializers.
727       return;
728     }
729   }
730 
731   if (const auto *CE = dyn_cast<ConstantExpr>(&C))
732     visitConstantExprsRecursively(CE);
733 
734   for (const Use &U : C.operands()) {
735     Value *V = &*U;
736     if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
737       visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
738     else if (const auto *C2 = dyn_cast<Constant>(V))
739       visitAliaseeSubExpr(Visited, GA, *C2);
740   }
741 }
742 
743 void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
744   Assert(GlobalAlias::isValidLinkage(GA.getLinkage()),
745          "Alias should have private, internal, linkonce, weak, linkonce_odr, "
746          "weak_odr, or external linkage!",
747          &GA);
748   const Constant *Aliasee = GA.getAliasee();
749   Assert(Aliasee, "Aliasee cannot be NULL!", &GA);
750   Assert(GA.getType() == Aliasee->getType(),
751          "Alias and aliasee types should match!", &GA);
752 
753   Assert(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
754          "Aliasee should be either GlobalValue or ConstantExpr", &GA);
755 
756   visitAliaseeSubExpr(GA, *Aliasee);
757 
758   visitGlobalValue(GA);
759 }
760 
761 void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
762   // There used to be various other llvm.dbg.* nodes, but we don't support
763   // upgrading them and we want to reserve the namespace for future uses.
764   if (NMD.getName().startswith("llvm.dbg."))
765     AssertDI(NMD.getName() == "llvm.dbg.cu",
766              "unrecognized named metadata node in the llvm.dbg namespace",
767              &NMD);
768   for (const MDNode *MD : NMD.operands()) {
769     if (NMD.getName() == "llvm.dbg.cu")
770       AssertDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
771 
772     if (!MD)
773       continue;
774 
775     visitMDNode(*MD);
776   }
777 }
778 
779 void Verifier::visitMDNode(const MDNode &MD) {
780   // Only visit each node once.  Metadata can be mutually recursive, so this
781   // avoids infinite recursion here, as well as being an optimization.
782   if (!MDNodes.insert(&MD).second)
783     return;
784 
785   switch (MD.getMetadataID()) {
786   default:
787     llvm_unreachable("Invalid MDNode subclass");
788   case Metadata::MDTupleKind:
789     break;
790 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
791   case Metadata::CLASS##Kind:                                                  \
792     visit##CLASS(cast<CLASS>(MD));                                             \
793     break;
794 #include "llvm/IR/Metadata.def"
795   }
796 
797   for (const Metadata *Op : MD.operands()) {
798     if (!Op)
799       continue;
800     Assert(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
801            &MD, Op);
802     if (auto *N = dyn_cast<MDNode>(Op)) {
803       visitMDNode(*N);
804       continue;
805     }
806     if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
807       visitValueAsMetadata(*V, nullptr);
808       continue;
809     }
810   }
811 
812   // Check these last, so we diagnose problems in operands first.
813   Assert(!MD.isTemporary(), "Expected no forward declarations!", &MD);
814   Assert(MD.isResolved(), "All nodes should be resolved!", &MD);
815 }
816 
817 void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
818   Assert(MD.getValue(), "Expected valid value", &MD);
819   Assert(!MD.getValue()->getType()->isMetadataTy(),
820          "Unexpected metadata round-trip through values", &MD, MD.getValue());
821 
822   auto *L = dyn_cast<LocalAsMetadata>(&MD);
823   if (!L)
824     return;
825 
826   Assert(F, "function-local metadata used outside a function", L);
827 
828   // If this was an instruction, bb, or argument, verify that it is in the
829   // function that we expect.
830   Function *ActualF = nullptr;
831   if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
832     Assert(I->getParent(), "function-local metadata not in basic block", L, I);
833     ActualF = I->getParent()->getParent();
834   } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
835     ActualF = BB->getParent();
836   else if (Argument *A = dyn_cast<Argument>(L->getValue()))
837     ActualF = A->getParent();
838   assert(ActualF && "Unimplemented function local metadata case!");
839 
840   Assert(ActualF == F, "function-local metadata used in wrong function", L);
841 }
842 
843 void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
844   Metadata *MD = MDV.getMetadata();
845   if (auto *N = dyn_cast<MDNode>(MD)) {
846     visitMDNode(*N);
847     return;
848   }
849 
850   // Only visit each node once.  Metadata can be mutually recursive, so this
851   // avoids infinite recursion here, as well as being an optimization.
852   if (!MDNodes.insert(MD).second)
853     return;
854 
855   if (auto *V = dyn_cast<ValueAsMetadata>(MD))
856     visitValueAsMetadata(*V, F);
857 }
858 
859 static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
860 static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
861 static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
862 
863 void Verifier::visitDILocation(const DILocation &N) {
864   AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
865            "location requires a valid scope", &N, N.getRawScope());
866   if (auto *IA = N.getRawInlinedAt())
867     AssertDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
868   if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
869     AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
870 }
871 
872 void Verifier::visitGenericDINode(const GenericDINode &N) {
873   AssertDI(N.getTag(), "invalid tag", &N);
874 }
875 
876 void Verifier::visitDIScope(const DIScope &N) {
877   if (auto *F = N.getRawFile())
878     AssertDI(isa<DIFile>(F), "invalid file", &N, F);
879 }
880 
881 void Verifier::visitDISubrange(const DISubrange &N) {
882   AssertDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
883   auto Count = N.getCount();
884   AssertDI(Count, "Count must either be a signed constant or a DIVariable",
885            &N);
886   AssertDI(!Count.is<ConstantInt*>() ||
887                Count.get<ConstantInt*>()->getSExtValue() >= -1,
888            "invalid subrange count", &N);
889 }
890 
891 void Verifier::visitDIEnumerator(const DIEnumerator &N) {
892   AssertDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
893 }
894 
895 void Verifier::visitDIBasicType(const DIBasicType &N) {
896   AssertDI(N.getTag() == dwarf::DW_TAG_base_type ||
897                N.getTag() == dwarf::DW_TAG_unspecified_type,
898            "invalid tag", &N);
899   AssertDI(!(N.isBigEndian() && N.isLittleEndian()) ,
900             "has conflicting flags", &N);
901 }
902 
903 void Verifier::visitDIDerivedType(const DIDerivedType &N) {
904   // Common scope checks.
905   visitDIScope(N);
906 
907   AssertDI(N.getTag() == dwarf::DW_TAG_typedef ||
908                N.getTag() == dwarf::DW_TAG_pointer_type ||
909                N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
910                N.getTag() == dwarf::DW_TAG_reference_type ||
911                N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
912                N.getTag() == dwarf::DW_TAG_const_type ||
913                N.getTag() == dwarf::DW_TAG_volatile_type ||
914                N.getTag() == dwarf::DW_TAG_restrict_type ||
915                N.getTag() == dwarf::DW_TAG_atomic_type ||
916                N.getTag() == dwarf::DW_TAG_member ||
917                N.getTag() == dwarf::DW_TAG_inheritance ||
918                N.getTag() == dwarf::DW_TAG_friend,
919            "invalid tag", &N);
920   if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
921     AssertDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
922              N.getRawExtraData());
923   }
924 
925   AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
926   AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
927            N.getRawBaseType());
928 
929   if (N.getDWARFAddressSpace()) {
930     AssertDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
931                  N.getTag() == dwarf::DW_TAG_reference_type,
932              "DWARF address space only applies to pointer or reference types",
933              &N);
934   }
935 }
936 
937 /// Detect mutually exclusive flags.
938 static bool hasConflictingReferenceFlags(unsigned Flags) {
939   return ((Flags & DINode::FlagLValueReference) &&
940           (Flags & DINode::FlagRValueReference)) ||
941          ((Flags & DINode::FlagTypePassByValue) &&
942           (Flags & DINode::FlagTypePassByReference));
943 }
944 
945 void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
946   auto *Params = dyn_cast<MDTuple>(&RawParams);
947   AssertDI(Params, "invalid template params", &N, &RawParams);
948   for (Metadata *Op : Params->operands()) {
949     AssertDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
950              &N, Params, Op);
951   }
952 }
953 
954 void Verifier::visitDICompositeType(const DICompositeType &N) {
955   // Common scope checks.
956   visitDIScope(N);
957 
958   AssertDI(N.getTag() == dwarf::DW_TAG_array_type ||
959                N.getTag() == dwarf::DW_TAG_structure_type ||
960                N.getTag() == dwarf::DW_TAG_union_type ||
961                N.getTag() == dwarf::DW_TAG_enumeration_type ||
962                N.getTag() == dwarf::DW_TAG_class_type ||
963                N.getTag() == dwarf::DW_TAG_variant_part,
964            "invalid tag", &N);
965 
966   AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
967   AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
968            N.getRawBaseType());
969 
970   AssertDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
971            "invalid composite elements", &N, N.getRawElements());
972   AssertDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
973            N.getRawVTableHolder());
974   AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
975            "invalid reference flags", &N);
976 
977   if (N.isVector()) {
978     const DINodeArray Elements = N.getElements();
979     AssertDI(Elements.size() == 1 &&
980              Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
981              "invalid vector, expected one element of type subrange", &N);
982   }
983 
984   if (auto *Params = N.getRawTemplateParams())
985     visitTemplateParams(N, *Params);
986 
987   if (N.getTag() == dwarf::DW_TAG_class_type ||
988       N.getTag() == dwarf::DW_TAG_union_type) {
989     AssertDI(N.getFile() && !N.getFile()->getFilename().empty(),
990              "class/union requires a filename", &N, N.getFile());
991   }
992 
993   if (auto *D = N.getRawDiscriminator()) {
994     AssertDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
995              "discriminator can only appear on variant part");
996   }
997 }
998 
999 void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1000   AssertDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1001   if (auto *Types = N.getRawTypeArray()) {
1002     AssertDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1003     for (Metadata *Ty : N.getTypeArray()->operands()) {
1004       AssertDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1005     }
1006   }
1007   AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
1008            "invalid reference flags", &N);
1009 }
1010 
1011 void Verifier::visitDIFile(const DIFile &N) {
1012   AssertDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1013   Optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1014   if (Checksum) {
1015     AssertDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1016              "invalid checksum kind", &N);
1017     size_t Size;
1018     switch (Checksum->Kind) {
1019     case DIFile::CSK_MD5:
1020       Size = 32;
1021       break;
1022     case DIFile::CSK_SHA1:
1023       Size = 40;
1024       break;
1025     }
1026     AssertDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1027     AssertDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1028              "invalid checksum", &N);
1029   }
1030 }
1031 
1032 void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1033   AssertDI(N.isDistinct(), "compile units must be distinct", &N);
1034   AssertDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1035 
1036   // Don't bother verifying the compilation directory or producer string
1037   // as those could be empty.
1038   AssertDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1039            N.getRawFile());
1040   AssertDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1041            N.getFile());
1042 
1043   verifySourceDebugInfo(N, *N.getFile());
1044 
1045   AssertDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1046            "invalid emission kind", &N);
1047 
1048   if (auto *Array = N.getRawEnumTypes()) {
1049     AssertDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1050     for (Metadata *Op : N.getEnumTypes()->operands()) {
1051       auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1052       AssertDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1053                "invalid enum type", &N, N.getEnumTypes(), Op);
1054     }
1055   }
1056   if (auto *Array = N.getRawRetainedTypes()) {
1057     AssertDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1058     for (Metadata *Op : N.getRetainedTypes()->operands()) {
1059       AssertDI(Op && (isa<DIType>(Op) ||
1060                       (isa<DISubprogram>(Op) &&
1061                        !cast<DISubprogram>(Op)->isDefinition())),
1062                "invalid retained type", &N, Op);
1063     }
1064   }
1065   if (auto *Array = N.getRawGlobalVariables()) {
1066     AssertDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1067     for (Metadata *Op : N.getGlobalVariables()->operands()) {
1068       AssertDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1069                "invalid global variable ref", &N, Op);
1070     }
1071   }
1072   if (auto *Array = N.getRawImportedEntities()) {
1073     AssertDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1074     for (Metadata *Op : N.getImportedEntities()->operands()) {
1075       AssertDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1076                &N, Op);
1077     }
1078   }
1079   if (auto *Array = N.getRawMacros()) {
1080     AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1081     for (Metadata *Op : N.getMacros()->operands()) {
1082       AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1083     }
1084   }
1085   CUVisited.insert(&N);
1086 }
1087 
1088 void Verifier::visitDISubprogram(const DISubprogram &N) {
1089   AssertDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1090   AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1091   if (auto *F = N.getRawFile())
1092     AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1093   else
1094     AssertDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1095   if (auto *T = N.getRawType())
1096     AssertDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1097   AssertDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1098            N.getRawContainingType());
1099   if (auto *Params = N.getRawTemplateParams())
1100     visitTemplateParams(N, *Params);
1101   if (auto *S = N.getRawDeclaration())
1102     AssertDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1103              "invalid subprogram declaration", &N, S);
1104   if (auto *RawNode = N.getRawRetainedNodes()) {
1105     auto *Node = dyn_cast<MDTuple>(RawNode);
1106     AssertDI(Node, "invalid retained nodes list", &N, RawNode);
1107     for (Metadata *Op : Node->operands()) {
1108       AssertDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op)),
1109                "invalid retained nodes, expected DILocalVariable or DILabel",
1110                &N, Node, Op);
1111     }
1112   }
1113   AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
1114            "invalid reference flags", &N);
1115 
1116   auto *Unit = N.getRawUnit();
1117   if (N.isDefinition()) {
1118     // Subprogram definitions (not part of the type hierarchy).
1119     AssertDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1120     AssertDI(Unit, "subprogram definitions must have a compile unit", &N);
1121     AssertDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1122     if (N.getFile())
1123       verifySourceDebugInfo(*N.getUnit(), *N.getFile());
1124   } else {
1125     // Subprogram declarations (part of the type hierarchy).
1126     AssertDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1127   }
1128 
1129   if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1130     auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1131     AssertDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1132     for (Metadata *Op : ThrownTypes->operands())
1133       AssertDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1134                Op);
1135   }
1136 
1137   if (N.areAllCallsDescribed())
1138     AssertDI(N.isDefinition(),
1139              "DIFlagAllCallsDescribed must be attached to a definition");
1140 }
1141 
1142 void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1143   AssertDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1144   AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1145            "invalid local scope", &N, N.getRawScope());
1146   if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1147     AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1148 }
1149 
1150 void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1151   visitDILexicalBlockBase(N);
1152 
1153   AssertDI(N.getLine() || !N.getColumn(),
1154            "cannot have column info without line info", &N);
1155 }
1156 
1157 void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1158   visitDILexicalBlockBase(N);
1159 }
1160 
1161 void Verifier::visitDINamespace(const DINamespace &N) {
1162   AssertDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1163   if (auto *S = N.getRawScope())
1164     AssertDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1165 }
1166 
1167 void Verifier::visitDIMacro(const DIMacro &N) {
1168   AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1169                N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1170            "invalid macinfo type", &N);
1171   AssertDI(!N.getName().empty(), "anonymous macro", &N);
1172   if (!N.getValue().empty()) {
1173     assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1174   }
1175 }
1176 
1177 void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1178   AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1179            "invalid macinfo type", &N);
1180   if (auto *F = N.getRawFile())
1181     AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1182 
1183   if (auto *Array = N.getRawElements()) {
1184     AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1185     for (Metadata *Op : N.getElements()->operands()) {
1186       AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1187     }
1188   }
1189 }
1190 
1191 void Verifier::visitDIModule(const DIModule &N) {
1192   AssertDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1193   AssertDI(!N.getName().empty(), "anonymous module", &N);
1194 }
1195 
1196 void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1197   AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1198 }
1199 
1200 void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1201   visitDITemplateParameter(N);
1202 
1203   AssertDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1204            &N);
1205 }
1206 
1207 void Verifier::visitDITemplateValueParameter(
1208     const DITemplateValueParameter &N) {
1209   visitDITemplateParameter(N);
1210 
1211   AssertDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1212                N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1213                N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1214            "invalid tag", &N);
1215 }
1216 
1217 void Verifier::visitDIVariable(const DIVariable &N) {
1218   if (auto *S = N.getRawScope())
1219     AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
1220   if (auto *F = N.getRawFile())
1221     AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1222 }
1223 
1224 void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1225   // Checks common to all variables.
1226   visitDIVariable(N);
1227 
1228   AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1229   AssertDI(!N.getName().empty(), "missing global variable name", &N);
1230   AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1231   AssertDI(N.getType(), "missing global variable type", &N);
1232   if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1233     AssertDI(isa<DIDerivedType>(Member),
1234              "invalid static data member declaration", &N, Member);
1235   }
1236 }
1237 
1238 void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1239   // Checks common to all variables.
1240   visitDIVariable(N);
1241 
1242   AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1243   AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1244   AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1245            "local variable requires a valid scope", &N, N.getRawScope());
1246 }
1247 
1248 void Verifier::visitDILabel(const DILabel &N) {
1249   if (auto *S = N.getRawScope())
1250     AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
1251   if (auto *F = N.getRawFile())
1252     AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1253 
1254   AssertDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1255   AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1256            "label requires a valid scope", &N, N.getRawScope());
1257 }
1258 
1259 void Verifier::visitDIExpression(const DIExpression &N) {
1260   AssertDI(N.isValid(), "invalid expression", &N);
1261 }
1262 
1263 void Verifier::visitDIGlobalVariableExpression(
1264     const DIGlobalVariableExpression &GVE) {
1265   AssertDI(GVE.getVariable(), "missing variable");
1266   if (auto *Var = GVE.getVariable())
1267     visitDIGlobalVariable(*Var);
1268   if (auto *Expr = GVE.getExpression()) {
1269     visitDIExpression(*Expr);
1270     if (auto Fragment = Expr->getFragmentInfo())
1271       verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1272   }
1273 }
1274 
1275 void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1276   AssertDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1277   if (auto *T = N.getRawType())
1278     AssertDI(isType(T), "invalid type ref", &N, T);
1279   if (auto *F = N.getRawFile())
1280     AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1281 }
1282 
1283 void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1284   AssertDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1285                N.getTag() == dwarf::DW_TAG_imported_declaration,
1286            "invalid tag", &N);
1287   if (auto *S = N.getRawScope())
1288     AssertDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1289   AssertDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1290            N.getRawEntity());
1291 }
1292 
1293 void Verifier::visitComdat(const Comdat &C) {
1294   // The Module is invalid if the GlobalValue has private linkage.  Entities
1295   // with private linkage don't have entries in the symbol table.
1296   if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1297     Assert(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
1298            GV);
1299 }
1300 
1301 void Verifier::visitModuleIdents(const Module &M) {
1302   const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1303   if (!Idents)
1304     return;
1305 
1306   // llvm.ident takes a list of metadata entry. Each entry has only one string.
1307   // Scan each llvm.ident entry and make sure that this requirement is met.
1308   for (const MDNode *N : Idents->operands()) {
1309     Assert(N->getNumOperands() == 1,
1310            "incorrect number of operands in llvm.ident metadata", N);
1311     Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
1312            ("invalid value for llvm.ident metadata entry operand"
1313             "(the operand should be a string)"),
1314            N->getOperand(0));
1315   }
1316 }
1317 
1318 void Verifier::visitModuleCommandLines(const Module &M) {
1319   const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1320   if (!CommandLines)
1321     return;
1322 
1323   // llvm.commandline takes a list of metadata entry. Each entry has only one
1324   // string. Scan each llvm.commandline entry and make sure that this
1325   // requirement is met.
1326   for (const MDNode *N : CommandLines->operands()) {
1327     Assert(N->getNumOperands() == 1,
1328            "incorrect number of operands in llvm.commandline metadata", N);
1329     Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
1330            ("invalid value for llvm.commandline metadata entry operand"
1331             "(the operand should be a string)"),
1332            N->getOperand(0));
1333   }
1334 }
1335 
1336 void Verifier::visitModuleFlags(const Module &M) {
1337   const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1338   if (!Flags) return;
1339 
1340   // Scan each flag, and track the flags and requirements.
1341   DenseMap<const MDString*, const MDNode*> SeenIDs;
1342   SmallVector<const MDNode*, 16> Requirements;
1343   for (const MDNode *MDN : Flags->operands())
1344     visitModuleFlag(MDN, SeenIDs, Requirements);
1345 
1346   // Validate that the requirements in the module are valid.
1347   for (const MDNode *Requirement : Requirements) {
1348     const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1349     const Metadata *ReqValue = Requirement->getOperand(1);
1350 
1351     const MDNode *Op = SeenIDs.lookup(Flag);
1352     if (!Op) {
1353       CheckFailed("invalid requirement on flag, flag is not present in module",
1354                   Flag);
1355       continue;
1356     }
1357 
1358     if (Op->getOperand(2) != ReqValue) {
1359       CheckFailed(("invalid requirement on flag, "
1360                    "flag does not have the required value"),
1361                   Flag);
1362       continue;
1363     }
1364   }
1365 }
1366 
1367 void
1368 Verifier::visitModuleFlag(const MDNode *Op,
1369                           DenseMap<const MDString *, const MDNode *> &SeenIDs,
1370                           SmallVectorImpl<const MDNode *> &Requirements) {
1371   // Each module flag should have three arguments, the merge behavior (a
1372   // constant int), the flag ID (an MDString), and the value.
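       // For example, a well-formed flag entry (behavior 1 is 'error') is:
       //   !{i32 1, !"wchar_size", i32 4}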
1373   Assert(Op->getNumOperands() == 3,
1374          "incorrect number of operands in module flag", Op);
1375   Module::ModFlagBehavior MFB;
1376   if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1377     Assert(
1378         mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1379         "invalid behavior operand in module flag (expected constant integer)",
1380         Op->getOperand(0));
1381     Assert(false,
1382            "invalid behavior operand in module flag (unexpected constant)",
1383            Op->getOperand(0));
1384   }
1385   MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1386   Assert(ID, "invalid ID operand in module flag (expected metadata string)",
1387          Op->getOperand(1));
1388 
1389   // Sanity check the values for behaviors with additional requirements.
1390   switch (MFB) {
1391   case Module::Error:
1392   case Module::Warning:
1393   case Module::Override:
1394     // These behavior types accept any value.
1395     break;
1396 
1397   case Module::Max: {
1398     Assert(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1399            "invalid value for 'max' module flag (expected constant integer)",
1400            Op->getOperand(2));
1401     break;
1402   }
1403 
1404   case Module::Require: {
1405     // The value should itself be an MDNode with two operands, a flag ID (an
1406     // MDString), and a value.
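         // For example (the flag names here are hypothetical), a 'require' flag
         // of the form !{i32 3, !"foo", !{!"bar", i32 1}} demands that the
         // module also carry a "bar" flag whose value is i32 1.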
1407     MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1408     Assert(Value && Value->getNumOperands() == 2,
1409            "invalid value for 'require' module flag (expected metadata pair)",
1410            Op->getOperand(2));
1411     Assert(isa<MDString>(Value->getOperand(0)),
1412            ("invalid value for 'require' module flag "
1413             "(first value operand should be a string)"),
1414            Value->getOperand(0));
1415 
1416     // Append it to the list of requirements, to check once all module flags are
1417     // scanned.
1418     Requirements.push_back(Value);
1419     break;
1420   }
1421 
1422   case Module::Append:
1423   case Module::AppendUnique: {
1424     // These behavior types require the operand be an MDNode.
1425     Assert(isa<MDNode>(Op->getOperand(2)),
1426            "invalid value for 'append'-type module flag "
1427            "(expected a metadata node)",
1428            Op->getOperand(2));
1429     break;
1430   }
1431   }
1432 
1433   // Unless this is a 'require' flag, check that the ID is unique.
1434   if (MFB != Module::Require) {
1435     bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1436     Assert(Inserted,
1437            "module flag identifiers must be unique (or of 'require' type)", ID);
1438   }
1439 
1440   if (ID->getString() == "wchar_size") {
1441     ConstantInt *Value =
1442         mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1443     Assert(Value, "wchar_size metadata requires constant integer argument");
1444   }
1445 
1446   if (ID->getString() == "Linker Options") {
1447     // If the llvm.linker.options named metadata exists, we assume that the
1448     // bitcode reader has upgraded the module flag. Otherwise the flag might
1449     // have been created by a client directly.
1450     Assert(M.getNamedMetadata("llvm.linker.options"),
1451            "'Linker Options' named metadata no longer supported");
1452   }
1453 
1454   if (ID->getString() == "CG Profile") {
1455     for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1456       visitModuleFlagCGProfileEntry(MDO);
1457   }
1458 }
1459 
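     // Each "CG Profile" entry is expected to be a (caller, callee, count)
     // triple, e.g. (the function names are illustrative):
     //   !{void ()* @caller, void ()* @callee, i64 42}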
1460 void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1461   auto CheckFunction = [&](const MDOperand &FuncMDO) {
1462     if (!FuncMDO)
1463       return;
1464     auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1465     Assert(F && isa<Function>(F->getValue()), "expected a Function or null",
1466            FuncMDO);
1467   };
1468   auto Node = dyn_cast_or_null<MDNode>(MDO);
1469   Assert(Node && Node->getNumOperands() == 3, "expected an MDNode triple", MDO);
1470   CheckFunction(Node->getOperand(0));
1471   CheckFunction(Node->getOperand(1));
1472   auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1473   Assert(Count && Count->getType()->isIntegerTy(),
1474          "expected an integer constant", Node->getOperand(2));
1475 }
1476 
1477 /// Return true if this attribute kind only applies to functions.
1478 static bool isFuncOnlyAttr(Attribute::AttrKind Kind) {
1479   switch (Kind) {
1480   case Attribute::NoReturn:
1481   case Attribute::NoCfCheck:
1482   case Attribute::NoUnwind:
1483   case Attribute::NoInline:
1484   case Attribute::AlwaysInline:
1485   case Attribute::OptimizeForSize:
1486   case Attribute::StackProtect:
1487   case Attribute::StackProtectReq:
1488   case Attribute::StackProtectStrong:
1489   case Attribute::SafeStack:
1490   case Attribute::ShadowCallStack:
1491   case Attribute::NoRedZone:
1492   case Attribute::NoImplicitFloat:
1493   case Attribute::Naked:
1494   case Attribute::InlineHint:
1495   case Attribute::StackAlignment:
1496   case Attribute::UWTable:
1497   case Attribute::NonLazyBind:
1498   case Attribute::ReturnsTwice:
1499   case Attribute::SanitizeAddress:
1500   case Attribute::SanitizeHWAddress:
1501   case Attribute::SanitizeThread:
1502   case Attribute::SanitizeMemory:
1503   case Attribute::MinSize:
1504   case Attribute::NoDuplicate:
1505   case Attribute::Builtin:
1506   case Attribute::NoBuiltin:
1507   case Attribute::Cold:
1508   case Attribute::OptForFuzzing:
1509   case Attribute::OptimizeNone:
1510   case Attribute::JumpTable:
1511   case Attribute::Convergent:
1512   case Attribute::ArgMemOnly:
1513   case Attribute::NoRecurse:
1514   case Attribute::InaccessibleMemOnly:
1515   case Attribute::InaccessibleMemOrArgMemOnly:
1516   case Attribute::AllocSize:
1517   case Attribute::SpeculativeLoadHardening:
1518   case Attribute::Speculatable:
1519   case Attribute::StrictFP:
1520     return true;
1521   default:
1522     break;
1523   }
1524   return false;
1525 }
1526 
1527 /// Return true if this is a function attribute that can also appear on
1528 /// arguments.
1529 static bool isFuncOrArgAttr(Attribute::AttrKind Kind) {
1530   return Kind == Attribute::ReadOnly || Kind == Attribute::WriteOnly ||
1531          Kind == Attribute::ReadNone;
1532 }
1533 
1534 void Verifier::verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
1535                                     const Value *V) {
1536   for (Attribute A : Attrs) {
1537     if (A.isStringAttribute())
1538       continue;
1539 
1540     if (isFuncOnlyAttr(A.getKindAsEnum())) {
1541       if (!IsFunction) {
1542         CheckFailed("Attribute '" + A.getAsString() +
1543                         "' only applies to functions!",
1544                     V);
1545         return;
1546       }
1547     } else if (IsFunction && !isFuncOrArgAttr(A.getKindAsEnum())) {
1548       CheckFailed("Attribute '" + A.getAsString() +
1549                       "' does not apply to functions!",
1550                   V);
1551       return;
1552     }
1553   }
1554 }
1555 
1556 // verifyParameterAttrs - Check the given attributes for an argument or return
1557 // value of the specified type.  The value V is printed in error messages.
1558 void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1559                                     const Value *V) {
1560   if (!Attrs.hasAttributes())
1561     return;
1562 
1563   verifyAttributeTypes(Attrs, /*IsFunction=*/false, V);
1564 
1565   // Check for mutually incompatible attributes.  Only inreg is compatible with
1566   // sret.
1567   unsigned AttrCount = 0;
1568   AttrCount += Attrs.hasAttribute(Attribute::ByVal);
1569   AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
1570   AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
1571                Attrs.hasAttribute(Attribute::InReg);
1572   AttrCount += Attrs.hasAttribute(Attribute::Nest);
1573   Assert(AttrCount <= 1, "Attributes 'byval', 'inalloca', 'inreg', 'nest', "
1574                          "and 'sret' are incompatible!",
1575          V);
1576 
1577   Assert(!(Attrs.hasAttribute(Attribute::InAlloca) &&
1578            Attrs.hasAttribute(Attribute::ReadOnly)),
1579          "Attributes "
1580          "'inalloca and readonly' are incompatible!",
1581          V);
1582 
1583   Assert(!(Attrs.hasAttribute(Attribute::StructRet) &&
1584            Attrs.hasAttribute(Attribute::Returned)),
1585          "Attributes "
1586          "'sret and returned' are incompatible!",
1587          V);
1588 
1589   Assert(!(Attrs.hasAttribute(Attribute::ZExt) &&
1590            Attrs.hasAttribute(Attribute::SExt)),
1591          "Attributes "
1592          "'zeroext and signext' are incompatible!",
1593          V);
1594 
1595   Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1596            Attrs.hasAttribute(Attribute::ReadOnly)),
1597          "Attributes "
1598          "'readnone and readonly' are incompatible!",
1599          V);
1600 
1601   Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1602            Attrs.hasAttribute(Attribute::WriteOnly)),
1603          "Attributes "
1604          "'readnone and writeonly' are incompatible!",
1605          V);
1606 
1607   Assert(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
1608            Attrs.hasAttribute(Attribute::WriteOnly)),
1609          "Attributes "
1610          "'readonly and writeonly' are incompatible!",
1611          V);
1612 
1613   Assert(!(Attrs.hasAttribute(Attribute::NoInline) &&
1614            Attrs.hasAttribute(Attribute::AlwaysInline)),
1615          "Attributes "
1616          "'noinline and alwaysinline' are incompatible!",
1617          V);
1618 
1619   AttrBuilder IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
1620   Assert(!AttrBuilder(Attrs).overlaps(IncompatibleAttrs),
1621          "Wrong types for attribute: " +
1622              AttributeSet::get(Context, IncompatibleAttrs).getAsString(),
1623          V);
1624 
1625   if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
1626     SmallPtrSet<Type*, 4> Visited;
1627     if (!PTy->getElementType()->isSized(&Visited)) {
1628       Assert(!Attrs.hasAttribute(Attribute::ByVal) &&
1629                  !Attrs.hasAttribute(Attribute::InAlloca),
1630              "Attributes 'byval' and 'inalloca' do not support unsized types!",
1631              V);
1632     }
1633     if (!isa<PointerType>(PTy->getElementType()))
1634       Assert(!Attrs.hasAttribute(Attribute::SwiftError),
1635              "Attribute 'swifterror' only applies to parameters "
1636              "with pointer to pointer type!",
1637              V);
1638   } else {
1639     Assert(!Attrs.hasAttribute(Attribute::ByVal),
1640            "Attribute 'byval' only applies to parameters with pointer type!",
1641            V);
1642     Assert(!Attrs.hasAttribute(Attribute::SwiftError),
1643            "Attribute 'swifterror' only applies to parameters "
1644            "with pointer type!",
1645            V);
1646   }
1647 }
1648 
1649 // Check return value, parameter, and function attributes against a function
1650 // type.  The value V is printed in error messages.
1651 void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
1652                                    const Value *V) {
1653   if (Attrs.isEmpty())
1654     return;
1655 
1656   bool SawNest = false;
1657   bool SawReturned = false;
1658   bool SawSRet = false;
1659   bool SawSwiftSelf = false;
1660   bool SawSwiftError = false;
1661 
1662   // Verify return value attributes.
1663   AttributeSet RetAttrs = Attrs.getRetAttributes();
1664   Assert((!RetAttrs.hasAttribute(Attribute::ByVal) &&
1665           !RetAttrs.hasAttribute(Attribute::Nest) &&
1666           !RetAttrs.hasAttribute(Attribute::StructRet) &&
1667           !RetAttrs.hasAttribute(Attribute::NoCapture) &&
1668           !RetAttrs.hasAttribute(Attribute::Returned) &&
1669           !RetAttrs.hasAttribute(Attribute::InAlloca) &&
1670           !RetAttrs.hasAttribute(Attribute::SwiftSelf) &&
1671           !RetAttrs.hasAttribute(Attribute::SwiftError)),
1672          "Attributes 'byval', 'inalloca', 'nest', 'sret', 'nocapture', "
1673          "'returned', 'swiftself', and 'swifterror' do not apply to return "
1674          "values!",
1675          V);
1676   Assert((!RetAttrs.hasAttribute(Attribute::ReadOnly) &&
1677           !RetAttrs.hasAttribute(Attribute::WriteOnly) &&
1678           !RetAttrs.hasAttribute(Attribute::ReadNone)),
1679          "Attribute '" + RetAttrs.getAsString() +
1680              "' does not apply to function returns",
1681          V);
1682   verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
1683 
1684   // Verify parameter attributes.
1685   for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
1686     Type *Ty = FT->getParamType(i);
1687     AttributeSet ArgAttrs = Attrs.getParamAttributes(i);
1688 
1689     verifyParameterAttrs(ArgAttrs, Ty, V);
1690 
1691     if (ArgAttrs.hasAttribute(Attribute::Nest)) {
1692       Assert(!SawNest, "More than one parameter has attribute nest!", V);
1693       SawNest = true;
1694     }
1695 
1696     if (ArgAttrs.hasAttribute(Attribute::Returned)) {
1697       Assert(!SawReturned, "More than one parameter has attribute returned!",
1698              V);
1699       Assert(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
1700              "Incompatible argument and return types for 'returned' attribute",
1701              V);
1702       SawReturned = true;
1703     }
1704 
1705     if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
1706       Assert(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
1707       Assert(i == 0 || i == 1,
1708              "Attribute 'sret' is not on first or second parameter!", V);
1709       SawSRet = true;
1710     }
1711 
1712     if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
1713       Assert(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
1714       SawSwiftSelf = true;
1715     }
1716 
1717     if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
1718       Assert(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!",
1719              V);
1720       SawSwiftError = true;
1721     }
1722 
1723     if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
1724       Assert(i == FT->getNumParams() - 1,
1725              "inalloca isn't on the last parameter!", V);
1726     }
1727   }
1728 
1729   if (!Attrs.hasAttributes(AttributeList::FunctionIndex))
1730     return;
1731 
1732   verifyAttributeTypes(Attrs.getFnAttributes(), /*IsFunction=*/true, V);
1733 
1734   Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1735            Attrs.hasFnAttribute(Attribute::ReadOnly)),
1736          "Attributes 'readnone and readonly' are incompatible!", V);
1737 
1738   Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1739            Attrs.hasFnAttribute(Attribute::WriteOnly)),
1740          "Attributes 'readnone and writeonly' are incompatible!", V);
1741 
1742   Assert(!(Attrs.hasFnAttribute(Attribute::ReadOnly) &&
1743            Attrs.hasFnAttribute(Attribute::WriteOnly)),
1744          "Attributes 'readonly and writeonly' are incompatible!", V);
1745 
1746   Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1747            Attrs.hasFnAttribute(Attribute::InaccessibleMemOrArgMemOnly)),
1748          "Attributes 'readnone and inaccessiblemem_or_argmemonly' are "
1749          "incompatible!",
1750          V);
1751 
1752   Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1753            Attrs.hasFnAttribute(Attribute::InaccessibleMemOnly)),
1754          "Attributes 'readnone and inaccessiblememonly' are incompatible!", V);
1755 
1756   Assert(!(Attrs.hasFnAttribute(Attribute::NoInline) &&
1757            Attrs.hasFnAttribute(Attribute::AlwaysInline)),
1758          "Attributes 'noinline and alwaysinline' are incompatible!", V);
1759 
1760   if (Attrs.hasFnAttribute(Attribute::OptimizeNone)) {
1761     Assert(Attrs.hasFnAttribute(Attribute::NoInline),
1762            "Attribute 'optnone' requires 'noinline'!", V);
1763 
1764     Assert(!Attrs.hasFnAttribute(Attribute::OptimizeForSize),
1765            "Attributes 'optsize and optnone' are incompatible!", V);
1766 
1767     Assert(!Attrs.hasFnAttribute(Attribute::MinSize),
1768            "Attributes 'minsize and optnone' are incompatible!", V);
1769   }
1770 
1771   if (Attrs.hasFnAttribute(Attribute::JumpTable)) {
1772     const GlobalValue *GV = cast<GlobalValue>(V);
1773     Assert(GV->hasGlobalUnnamedAddr(),
1774            "Attribute 'jumptable' requires 'unnamed_addr'", V);
1775   }
1776 
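       // 'allocsize' names one or two integer parameters of the function, e.g.
       // declarations such as these (the names are hypothetical):
       //   declare i8* @my_malloc(i64) allocsize(0)
       //   declare i8* @my_calloc(i64, i64) allocsize(0, 1)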
1777   if (Attrs.hasFnAttribute(Attribute::AllocSize)) {
1778     std::pair<unsigned, Optional<unsigned>> Args =
1779         Attrs.getAllocSizeArgs(AttributeList::FunctionIndex);
1780 
1781     auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
1782       if (ParamNo >= FT->getNumParams()) {
1783         CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
1784         return false;
1785       }
1786 
1787       if (!FT->getParamType(ParamNo)->isIntegerTy()) {
1788         CheckFailed("'allocsize' " + Name +
1789                         " argument must refer to an integer parameter",
1790                     V);
1791         return false;
1792       }
1793 
1794       return true;
1795     };
1796 
1797     if (!CheckParam("element size", Args.first))
1798       return;
1799 
1800     if (Args.second && !CheckParam("number of elements", *Args.second))
1801       return;
1802   }
1803 }
1804 
1805 void Verifier::verifyFunctionMetadata(
1806     ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
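       // A function-level !prof attachment is expected to look like (the count
       // below is illustrative):
       //   define void @f() !prof !0 { ... }
       //   !0 = !{!"function_entry_count", i64 100}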
1807   for (const auto &Pair : MDs) {
1808     if (Pair.first == LLVMContext::MD_prof) {
1809       MDNode *MD = Pair.second;
1810       Assert(MD->getNumOperands() >= 2,
1811              "!prof annotations should have at least 2 operands", MD);
1812 
1813       // Check first operand.
1814       Assert(MD->getOperand(0) != nullptr, "first operand should not be null",
1815              MD);
1816       Assert(isa<MDString>(MD->getOperand(0)),
1817              "expected string with name of the !prof annotation", MD);
1818       MDString *MDS = cast<MDString>(MD->getOperand(0));
1819       StringRef ProfName = MDS->getString();
1820       Assert(ProfName.equals("function_entry_count") ||
1821                  ProfName.equals("synthetic_function_entry_count"),
1822              "first operand should be 'function_entry_count'"
1823              " or 'synthetic_function_entry_count'",
1824              MD);
1825 
1826       // Check second operand.
1827       Assert(MD->getOperand(1) != nullptr, "second operand should not be null",
1828              MD);
1829       Assert(isa<ConstantAsMetadata>(MD->getOperand(1)),
1830              "expected integer argument to function_entry_count", MD);
1831     }
1832   }
1833 }
1834 
1835 void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
1836   if (!ConstantExprVisited.insert(EntryC).second)
1837     return;
1838 
1839   SmallVector<const Constant *, 16> Stack;
1840   Stack.push_back(EntryC);
1841 
1842   while (!Stack.empty()) {
1843     const Constant *C = Stack.pop_back_val();
1844 
1845     // Check this constant expression.
1846     if (const auto *CE = dyn_cast<ConstantExpr>(C))
1847       visitConstantExpr(CE);
1848 
1849     if (const auto *GV = dyn_cast<GlobalValue>(C)) {
1850       // Global Values get visited separately, but we do need to make sure
1851       // that the global value is in the correct module.
1852       Assert(GV->getParent() == &M, "Referencing global in another module!",
1853              EntryC, &M, GV, GV->getParent());
1854       continue;
1855     }
1856 
1857     // Visit all sub-expressions.
1858     for (const Use &U : C->operands()) {
1859       const auto *OpC = dyn_cast<Constant>(U);
1860       if (!OpC)
1861         continue;
1862       if (!ConstantExprVisited.insert(OpC).second)
1863         continue;
1864       Stack.push_back(OpC);
1865     }
1866   }
1867 }
1868 
1869 void Verifier::visitConstantExpr(const ConstantExpr *CE) {
1870   if (CE->getOpcode() == Instruction::BitCast)
1871     Assert(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
1872                                  CE->getType()),
1873            "Invalid bitcast", CE);
1874 
1875   if (CE->getOpcode() == Instruction::IntToPtr ||
1876       CE->getOpcode() == Instruction::PtrToInt) {
1877     auto *PtrTy = CE->getOpcode() == Instruction::IntToPtr
1878                       ? CE->getType()
1879                       : CE->getOperand(0)->getType();
1880     StringRef Msg = CE->getOpcode() == Instruction::IntToPtr
1881                         ? "inttoptr not supported for non-integral pointers"
1882                         : "ptrtoint not supported for non-integral pointers";
1883     Assert(
1884         !DL.isNonIntegralPointerType(cast<PointerType>(PtrTy->getScalarType())),
1885         Msg);
1886   }
1887 }
1888 
1889 bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
1890   // There shouldn't be more attribute sets than there are parameters plus the
1891   // function and return value.
1892   return Attrs.getNumAttrSets() <= Params + 2;
1893 }
1894 
1895 /// Verify that statepoint intrinsic is well formed.
1896 void Verifier::verifyStatepoint(ImmutableCallSite CS) {
1897   assert(CS.getCalledFunction() &&
1898          CS.getCalledFunction()->getIntrinsicID() ==
1899            Intrinsic::experimental_gc_statepoint);
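       // Sketch of the argument layout checked below (the names are purely
       // descriptive): <id>, <num patch bytes>, <target>, <num call args>,
       // <flags>, <call args>..., <num transition args>, <transition args>...,
       // <num deopt args>, <deopt args>..., <gc args>...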
1900 
1901   const Instruction &CI = *CS.getInstruction();
1902 
1903   Assert(!CS.doesNotAccessMemory() && !CS.onlyReadsMemory() &&
1904          !CS.onlyAccessesArgMemory(),
1905          "gc.statepoint must read and write all memory to preserve "
1906          "reordering restrictions required by safepoint semantics",
1907          &CI);
1908 
1909   const Value *IDV = CS.getArgument(0);
1910   Assert(isa<ConstantInt>(IDV), "gc.statepoint ID must be a constant integer",
1911          &CI);
1912 
1913   const Value *NumPatchBytesV = CS.getArgument(1);
1914   Assert(isa<ConstantInt>(NumPatchBytesV),
1915          "gc.statepoint number of patchable bytes must be a constant integer",
1916          &CI);
1917   const int64_t NumPatchBytes =
1918       cast<ConstantInt>(NumPatchBytesV)->getSExtValue();
1919   assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
1920   Assert(NumPatchBytes >= 0, "gc.statepoint number of patchable bytes must be "
1921                              "non-negative",
1922          &CI);
1923 
1924   const Value *Target = CS.getArgument(2);
1925   auto *PT = dyn_cast<PointerType>(Target->getType());
1926   Assert(PT && PT->getElementType()->isFunctionTy(),
1927          "gc.statepoint callee must be of function pointer type", &CI, Target);
1928   FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());
1929 
1930   const Value *NumCallArgsV = CS.getArgument(3);
1931   Assert(isa<ConstantInt>(NumCallArgsV),
1932          "gc.statepoint number of arguments to underlying call "
1933          "must be constant integer",
1934          &CI);
1935   const int NumCallArgs = cast<ConstantInt>(NumCallArgsV)->getZExtValue();
1936   Assert(NumCallArgs >= 0,
1937          "gc.statepoint number of arguments to underlying call "
1938          "must be non-negative",
1939          &CI);
1940   const int NumParams = (int)TargetFuncType->getNumParams();
1941   if (TargetFuncType->isVarArg()) {
1942     Assert(NumCallArgs >= NumParams,
1943            "gc.statepoint mismatch in number of vararg call args", &CI);
1944 
1945     // TODO: Remove this limitation
1946     Assert(TargetFuncType->getReturnType()->isVoidTy(),
1947            "gc.statepoint doesn't support wrapping non-void "
1948            "vararg functions yet",
1949            &CI);
1950   } else
1951     Assert(NumCallArgs == NumParams,
1952            "gc.statepoint mismatch in number of call args", &CI);
1953 
1954   const Value *FlagsV = CS.getArgument(4);
1955   Assert(isa<ConstantInt>(FlagsV),
1956          "gc.statepoint flags must be constant integer", &CI);
1957   const uint64_t Flags = cast<ConstantInt>(FlagsV)->getZExtValue();
1958   Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
1959          "unknown flag used in gc.statepoint flags argument", &CI);
1960 
1961   // Verify that the types of the call parameter arguments match
1962   // the type of the wrapped callee.
1963   AttributeList Attrs = CS.getAttributes();
1964   for (int i = 0; i < NumParams; i++) {
1965     Type *ParamType = TargetFuncType->getParamType(i);
1966     Type *ArgType = CS.getArgument(5 + i)->getType();
1967     Assert(ArgType == ParamType,
1968            "gc.statepoint call argument does not match wrapped "
1969            "function type",
1970            &CI);
1971 
1972     if (TargetFuncType->isVarArg()) {
1973       AttributeSet ArgAttrs = Attrs.getParamAttributes(5 + i);
1974       Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
1975              "Attribute 'sret' cannot be used for vararg call arguments!", &CI);
1976     }
1977   }
1978 
1979   const int EndCallArgsInx = 4 + NumCallArgs;
1980 
1981   const Value *NumTransitionArgsV = CS.getArgument(EndCallArgsInx+1);
1982   Assert(isa<ConstantInt>(NumTransitionArgsV),
1983          "gc.statepoint number of transition arguments "
1984          "must be constant integer",
1985          &CI);
1986   const int NumTransitionArgs =
1987       cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
1988   Assert(NumTransitionArgs >= 0,
1989          "gc.statepoint number of transition arguments must be non-negative", &CI);
1990   const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
1991 
1992   const Value *NumDeoptArgsV = CS.getArgument(EndTransitionArgsInx+1);
1993   Assert(isa<ConstantInt>(NumDeoptArgsV),
1994          "gc.statepoint number of deoptimization arguments "
1995          "must be constant integer",
1996          &CI);
1997   const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
1998   Assert(NumDeoptArgs >= 0, "gc.statepoint number of deoptimization arguments "
1999                             "must be non-negative",
2000          &CI);
2001 
2002   const int ExpectedNumArgs =
2003       7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs;
2004   Assert(ExpectedNumArgs <= (int)CS.arg_size(),
2005          "gc.statepoint too few arguments according to length fields", &CI);
2006 
2007   // Check that the only uses of this gc.statepoint are gc.result or
2008   // gc.relocate calls which are tied to this statepoint and thus part
2009   // of the same statepoint sequence
2010   for (const User *U : CI.users()) {
2011     const CallInst *Call = dyn_cast<const CallInst>(U);
2012     Assert(Call, "illegal use of statepoint token", &CI, U);
2013     if (!Call) continue;
2014     Assert(isa<GCRelocateInst>(Call) || isa<GCResultInst>(Call),
2015            "gc.result or gc.relocate are the only value uses "
2016            "of a gc.statepoint",
2017            &CI, U);
2018     if (isa<GCResultInst>(Call)) {
2019       Assert(Call->getArgOperand(0) == &CI,
2020              "gc.result connected to wrong gc.statepoint", &CI, Call);
2021     } else if (isa<GCRelocateInst>(Call)) {
2022       Assert(Call->getArgOperand(0) == &CI,
2023              "gc.relocate connected to wrong gc.statepoint", &CI, Call);
2024     }
2025   }
2026 
2027   // Note: It is legal for a single derived pointer to be listed multiple
2028   // times.  It's non-optimal, but it is legal.  It can also happen after
2029   // insertion if we strip a bitcast away.
2030   // Note: It is really tempting to check that each base is relocated and
2031   // that a derived pointer is never reused as a base pointer.  This turns
2032   // out to be problematic since optimizations run after safepoint insertion
2033   // can recognize equality properties that the insertion logic doesn't know
2034   // about.  See example statepoint.ll in the verifier subdirectory.
2035 }
2036 
2037 void Verifier::verifyFrameRecoverIndices() {
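       // For example (illustrative), if a function contains
       //   call void (...) @llvm.localescape(i32* %a, i32* %b)
       // then any llvm.localrecover referring to it may only use indices 0 and 1.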
2038   for (auto &Counts : FrameEscapeInfo) {
2039     Function *F = Counts.first;
2040     unsigned EscapedObjectCount = Counts.second.first;
2041     unsigned MaxRecoveredIndex = Counts.second.second;
2042     Assert(MaxRecoveredIndex <= EscapedObjectCount,
2043            "all indices passed to llvm.localrecover must be less than the "
2044            "number of arguments passed to llvm.localescape in the parent "
2045            "function",
2046            F);
2047   }
2048 }
2049 
2050 static Instruction *getSuccPad(Instruction *Terminator) {
2051   BasicBlock *UnwindDest;
2052   if (auto *II = dyn_cast<InvokeInst>(Terminator))
2053     UnwindDest = II->getUnwindDest();
2054   else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2055     UnwindDest = CSI->getUnwindDest();
2056   else
2057     UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2058   return UnwindDest->getFirstNonPHI();
2059 }
2060 
2061 void Verifier::verifySiblingFuncletUnwinds() {
2062   SmallPtrSet<Instruction *, 8> Visited;
2063   SmallPtrSet<Instruction *, 8> Active;
2064   for (const auto &Pair : SiblingFuncletInfo) {
2065     Instruction *PredPad = Pair.first;
2066     if (Visited.count(PredPad))
2067       continue;
2068     Active.insert(PredPad);
2069     Instruction *Terminator = Pair.second;
2070     do {
2071       Instruction *SuccPad = getSuccPad(Terminator);
2072       if (Active.count(SuccPad)) {
2073         // Found a cycle; report error
2074         Instruction *CyclePad = SuccPad;
2075         SmallVector<Instruction *, 8> CycleNodes;
2076         do {
2077           CycleNodes.push_back(CyclePad);
2078           Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2079           if (CycleTerminator != CyclePad)
2080             CycleNodes.push_back(CycleTerminator);
2081           CyclePad = getSuccPad(CycleTerminator);
2082         } while (CyclePad != SuccPad);
2083         Assert(false, "EH pads can't handle each other's exceptions",
2084                ArrayRef<Instruction *>(CycleNodes));
2085       }
2086       // Don't re-walk a node we've already checked
2087       if (!Visited.insert(SuccPad).second)
2088         break;
2089       // Walk to this successor if it has a map entry.
2090       PredPad = SuccPad;
2091       auto TermI = SiblingFuncletInfo.find(PredPad);
2092       if (TermI == SiblingFuncletInfo.end())
2093         break;
2094       Terminator = TermI->second;
2095       Active.insert(PredPad);
2096     } while (true);
2097     // Each node only has one successor, so we've walked all the active
2098     // nodes' successors.
2099     Active.clear();
2100   }
2101 }
2102 
2103 // visitFunction - Verify that a function is ok.
2104 //
2105 void Verifier::visitFunction(const Function &F) {
2106   visitGlobalValue(F);
2107 
2108   // Check function arguments.
2109   FunctionType *FT = F.getFunctionType();
2110   unsigned NumArgs = F.arg_size();
2111 
2112   Assert(&Context == &F.getContext(),
2113          "Function context does not match Module context!", &F);
2114 
2115   Assert(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2116   Assert(FT->getNumParams() == NumArgs,
2117          "# formal arguments must match # of arguments for function type!", &F,
2118          FT);
2119   Assert(F.getReturnType()->isFirstClassType() ||
2120              F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2121          "Functions cannot return aggregate values!", &F);
2122 
2123   Assert(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2124          "Invalid struct return type!", &F);
2125 
2126   AttributeList Attrs = F.getAttributes();
2127 
2128   Assert(verifyAttributeCount(Attrs, FT->getNumParams()),
2129          "Attribute after last parameter!", &F);
2130 
2131   // Check function attributes.
2132   verifyFunctionAttrs(FT, Attrs, &F);
2133 
2134   // On function declarations/definitions, we do not support the builtin
2135   // attribute. We do not check this in VerifyFunctionAttrs since that is
2136   // checking for Attributes that can/can not ever be on functions.
2137   Assert(!Attrs.hasFnAttribute(Attribute::Builtin),
2138          "Attribute 'builtin' can only be applied to a callsite.", &F);
2139 
2140   // Check that this function meets the restrictions on this calling convention.
2141   // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2142   // restrictions can be lifted.
2143   switch (F.getCallingConv()) {
2144   default:
2145   case CallingConv::C:
2146     break;
2147   case CallingConv::AMDGPU_KERNEL:
2148   case CallingConv::SPIR_KERNEL:
2149     Assert(F.getReturnType()->isVoidTy(),
2150            "Calling convention requires void return type", &F);
2151     LLVM_FALLTHROUGH;
2152   case CallingConv::AMDGPU_VS:
2153   case CallingConv::AMDGPU_HS:
2154   case CallingConv::AMDGPU_GS:
2155   case CallingConv::AMDGPU_PS:
2156   case CallingConv::AMDGPU_CS:
2157     Assert(!F.hasStructRetAttr(),
2158            "Calling convention does not allow sret", &F);
2159     LLVM_FALLTHROUGH;
2160   case CallingConv::Fast:
2161   case CallingConv::Cold:
2162   case CallingConv::Intel_OCL_BI:
2163   case CallingConv::PTX_Kernel:
2164   case CallingConv::PTX_Device:
2165     Assert(!F.isVarArg(), "Calling convention does not support varargs or "
2166                           "perfect forwarding!",
2167            &F);
2168     break;
2169   }
2170 
2171   bool isLLVMdotName = F.getName().startswith("llvm.");
2173 
2174   // Check that the argument values match the function type for this function...
2175   unsigned i = 0;
2176   for (const Argument &Arg : F.args()) {
2177     Assert(Arg.getType() == FT->getParamType(i),
2178            "Argument value does not match function argument type!", &Arg,
2179            FT->getParamType(i));
2180     Assert(Arg.getType()->isFirstClassType(),
2181            "Function arguments must have first-class types!", &Arg);
2182     if (!isLLVMdotName) {
2183       Assert(!Arg.getType()->isMetadataTy(),
2184              "Function takes metadata but isn't an intrinsic", &Arg, &F);
2185       Assert(!Arg.getType()->isTokenTy(),
2186              "Function takes token but isn't an intrinsic", &Arg, &F);
2187     }
2188 
2189     // Check that swifterror argument is only used by loads and stores.
2190     if (Attrs.hasParamAttribute(i, Attribute::SwiftError)) {
2191       verifySwiftErrorValue(&Arg);
2192     }
2193     ++i;
2194   }
2195 
2196   if (!isLLVMdotName)
2197     Assert(!F.getReturnType()->isTokenTy(),
2198            "Function returns a token but isn't an intrinsic", &F);
2199 
2200   // Get the function metadata attachments.
2201   SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
2202   F.getAllMetadata(MDs);
2203   assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2204   verifyFunctionMetadata(MDs);
2205 
2206   // Check validity of the personality function
2207   if (F.hasPersonalityFn()) {
2208     auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2209     if (Per)
2210       Assert(Per->getParent() == F.getParent(),
2211              "Referencing personality function in another module!",
2212              &F, F.getParent(), Per, Per->getParent());
2213   }
2214 
2215   if (F.isMaterializable()) {
2216     // Function has a body somewhere we can't see.
2217     Assert(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2218            MDs.empty() ? nullptr : MDs.front().second);
2219   } else if (F.isDeclaration()) {
2220     for (const auto &I : MDs) {
2221       AssertDI(I.first != LLVMContext::MD_dbg,
2222                "function declaration may not have a !dbg attachment", &F);
2223       Assert(I.first != LLVMContext::MD_prof,
2224              "function declaration may not have a !prof attachment", &F);
2225 
2226       // Verify the metadata itself.
2227       visitMDNode(*I.second);
2228     }
2229     Assert(!F.hasPersonalityFn(),
2230            "Function declaration shouldn't have a personality routine", &F);
2231   } else {
2232     // Verify that this function (which has a body) is not named "llvm.*".  It
2233     // is not legal to define intrinsics.
2234     Assert(!isLLVMdotName, "llvm intrinsics cannot be defined!", &F);
2235 
2236     // Check the entry node
2237     const BasicBlock *Entry = &F.getEntryBlock();
2238     Assert(pred_empty(Entry),
2239            "Entry block to function must not have predecessors!", Entry);
2240 
2241     // The address of the entry block cannot be taken, unless it is dead.
2242     if (Entry->hasAddressTaken()) {
2243       Assert(!BlockAddress::lookup(Entry)->isConstantUsed(),
2244              "blockaddress may not be used with the entry block!", Entry);
2245     }
2246 
2247     unsigned NumDebugAttachments = 0, NumProfAttachments = 0;
2248     // Visit metadata attachments.
2249     for (const auto &I : MDs) {
2250       // Verify that the attachment is legal.
2251       switch (I.first) {
2252       default:
2253         break;
2254       case LLVMContext::MD_dbg: {
2255         ++NumDebugAttachments;
2256         AssertDI(NumDebugAttachments == 1,
2257                  "function must have a single !dbg attachment", &F, I.second);
2258         AssertDI(isa<DISubprogram>(I.second),
2259                  "function !dbg attachment must be a subprogram", &F, I.second);
2260         auto *SP = cast<DISubprogram>(I.second);
2261         const Function *&AttachedTo = DISubprogramAttachments[SP];
2262         AssertDI(!AttachedTo || AttachedTo == &F,
2263                  "DISubprogram attached to more than one function", SP, &F);
2264         AttachedTo = &F;
2265         break;
2266       }
2267       case LLVMContext::MD_prof:
2268         ++NumProfAttachments;
2269         Assert(NumProfAttachments == 1,
2270                "function must have a single !prof attachment", &F, I.second);
2271         break;
2272       }
2273 
2274       // Verify the metadata itself.
2275       visitMDNode(*I.second);
2276     }
2277   }
2278 
2279   // If this function is actually an intrinsic, verify that it is only used in
2280   // direct call/invokes, never having its "address taken".
2281   // Only do this if the module is materialized, otherwise we don't have all the
2282   // uses.
2283   if (F.getIntrinsicID() && F.getParent()->isMaterialized()) {
2284     const User *U;
2285     if (F.hasAddressTaken(&U))
2286       Assert(false, "Invalid user of intrinsic instruction!", U);
2287   }
2288 
2289   auto *N = F.getSubprogram();
2290   HasDebugInfo = (N != nullptr);
2291   if (!HasDebugInfo)
2292     return;
2293 
2294   // Check that all !dbg attachments lead back to N (or, at least, another
2295   // subprogram that describes the same function).
2296   //
2297   // FIXME: Check this incrementally while visiting !dbg attachments.
2298   // FIXME: Only check when N is the canonical subprogram for F.
2299   SmallPtrSet<const MDNode *, 32> Seen;
2300   for (auto &BB : F)
2301     for (auto &I : BB) {
2302       // Be careful about using DILocation here since we might be dealing with
2303       // broken code (this is the Verifier after all).
2304       DILocation *DL =
2305           dyn_cast_or_null<DILocation>(I.getDebugLoc().getAsMDNode());
2306       if (!DL)
2307         continue;
2308       if (!Seen.insert(DL).second)
2309         continue;
2310 
2311       Metadata *Parent = DL->getRawScope();
2312       AssertDI(Parent && isa<DILocalScope>(Parent),
2313                "DILocation's scope must be a DILocalScope", N, &F, &I, DL,
2314                Parent);
2315       DILocalScope *Scope = DL->getInlinedAtScope();
2316       if (Scope && !Seen.insert(Scope).second)
2317         continue;
2318 
2319       DISubprogram *SP = Scope ? Scope->getSubprogram() : nullptr;
2320 
2321       // Scope and SP could be the same MDNode and we don't want to skip
2322       // validation in that case.
2323       if (SP && ((Scope != SP) && !Seen.insert(SP).second))
2324         continue;
2325 
2326       // FIXME: Once N is canonical, check "SP == &N".
2327       AssertDI(SP->describes(&F),
2328                "!dbg attachment points at wrong subprogram for function", N, &F,
2329                &I, DL, Scope, SP);
2330     }
2331 }
2332 
2333 // visitBasicBlock - Verify that a basic block is well formed...
2334 //
2335 void Verifier::visitBasicBlock(BasicBlock &BB) {
2336   InstsInThisBlock.clear();
2337 
2338   // Ensure that basic blocks have terminators!
2339   Assert(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
2340 
2341   // Check constraints that this basic block imposes on all of the PHI nodes in
2342   // it.
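       // For instance, with predecessors %a and %b, a well-formed PHI is
       //   %v = phi i32 [ 0, %a ], [ %x, %b ]
       // and must list each predecessor exactly once.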
2343   if (isa<PHINode>(BB.front())) {
2344     SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB));
2345     SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
2346     llvm::sort(Preds);
2347     for (const PHINode &PN : BB.phis()) {
2348       // Ensure that PHI nodes have at least one entry!
2349       Assert(PN.getNumIncomingValues() != 0,
2350              "PHI nodes must have at least one entry.  If the block is dead, "
2351              "the PHI should be removed!",
2352              &PN);
2353       Assert(PN.getNumIncomingValues() == Preds.size(),
2354              "PHINode should have one entry for each predecessor of its "
2355              "parent basic block!",
2356              &PN);
2357 
2358       // Get and sort all incoming values in the PHI node...
2359       Values.clear();
2360       Values.reserve(PN.getNumIncomingValues());
2361       for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
2362         Values.push_back(
2363             std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
2364       llvm::sort(Values);
2365 
2366       for (unsigned i = 0, e = Values.size(); i != e; ++i) {
2367         // Check to make sure that if there is more than one entry for a
2368         // particular basic block in this PHI node, the incoming values are
2369         // all identical.
2370         //
2371         Assert(i == 0 || Values[i].first != Values[i - 1].first ||
2372                    Values[i].second == Values[i - 1].second,
2373                "PHI node has multiple entries for the same basic block with "
2374                "different incoming values!",
2375                &PN, Values[i].first, Values[i].second, Values[i - 1].second);
2376 
2377         // Check to make sure that the predecessors and PHI node entries are
2378         // matched up.
2379         Assert(Values[i].first == Preds[i],
2380                "PHI node entries do not match predecessors!", &PN,
2381                Values[i].first, Preds[i]);
2382       }
2383     }
2384   }
2385 
2386   // Check that all instructions have their parent pointers set up correctly.
2387   for (auto &I : BB)
2388     Assert(I.getParent() == &BB, "Instruction has bogus parent pointer!");
2391 }
2392 
2393 void Verifier::visitTerminator(Instruction &I) {
2394   // Ensure that terminators only exist at the end of the basic block.
2395   Assert(&I == I.getParent()->getTerminator(),
2396          "Terminator found in the middle of a basic block!", I.getParent());
2397   visitInstruction(I);
2398 }
2399 
2400 void Verifier::visitBranchInst(BranchInst &BI) {
2401   if (BI.isConditional()) {
2402     Assert(BI.getCondition()->getType()->isIntegerTy(1),
2403            "Branch condition is not 'i1' type!", &BI, BI.getCondition());
2404   }
2405   visitTerminator(BI);
2406 }
2407 
2408 void Verifier::visitReturnInst(ReturnInst &RI) {
2409   Function *F = RI.getParent()->getParent();
2410   unsigned N = RI.getNumOperands();
2411   if (F->getReturnType()->isVoidTy())
2412     Assert(N == 0,
2413            "Found return instr that returns non-void in Function of void "
2414            "return type!",
2415            &RI, F->getReturnType());
2416   else
2417     Assert(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
2418            "Function return type does not match operand "
2419            "type of return inst!",
2420            &RI, F->getReturnType());
2421 
2422   // Check to make sure that the return value has necessary properties for
2423   // terminators...
2424   visitTerminator(RI);
2425 }
2426 
2427 void Verifier::visitSwitchInst(SwitchInst &SI) {
2428   // Check to make sure that all of the constants in the switch instruction
2429   // have the same type as the switched-on value.
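       // For example, each case constant below must be i32 to match %val:
       //   switch i32 %val, label %default [ i32 0, label %bb0
       //                                     i32 1, label %bb1 ]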
2430   Type *SwitchTy = SI.getCondition()->getType();
2431   SmallPtrSet<ConstantInt*, 32> Constants;
2432   for (auto &Case : SI.cases()) {
2433     Assert(Case.getCaseValue()->getType() == SwitchTy,
2434            "Switch constants must all be same type as switch value!", &SI);
2435     Assert(Constants.insert(Case.getCaseValue()).second,
2436            "Duplicate integer as switch case", &SI, Case.getCaseValue());
2437   }
2438 
2439   visitTerminator(SI);
2440 }
2441 
2442 void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
2443   Assert(BI.getAddress()->getType()->isPointerTy(),
2444          "Indirectbr operand must have pointer type!", &BI);
2445   for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
2446     Assert(BI.getDestination(i)->getType()->isLabelTy(),
2447            "Indirectbr destinations must all have label type!", &BI);
2448 
2449   visitTerminator(BI);
2450 }
2451 
2452 void Verifier::visitSelectInst(SelectInst &SI) {
2453   Assert(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
2454                                          SI.getOperand(2)),
2455          "Invalid operands for select instruction!", &SI);
2456 
2457   Assert(SI.getTrueValue()->getType() == SI.getType(),
2458          "Select values must have same type as select instruction!", &SI);
2459   visitInstruction(SI);
2460 }
2461 
2462 /// visitUserOp1 - User-defined operators shouldn't live beyond the lifetime of
2463 /// a pass; if any exist, it's an error.
2464 ///
2465 void Verifier::visitUserOp1(Instruction &I) {
2466   Assert(false, "User-defined operators should not live outside of a pass!", &I);
2467 }
2468 
2469 void Verifier::visitTruncInst(TruncInst &I) {
2470   // Get the source and destination types
2471   Type *SrcTy = I.getOperand(0)->getType();
2472   Type *DestTy = I.getType();
2473 
2474   // Get the size of the types in bits, we'll need this later
2475   unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2476   unsigned DestBitSize = DestTy->getScalarSizeInBits();
2477 
2478   Assert(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
2479   Assert(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
2480   Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2481          "trunc source and destination must both be a vector or neither", &I);
2482   Assert(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
2483 
2484   visitInstruction(I);
2485 }
2486 
2487 void Verifier::visitZExtInst(ZExtInst &I) {
2488   // Get the source and destination types
2489   Type *SrcTy = I.getOperand(0)->getType();
2490   Type *DestTy = I.getType();
2491 
2492   // Get the size of the types in bits, we'll need this later
2493   Assert(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
2494   Assert(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
2495   Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2496          "zext source and destination must both be a vector or neither", &I);
2497   unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2498   unsigned DestBitSize = DestTy->getScalarSizeInBits();
2499 
2500   Assert(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
2501 
2502   visitInstruction(I);
2503 }
2504 
2505 void Verifier::visitSExtInst(SExtInst &I) {
2506   // Get the source and destination types
2507   Type *SrcTy = I.getOperand(0)->getType();
2508   Type *DestTy = I.getType();
2509 
2510   // Get the size of the types in bits, we'll need this later
2511   unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2512   unsigned DestBitSize = DestTy->getScalarSizeInBits();
2513 
2514   Assert(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
2515   Assert(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
2516   Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2517          "sext source and destination must both be a vector or neither", &I);
2518   Assert(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
2519 
2520   visitInstruction(I);
2521 }
2522 
2523 void Verifier::visitFPTruncInst(FPTruncInst &I) {
2524   // Get the source and destination types
2525   Type *SrcTy = I.getOperand(0)->getType();
2526   Type *DestTy = I.getType();
2527   // Get the size of the types in bits, we'll need this later
2528   unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2529   unsigned DestBitSize = DestTy->getScalarSizeInBits();
2530 
2531   Assert(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
2532   Assert(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
2533   Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2534          "fptrunc source and destination must both be a vector or neither", &I);
2535   Assert(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
2536 
2537   visitInstruction(I);
2538 }
2539 
2540 void Verifier::visitFPExtInst(FPExtInst &I) {
2541   // Get the source and destination types
2542   Type *SrcTy = I.getOperand(0)->getType();
2543   Type *DestTy = I.getType();
2544 
2545   // Get the size of the types in bits, we'll need this later
2546   unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2547   unsigned DestBitSize = DestTy->getScalarSizeInBits();
2548 
2549   Assert(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
2550   Assert(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
2551   Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2552          "fpext source and destination must both be a vector or neither", &I);
2553   Assert(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
2554 
2555   visitInstruction(I);
2556 }
2557 
2558 void Verifier::visitUIToFPInst(UIToFPInst &I) {
2559   // Get the source and destination types
2560   Type *SrcTy = I.getOperand(0)->getType();
2561   Type *DestTy = I.getType();
2562 
2563   bool SrcVec = SrcTy->isVectorTy();
2564   bool DstVec = DestTy->isVectorTy();
2565 
2566   Assert(SrcVec == DstVec,
2567          "UIToFP source and dest must both be vector or scalar", &I);
2568   Assert(SrcTy->isIntOrIntVectorTy(),
2569          "UIToFP source must be integer or integer vector", &I);
2570   Assert(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
2571          &I);
2572 
2573   if (SrcVec && DstVec)
2574     Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2575                cast<VectorType>(DestTy)->getNumElements(),
2576            "UIToFP source and dest vector length mismatch", &I);
2577 
2578   visitInstruction(I);
2579 }
2580 
2581 void Verifier::visitSIToFPInst(SIToFPInst &I) {
2582   // Get the source and destination types
2583   Type *SrcTy = I.getOperand(0)->getType();
2584   Type *DestTy = I.getType();
2585 
2586   bool SrcVec = SrcTy->isVectorTy();
2587   bool DstVec = DestTy->isVectorTy();
2588 
2589   Assert(SrcVec == DstVec,
2590          "SIToFP source and dest must both be vector or scalar", &I);
2591   Assert(SrcTy->isIntOrIntVectorTy(),
2592          "SIToFP source must be integer or integer vector", &I);
2593   Assert(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
2594          &I);
2595 
2596   if (SrcVec && DstVec)
2597     Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2598                cast<VectorType>(DestTy)->getNumElements(),
2599            "SIToFP source and dest vector length mismatch", &I);
2600 
2601   visitInstruction(I);
2602 }
2603 
2604 void Verifier::visitFPToUIInst(FPToUIInst &I) {
2605   // Get the source and destination types
2606   Type *SrcTy = I.getOperand(0)->getType();
2607   Type *DestTy = I.getType();
2608 
2609   bool SrcVec = SrcTy->isVectorTy();
2610   bool DstVec = DestTy->isVectorTy();
2611 
2612   Assert(SrcVec == DstVec,
2613          "FPToUI source and dest must both be vector or scalar", &I);
2614   Assert(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector",
2615          &I);
2616   Assert(DestTy->isIntOrIntVectorTy(),
2617          "FPToUI result must be integer or integer vector", &I);
2618 
2619   if (SrcVec && DstVec)
2620     Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2621                cast<VectorType>(DestTy)->getNumElements(),
2622            "FPToUI source and dest vector length mismatch", &I);
2623 
2624   visitInstruction(I);
2625 }
2626 
2627 void Verifier::visitFPToSIInst(FPToSIInst &I) {
2628   // Get the source and destination types
2629   Type *SrcTy = I.getOperand(0)->getType();
2630   Type *DestTy = I.getType();
2631 
2632   bool SrcVec = SrcTy->isVectorTy();
2633   bool DstVec = DestTy->isVectorTy();
2634 
2635   Assert(SrcVec == DstVec,
2636          "FPToSI source and dest must both be vector or scalar", &I);
2637   Assert(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector",
2638          &I);
2639   Assert(DestTy->isIntOrIntVectorTy(),
2640          "FPToSI result must be integer or integer vector", &I);
2641 
2642   if (SrcVec && DstVec)
2643     Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2644                cast<VectorType>(DestTy)->getNumElements(),
2645            "FPToSI source and dest vector length mismatch", &I);
2646 
2647   visitInstruction(I);
2648 }
2649 
2650 void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
2651   // Get the source and destination types
2652   Type *SrcTy = I.getOperand(0)->getType();
2653   Type *DestTy = I.getType();
2654 
2655   Assert(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
2656 
2657   if (auto *PTy = dyn_cast<PointerType>(SrcTy->getScalarType()))
2658     Assert(!DL.isNonIntegralPointerType(PTy),
2659            "ptrtoint not supported for non-integral pointers");
2660 
2661   Assert(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
2662   Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
2663          &I);
2664 
2665   if (SrcTy->isVectorTy()) {
2666     VectorType *VSrc = dyn_cast<VectorType>(SrcTy);
2667     VectorType *VDest = dyn_cast<VectorType>(DestTy);
2668     Assert(VSrc->getNumElements() == VDest->getNumElements(),
2669            "PtrToInt Vector width mismatch", &I);
2670   }
2671 
2672   visitInstruction(I);
2673 }
2674 
2675 void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
2676   // Get the source and destination types
2677   Type *SrcTy = I.getOperand(0)->getType();
2678   Type *DestTy = I.getType();
2679 
2680   Assert(SrcTy->isIntOrIntVectorTy(),
2681          "IntToPtr source must be an integral", &I);
2682   Assert(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
2683 
2684   if (auto *PTy = dyn_cast<PointerType>(DestTy->getScalarType()))
2685     Assert(!DL.isNonIntegralPointerType(PTy),
2686            "inttoptr not supported for non-integral pointers");
2687 
2688   Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
2689          &I);
2690   if (SrcTy->isVectorTy()) {
2691     VectorType *VSrc = dyn_cast<VectorType>(SrcTy);
2692     VectorType *VDest = dyn_cast<VectorType>(DestTy);
2693     Assert(VSrc->getNumElements() == VDest->getNumElements(),
2694            "IntToPtr Vector width mismatch", &I);
2695   }
2696   visitInstruction(I);
2697 }
2698 
2699 void Verifier::visitBitCastInst(BitCastInst &I) {
2700   Assert(
2701       CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
2702       "Invalid bitcast", &I);
2703   visitInstruction(I);
2704 }
2705 
2706 void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
2707   Type *SrcTy = I.getOperand(0)->getType();
2708   Type *DestTy = I.getType();
2709 
2710   Assert(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
2711          &I);
2712   Assert(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
2713          &I);
2714   Assert(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
2715          "AddrSpaceCast must be between different address spaces", &I);
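       // e.g. 'addrspacecast i8 addrspace(1)* %p to i32 addrspace(1)*' is
       // rejected by the check above; a conversion within a single address
       // space should be expressed as a bitcast instead.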
2716   if (SrcTy->isVectorTy())
2717     Assert(SrcTy->getVectorNumElements() == DestTy->getVectorNumElements(),
2718            "AddrSpaceCast vector pointer number of elements mismatch", &I);
2719   visitInstruction(I);
2720 }
2721 
2722 /// visitPHINode - Ensure that a PHI node is well formed.
2723 ///
2724 void Verifier::visitPHINode(PHINode &PN) {
2725   // Ensure that the PHI nodes are all grouped together at the top of the block.
2726   // This can be tested by checking whether the instruction before this is
2727   // either nonexistent (because this is begin()) or is a PHI node.  If not,
2728   // then there is some other instruction before a PHI.
2729   Assert(&PN == &PN.getParent()->front() ||
2730              isa<PHINode>(--BasicBlock::iterator(&PN)),
2731          "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
2732 
2733   // Check that a PHI doesn't yield a Token.
2734   Assert(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
2735 
2736   // Check that all of the values of the PHI node have the same type as the
2737   // result, and that the incoming blocks are really basic blocks.
2738   for (Value *IncValue : PN.incoming_values()) {
2739     Assert(PN.getType() == IncValue->getType(),
2740            "PHI node operands are not the same type as the result!", &PN);
2741   }
2742 
2743   // All other PHI node constraints are checked in the visitBasicBlock method.
2744 
2745   visitInstruction(PN);
2746 }
2747 
2748 void Verifier::verifyCallSite(CallSite CS) {
2749   Instruction *I = CS.getInstruction();
2750 
2751   Assert(CS.getCalledValue()->getType()->isPointerTy(),
2752          "Called function must be a pointer!", I);
2753   PointerType *FPTy = cast<PointerType>(CS.getCalledValue()->getType());
2754 
2755   Assert(FPTy->getElementType()->isFunctionTy(),
2756          "Called function is not pointer to function type!", I);
2757 
2758   Assert(FPTy->getElementType() == CS.getFunctionType(),
2759          "Called function is not the same type as the call!", I);
2760 
2761   FunctionType *FTy = CS.getFunctionType();
2762 
2763   // Verify that the correct number of arguments are being passed
2764   if (FTy->isVarArg())
2765     Assert(CS.arg_size() >= FTy->getNumParams(),
2766            "Called function requires more parameters than were provided!", I);
2767   else
2768     Assert(CS.arg_size() == FTy->getNumParams(),
2769            "Incorrect number of arguments passed to called function!", I);
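       // For a vararg prototype such as 'void (i32, ...)', extra trailing
       // arguments are allowed, but every fixed parameter must be supplied.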
2770 
2771   // Verify that all arguments to the call match the function type.
2772   for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
2773     Assert(CS.getArgument(i)->getType() == FTy->getParamType(i),
2774            "Call parameter type does not match function signature!",
2775            CS.getArgument(i), FTy->getParamType(i), I);
2776 
2777   AttributeList Attrs = CS.getAttributes();
2778 
2779   Assert(verifyAttributeCount(Attrs, CS.arg_size()),
2780          "Attribute after last parameter!", I);
2781 
2782   if (Attrs.hasAttribute(AttributeList::FunctionIndex, Attribute::Speculatable)) {
2783     // Don't allow speculatable on call sites, unless the underlying function
2784     // declaration is also speculatable.
2785     Function *Callee
2786       = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
2787     Assert(Callee && Callee->isSpeculatable(),
2788            "speculatable attribute may not apply to call sites", I);
2789   }
2790 
2791   // Verify call attributes.
2792   verifyFunctionAttrs(FTy, Attrs, I);
2793 
2794   // Conservatively check the inalloca argument.
2795   // We have a bug if we can find that there is an underlying alloca without
2796   // inalloca.
2797   if (CS.hasInAllocaArgument()) {
2798     Value *InAllocaArg = CS.getArgument(FTy->getNumParams() - 1);
2799     if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
2800       Assert(AI->isUsedWithInAlloca(),
2801              "inalloca argument for call has mismatched alloca", AI, I);
2802   }
2803 
2804   // For each argument of the callsite, if it has the swifterror argument,
2805   // make sure the underlying alloca/parameter it comes from has a swifterror as
2806   // well.
2807   for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
2808     if (CS.paramHasAttr(i, Attribute::SwiftError)) {
2809       Value *SwiftErrorArg = CS.getArgument(i);
2810       if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
2811         Assert(AI->isSwiftError(),
2812                "swifterror argument for call has mismatched alloca", AI, I);
2813         continue;
2814       }
2815       auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
2816       Assert(ArgI, "swifterror argument should come from an alloca or parameter", SwiftErrorArg, I);
2817       Assert(ArgI->hasSwiftErrorAttr(),
2818              "swifterror argument for call has mismatched parameter", ArgI, I);
2819     }
2820 
2821   if (FTy->isVarArg()) {
2822     // FIXME? is 'nest' even legal here?
2823     bool SawNest = false;
2824     bool SawReturned = false;
2825 
2826     for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
2827       if (Attrs.hasParamAttribute(Idx, Attribute::Nest))
2828         SawNest = true;
2829       if (Attrs.hasParamAttribute(Idx, Attribute::Returned))
2830         SawReturned = true;
2831     }
2832 
2833     // Check attributes on the varargs part.
2834     for (unsigned Idx = FTy->getNumParams(); Idx < CS.arg_size(); ++Idx) {
2835       Type *Ty = CS.getArgument(Idx)->getType();
2836       AttributeSet ArgAttrs = Attrs.getParamAttributes(Idx);
2837       verifyParameterAttrs(ArgAttrs, Ty, I);
2838 
2839       if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2840         Assert(!SawNest, "More than one parameter has attribute nest!", I);
2841         SawNest = true;
2842       }
2843 
2844       if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2845         Assert(!SawReturned, "More than one parameter has attribute returned!",
2846                I);
2847         Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
2848                "Incompatible argument and return types for 'returned' "
2849                "attribute",
2850                I);
2851         SawReturned = true;
2852       }
2853 
2854       // Statepoint intrinsic is vararg but the wrapped function may not be.
2855       // Allow sret here and check the wrapped function in verifyStatepoint.
2856       if (CS.getCalledFunction() == nullptr ||
2857           CS.getCalledFunction()->getIntrinsicID() !=
2858             Intrinsic::experimental_gc_statepoint)
2859         Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
2860                "Attribute 'sret' cannot be used for vararg call arguments!", I);
2861 
2862       if (ArgAttrs.hasAttribute(Attribute::InAlloca))
2863         Assert(Idx == CS.arg_size() - 1, "inalloca isn't on the last argument!",
2864                I);
2865     }
2866   }
2867 
2868   // Verify that there's no metadata unless it's a direct call to an intrinsic.
2869   if (CS.getCalledFunction() == nullptr ||
2870       !CS.getCalledFunction()->getName().startswith("llvm.")) {
2871     for (Type *ParamTy : FTy->params()) {
2872       Assert(!ParamTy->isMetadataTy(),
2873              "Function has metadata parameter but isn't an intrinsic", I);
2874       Assert(!ParamTy->isTokenTy(),
2875              "Function has token parameter but isn't an intrinsic", I);
2876     }
2877   }
2878 
2879   // Verify that indirect calls don't return tokens.
2880   if (CS.getCalledFunction() == nullptr)
2881     Assert(!FTy->getReturnType()->isTokenTy(),
2882            "Return type cannot be token for indirect call!");
2883 
2884   if (Function *F = CS.getCalledFunction())
2885     if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
2886       visitIntrinsicCallSite(ID, CS);
2887 
2888   // Verify that a callsite has at most one "deopt", at most one "funclet" and
2889   // at most one "gc-transition" operand bundle.
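       // e.g. 'call void @f() [ "deopt"(i32 0), "deopt"(i32 1) ]' would be
       // rejected for carrying two deopt bundles.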
2890   bool FoundDeoptBundle = false, FoundFuncletBundle = false,
2891        FoundGCTransitionBundle = false;
2892   for (unsigned i = 0, e = CS.getNumOperandBundles(); i < e; ++i) {
2893     OperandBundleUse BU = CS.getOperandBundleAt(i);
2894     uint32_t Tag = BU.getTagID();
2895     if (Tag == LLVMContext::OB_deopt) {
2896       Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", I);
2897       FoundDeoptBundle = true;
2898     } else if (Tag == LLVMContext::OB_gc_transition) {
2899       Assert(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
2900              I);
2901       FoundGCTransitionBundle = true;
2902     } else if (Tag == LLVMContext::OB_funclet) {
2903       Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", I);
2904       FoundFuncletBundle = true;
2905       Assert(BU.Inputs.size() == 1,
2906              "Expected exactly one funclet bundle operand", I);
2907       Assert(isa<FuncletPadInst>(BU.Inputs.front()),
2908              "Funclet bundle operands should correspond to a FuncletPadInst",
2909              I);
2910     }
2911   }
2912 
2913   // Verify that each inlinable callsite of a debug-info-bearing function in a
2914   // debug-info-bearing function has a debug location attached to it. Failure to
2915   // do so causes assertion failures when the inliner sets up inline scope info.
2916   if (I->getFunction()->getSubprogram() && CS.getCalledFunction() &&
2917       CS.getCalledFunction()->getSubprogram())
2918     AssertDI(I->getDebugLoc(), "inlinable function call in a function with "
2919                                "debug info must have a !dbg location",
2920              I);
2921 
2922   visitInstruction(*I);
2923 }
2924 
2925 /// Two types are "congruent" if they are identical, or if they are both pointer
2926 /// types with different pointee types and the same address space.
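     /// For example, i8* and i32* are congruent, while i8* and
     /// i8 addrspace(1)* are not.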
2927 static bool isTypeCongruent(Type *L, Type *R) {
2928   if (L == R)
2929     return true;
2930   PointerType *PL = dyn_cast<PointerType>(L);
2931   PointerType *PR = dyn_cast<PointerType>(R);
2932   if (!PL || !PR)
2933     return false;
2934   return PL->getAddressSpace() == PR->getAddressSpace();
2935 }
2936 
2937 static AttrBuilder getParameterABIAttributes(int I, AttributeList Attrs) {
2938   static const Attribute::AttrKind ABIAttrs[] = {
2939       Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
2940       Attribute::InReg, Attribute::Returned, Attribute::SwiftSelf,
2941       Attribute::SwiftError};
2942   AttrBuilder Copy;
2943   for (auto AK : ABIAttrs) {
2944     if (Attrs.hasParamAttribute(I, AK))
2945       Copy.addAttribute(AK);
2946   }
2947   if (Attrs.hasParamAttribute(I, Attribute::Alignment))
2948     Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
2949   return Copy;
2950 }
2951 
2952 void Verifier::verifyMustTailCall(CallInst &CI) {
2953   Assert(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
2954 
2955   // - The caller and callee prototypes must match.  Pointer types of
2956   //   parameters or return types may differ in pointee type, but not
2957   //   address space.
2958   Function *F = CI.getParent()->getParent();
2959   FunctionType *CallerTy = F->getFunctionType();
2960   FunctionType *CalleeTy = CI.getFunctionType();
2961   if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
2962     Assert(CallerTy->getNumParams() == CalleeTy->getNumParams(),
2963            "cannot guarantee tail call due to mismatched parameter counts",
2964            &CI);
2965     for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
2966       Assert(
2967           isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
2968           "cannot guarantee tail call due to mismatched parameter types", &CI);
2969     }
2970   }
2971   Assert(CallerTy->isVarArg() == CalleeTy->isVarArg(),
2972          "cannot guarantee tail call due to mismatched varargs", &CI);
2973   Assert(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
2974          "cannot guarantee tail call due to mismatched return types", &CI);
2975 
2976   // - The calling conventions of the caller and callee must match.
2977   Assert(F->getCallingConv() == CI.getCallingConv(),
2978          "cannot guarantee tail call due to mismatched calling conv", &CI);
2979 
2980   // - All ABI-impacting function attributes, such as sret, byval, inreg,
2981   //   returned, and inalloca, must match.
2982   AttributeList CallerAttrs = F->getAttributes();
2983   AttributeList CalleeAttrs = CI.getAttributes();
2984   for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
2985     AttrBuilder CallerABIAttrs = getParameterABIAttributes(I, CallerAttrs);
2986     AttrBuilder CalleeABIAttrs = getParameterABIAttributes(I, CalleeAttrs);
2987     Assert(CallerABIAttrs == CalleeABIAttrs,
2988            "cannot guarantee tail call due to mismatched ABI impacting "
2989            "function attributes",
2990            &CI, CI.getOperand(I));
2991   }
2992 
2993   // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
2994   //   or a pointer bitcast followed by a ret instruction.
2995   // - The ret instruction must return the (possibly bitcasted) value
2996   //   produced by the call or void.
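       // A minimal well-formed sequence (assuming the caller itself has the
       // matching 'i8* (i8*)' prototype) therefore looks like:
       //   %r = musttail call i8* @callee(i8* %p)
       //   ret i8* %r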
2997   Value *RetVal = &CI;
2998   Instruction *Next = CI.getNextNode();
2999 
3000   // Handle the optional bitcast.
3001   if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
3002     Assert(BI->getOperand(0) == RetVal,
3003            "bitcast following musttail call must use the call", BI);
3004     RetVal = BI;
3005     Next = BI->getNextNode();
3006   }
3007 
3008   // Check the return.
3009   ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
3010   Assert(Ret, "musttail call must precede a ret with an optional bitcast",
3011          &CI);
3012   Assert(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal,
3013          "musttail call result must be returned", Ret);
3014 }
3015 
3016 void Verifier::visitCallInst(CallInst &CI) {
3017   verifyCallSite(&CI);
3018 
3019   if (CI.isMustTailCall())
3020     verifyMustTailCall(CI);
3021 }
3022 
3023 void Verifier::visitInvokeInst(InvokeInst &II) {
3024   verifyCallSite(&II);
3025 
3026   // Verify that the first non-PHI instruction of the unwind destination is an
3027   // exception handling instruction.
3028   Assert(
3029       II.getUnwindDest()->isEHPad(),
3030       "The unwind destination does not have an exception handling instruction!",
3031       &II);
3032 
3033   visitTerminator(II);
3034 }
3035 
3036 /// visitUnaryOperator - Check the argument to the unary operator.
3037 ///
3038 void Verifier::visitUnaryOperator(UnaryOperator &U) {
3039   Assert(U.getType() == U.getOperand(0)->getType(),
3040          "Unary operators must have same type for "
3041          "operands and result!",
3042          &U);
3043 
3044   switch (U.getOpcode()) {
3045   // Check that floating-point arithmetic operators are only used with
3046   // floating-point operands.
3047   case Instruction::FNeg:
3048     Assert(U.getType()->isFPOrFPVectorTy(),
3049            "FNeg operator only works with float types!", &U);
3050     break;
3051   default:
3052     llvm_unreachable("Unknown UnaryOperator opcode!");
3053   }
3054 
3055   visitInstruction(U);
3056 }
3057 
3058 /// visitBinaryOperator - Check that both arguments to the binary operator are
3059 /// of the same type!
3060 ///
3061 void Verifier::visitBinaryOperator(BinaryOperator &B) {
3062   Assert(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
3063          "Both operands to a binary operator are not of the same type!", &B);
3064 
3065   switch (B.getOpcode()) {
3066   // Check that integer arithmetic operators are only used with
3067   // integral operands.
3068   case Instruction::Add:
3069   case Instruction::Sub:
3070   case Instruction::Mul:
3071   case Instruction::SDiv:
3072   case Instruction::UDiv:
3073   case Instruction::SRem:
3074   case Instruction::URem:
3075     Assert(B.getType()->isIntOrIntVectorTy(),
3076            "Integer arithmetic operators only work with integral types!", &B);
3077     Assert(B.getType() == B.getOperand(0)->getType(),
3078            "Integer arithmetic operators must have same type "
3079            "for operands and result!",
3080            &B);
3081     break;
3082   // Check that floating-point arithmetic operators are only used with
3083   // floating-point operands.
3084   case Instruction::FAdd:
3085   case Instruction::FSub:
3086   case Instruction::FMul:
3087   case Instruction::FDiv:
3088   case Instruction::FRem:
3089     Assert(B.getType()->isFPOrFPVectorTy(),
3090            "Floating-point arithmetic operators only work with "
3091            "floating-point types!",
3092            &B);
3093     Assert(B.getType() == B.getOperand(0)->getType(),
3094            "Floating-point arithmetic operators must have same type "
3095            "for operands and result!",
3096            &B);
3097     break;
3098   // Check that logical operators are only used with integral operands.
3099   case Instruction::And:
3100   case Instruction::Or:
3101   case Instruction::Xor:
3102     Assert(B.getType()->isIntOrIntVectorTy(),
3103            "Logical operators only work with integral types!", &B);
3104     Assert(B.getType() == B.getOperand(0)->getType(),
3105            "Logical operators must have same type for operands and result!",
3106            &B);
3107     break;
3108   case Instruction::Shl:
3109   case Instruction::LShr:
3110   case Instruction::AShr:
3111     Assert(B.getType()->isIntOrIntVectorTy(),
3112            "Shifts only work with integral types!", &B);
3113     Assert(B.getType() == B.getOperand(0)->getType(),
3114            "Shift return type must be same as operands!", &B);
3115     break;
3116   default:
3117     llvm_unreachable("Unknown BinaryOperator opcode!");
3118   }
3119 
3120   visitInstruction(B);
3121 }
3122 
3123 void Verifier::visitICmpInst(ICmpInst &IC) {
3124   // Check that the operands are the same type
3125   Type *Op0Ty = IC.getOperand(0)->getType();
3126   Type *Op1Ty = IC.getOperand(1)->getType();
3127   Assert(Op0Ty == Op1Ty,
3128          "Both operands to ICmp instruction are not of the same type!", &IC);
3129   // Check that the operands are the right type
3130   Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
3131          "Invalid operand types for ICmp instruction", &IC);
3132   // Check that the predicate is valid.
3133   Assert(IC.isIntPredicate(),
3134          "Invalid predicate in ICmp instruction!", &IC);
3135 
3136   visitInstruction(IC);
3137 }
3138 
3139 void Verifier::visitFCmpInst(FCmpInst &FC) {
3140   // Check that the operands are the same type
3141   Type *Op0Ty = FC.getOperand(0)->getType();
3142   Type *Op1Ty = FC.getOperand(1)->getType();
3143   Assert(Op0Ty == Op1Ty,
3144          "Both operands to FCmp instruction are not of the same type!", &FC);
3145   // Check that the operands are the right type
3146   Assert(Op0Ty->isFPOrFPVectorTy(),
3147          "Invalid operand types for FCmp instruction", &FC);
3148   // Check that the predicate is valid.
3149   Assert(FC.isFPPredicate(),
3150          "Invalid predicate in FCmp instruction!", &FC);
3151 
3152   visitInstruction(FC);
3153 }
3154 
3155 void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
3156   Assert(
3157       ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)),
3158       "Invalid extractelement operands!", &EI);
3159   visitInstruction(EI);
3160 }
3161 
3162 void Verifier::visitInsertElementInst(InsertElementInst &IE) {
3163   Assert(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
3164                                             IE.getOperand(2)),
3165          "Invalid insertelement operands!", &IE);
3166   visitInstruction(IE);
3167 }
3168 
3169 void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
3170   Assert(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
3171                                             SV.getOperand(2)),
3172          "Invalid shufflevector operands!", &SV);
3173   visitInstruction(SV);
3174 }
3175 
3176 void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3177   Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
3178 
3179   Assert(isa<PointerType>(TargetTy),
3180          "GEP base pointer is not a pointer or a vector of pointers", &GEP);
3181   Assert(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
3182 
3183   SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
3184   Assert(all_of(
3185       Idxs, [](Value* V) { return V->getType()->isIntOrIntVectorTy(); }),
3186       "GEP indexes must be integers", &GEP);
3187   Type *ElTy =
3188       GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
3189   Assert(ElTy, "Invalid indices for GEP pointer type!", &GEP);
3190 
3191   Assert(GEP.getType()->isPtrOrPtrVectorTy() &&
3192              GEP.getResultElementType() == ElTy,
3193          "GEP is not of right type for indices!", &GEP, ElTy);
3194 
3195   if (GEP.getType()->isVectorTy()) {
3196     // Additional checks for vector GEPs.
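         // e.g. 'getelementptr i32, <4 x i32*> %ptrs, <4 x i64> %idxs' yields
         // a <4 x i32*> result; every vector operand must share that width.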
3197     unsigned GEPWidth = GEP.getType()->getVectorNumElements();
3198     if (GEP.getPointerOperandType()->isVectorTy())
3199       Assert(GEPWidth == GEP.getPointerOperandType()->getVectorNumElements(),
3200              "Vector GEP result width doesn't match operand's", &GEP);
3201     for (Value *Idx : Idxs) {
3202       Type *IndexTy = Idx->getType();
3203       if (IndexTy->isVectorTy()) {
3204         unsigned IndexWidth = IndexTy->getVectorNumElements();
3205         Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
3206       }
3207       Assert(IndexTy->isIntOrIntVectorTy(),
3208              "All GEP indices should be of integer type");
3209     }
3210   }
3211 
3212   if (auto *PTy = dyn_cast<PointerType>(GEP.getType())) {
3213     Assert(GEP.getAddressSpace() == PTy->getAddressSpace(),
3214            "GEP address space doesn't match type", &GEP);
3215   }
3216 
3217   visitInstruction(GEP);
3218 }
3219 
3220 static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
3221   return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
3222 }
3223 
3224 void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
3225   assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
3226          "precondition violation");
3227 
3228   unsigned NumOperands = Range->getNumOperands();
3229   Assert(NumOperands % 2 == 0, "Unfinished range!", Range);
3230   unsigned NumRanges = NumOperands / 2;
3231   Assert(NumRanges >= 1, "It should have at least one range!", Range);
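       // e.g. !{i32 0, i32 10, i32 20, i32 30} describes the two half-open
       // ranges [0, 10) and [20, 30); the checks below enforce that the
       // ranges are in order, do not overlap, and are not contiguous.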
3232 
3233   ConstantRange LastRange(1); // Dummy initial value
3234   for (unsigned i = 0; i < NumRanges; ++i) {
3235     ConstantInt *Low =
3236         mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
3237     Assert(Low, "The lower limit must be an integer!", Low);
3238     ConstantInt *High =
3239         mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
3240     Assert(High, "The upper limit must be an integer!", High);
3241     Assert(High->getType() == Low->getType() && High->getType() == Ty,
3242            "Range types must match instruction type!", &I);
3243 
3244     APInt HighV = High->getValue();
3245     APInt LowV = Low->getValue();
3246     ConstantRange CurRange(LowV, HighV);
3247     Assert(!CurRange.isEmptySet() && !CurRange.isFullSet(),
3248            "Range must not be empty!", Range);
3249     if (i != 0) {
3250       Assert(CurRange.intersectWith(LastRange).isEmptySet(),
3251              "Intervals are overlapping", Range);
3252       Assert(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
3253              Range);
3254       Assert(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
3255              Range);
3256     }
3257     LastRange = ConstantRange(LowV, HighV);
3258   }
3259   if (NumRanges > 2) {
3260     APInt FirstLow =
3261         mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
3262     APInt FirstHigh =
3263         mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
3264     ConstantRange FirstRange(FirstLow, FirstHigh);
3265     Assert(FirstRange.intersectWith(LastRange).isEmptySet(),
3266            "Intervals are overlapping", Range);
3267     Assert(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
3268            Range);
3269   }
3270 }
3271 
3272 void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
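       // Called for atomic loads, stores, cmpxchg and atomicrmw: the accessed
       // type must be at least one byte and a power of two in size.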
3273   unsigned Size = DL.getTypeSizeInBits(Ty);
3274   Assert(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
3275   Assert(!(Size & (Size - 1)),
3276          "atomic memory access' operand must have a power-of-two size", Ty, I);
3277 }
3278 
3279 void Verifier::visitLoadInst(LoadInst &LI) {
3280   PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
3281   Assert(PTy, "Load operand must be a pointer.", &LI);
3282   Type *ElTy = LI.getType();
3283   Assert(LI.getAlignment() <= Value::MaximumAlignment,
3284          "huge alignment values are unsupported", &LI);
3285   Assert(ElTy->isSized(), "loading unsized types is not allowed", &LI);
3286   if (LI.isAtomic()) {
3287     Assert(LI.getOrdering() != AtomicOrdering::Release &&
3288                LI.getOrdering() != AtomicOrdering::AcquireRelease,
3289            "Load cannot have Release ordering", &LI);
3290     Assert(LI.getAlignment() != 0,
3291            "Atomic load must specify explicit alignment", &LI);
3292     Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
3293            "atomic load operand must have integer, pointer, or floating point "
3294            "type!",
3295            ElTy, &LI);
3296     checkAtomicMemAccessSize(ElTy, &LI);
3297   } else {
3298     Assert(LI.getSyncScopeID() == SyncScope::System,
3299            "Non-atomic load cannot have SynchronizationScope specified", &LI);
3300   }
3301 
3302   visitInstruction(LI);
3303 }
3304 
3305 void Verifier::visitStoreInst(StoreInst &SI) {
3306   PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
3307   Assert(PTy, "Store operand must be a pointer.", &SI);
3308   Type *ElTy = PTy->getElementType();
3309   Assert(ElTy == SI.getOperand(0)->getType(),
3310          "Stored value type does not match pointer operand type!", &SI, ElTy);
3311   Assert(SI.getAlignment() <= Value::MaximumAlignment,
3312          "huge alignment values are unsupported", &SI);
3313   Assert(ElTy->isSized(), "storing unsized types is not allowed", &SI);
3314   if (SI.isAtomic()) {
3315     Assert(SI.getOrdering() != AtomicOrdering::Acquire &&
3316                SI.getOrdering() != AtomicOrdering::AcquireRelease,
3317            "Store cannot have Acquire ordering", &SI);
3318     Assert(SI.getAlignment() != 0,
3319            "Atomic store must specify explicit alignment", &SI);
3320     Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
3321            "atomic store operand must have integer, pointer, or floating point "
3322            "type!",
3323            ElTy, &SI);
3324     checkAtomicMemAccessSize(ElTy, &SI);
3325   } else {
3326     Assert(SI.getSyncScopeID() == SyncScope::System,
3327            "Non-atomic store cannot have SynchronizationScope specified", &SI);
3328   }
3329   visitInstruction(SI);
3330 }
3331 
3332 /// Check that SwiftErrorVal is used as a swifterror argument in CS.
3333 void Verifier::verifySwiftErrorCallSite(CallSite CS,
3334                                         const Value *SwiftErrorVal) {
3335   unsigned Idx = 0;
3336   for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
3337        I != E; ++I, ++Idx) {
3338     if (*I == SwiftErrorVal) {
3339       Assert(CS.paramHasAttr(Idx, Attribute::SwiftError),
3340              "swifterror value when used in a callsite should be marked "
3341              "with swifterror attribute",
3342               SwiftErrorVal, CS);
3343     }
3344   }
3345 }
3346 
3347 void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
3348   // Check that swifterror value is only used by loads, stores, or as
3349   // a swifterror argument.
3350   for (const User *U : SwiftErrorVal->users()) {
3351     Assert(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
3352            isa<InvokeInst>(U),
3353            "swifterror value can only be loaded from, stored to, or used "
3354            "as a swifterror argument!",
3355            SwiftErrorVal, U);
3356     // If it is used by a store, check it is the second operand.
3357     if (auto StoreI = dyn_cast<StoreInst>(U))
3358       Assert(StoreI->getOperand(1) == SwiftErrorVal,
3359              "swifterror value should be the second operand when used "
3360              "by stores", SwiftErrorVal, U);
3361     if (auto CallI = dyn_cast<CallInst>(U))
3362       verifySwiftErrorCallSite(const_cast<CallInst*>(CallI), SwiftErrorVal);
3363     if (auto II = dyn_cast<InvokeInst>(U))
3364       verifySwiftErrorCallSite(const_cast<InvokeInst*>(II), SwiftErrorVal);
3365   }
3366 }
3367 
3368 void Verifier::visitAllocaInst(AllocaInst &AI) {
3369   SmallPtrSet<Type*, 4> Visited;
3370   PointerType *PTy = AI.getType();
3371   // TODO: Relax this restriction?
3372   Assert(PTy->getAddressSpace() == DL.getAllocaAddrSpace(),
3373          "Allocation instruction pointer not in the stack address space!",
3374          &AI);
3375   Assert(AI.getAllocatedType()->isSized(&Visited),
3376          "Cannot allocate unsized type", &AI);
3377   Assert(AI.getArraySize()->getType()->isIntegerTy(),
3378          "Alloca array size must have integer type", &AI);
3379   Assert(AI.getAlignment() <= Value::MaximumAlignment,
3380          "huge alignment values are unsupported", &AI);
3381 
3382   if (AI.isSwiftError()) {
3383     verifySwiftErrorValue(&AI);
3384   }
3385 
3386   visitInstruction(AI);
3387 }
3388 
3389 void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
3390 
3391   // FIXME: more conditions???
3392   Assert(CXI.getSuccessOrdering() != AtomicOrdering::NotAtomic,
3393          "cmpxchg instructions must be atomic.", &CXI);
3394   Assert(CXI.getFailureOrdering() != AtomicOrdering::NotAtomic,
3395          "cmpxchg instructions must be atomic.", &CXI);
3396   Assert(CXI.getSuccessOrdering() != AtomicOrdering::Unordered,
3397          "cmpxchg instructions cannot be unordered.", &CXI);
3398   Assert(CXI.getFailureOrdering() != AtomicOrdering::Unordered,
3399          "cmpxchg instructions cannot be unordered.", &CXI);
3400   Assert(!isStrongerThan(CXI.getFailureOrdering(), CXI.getSuccessOrdering()),
3401          "cmpxchg instructions failure argument shall be no stronger than the "
3402          "success argument",
3403          &CXI);
3404   Assert(CXI.getFailureOrdering() != AtomicOrdering::Release &&
3405              CXI.getFailureOrdering() != AtomicOrdering::AcquireRelease,
3406          "cmpxchg failure ordering cannot include release semantics", &CXI);
3407 
3408   PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
3409   Assert(PTy, "First cmpxchg operand must be a pointer.", &CXI);
3410   Type *ElTy = PTy->getElementType();
3411   Assert(ElTy->isIntOrPtrTy(),
3412          "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
3413   checkAtomicMemAccessSize(ElTy, &CXI);
3414   Assert(ElTy == CXI.getOperand(1)->getType(),
3415          "Expected value type does not match pointer operand type!", &CXI,
3416          ElTy);
3417   Assert(ElTy == CXI.getOperand(2)->getType(),
3418          "Stored value type does not match pointer operand type!", &CXI, ElTy);
3419   visitInstruction(CXI);
3420 }
3421 
3422 void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
3423   Assert(RMWI.getOrdering() != AtomicOrdering::NotAtomic,
3424          "atomicrmw instructions must be atomic.", &RMWI);
3425   Assert(RMWI.getOrdering() != AtomicOrdering::Unordered,
3426          "atomicrmw instructions cannot be unordered.", &RMWI);
3427   auto Op = RMWI.getOperation();
3428   PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
3429   Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
3430   Type *ElTy = PTy->getElementType();
3431   Assert(ElTy->isIntegerTy(), "atomicrmw " +
3432          AtomicRMWInst::getOperationName(Op) +
3433          " operand must have integer type!",
3434          &RMWI, ElTy);
3435   checkAtomicMemAccessSize(ElTy, &RMWI);
3436   Assert(ElTy == RMWI.getOperand(1)->getType(),
3437          "Argument value type does not match pointer operand type!", &RMWI,
3438          ElTy);
3439   Assert(AtomicRMWInst::FIRST_BINOP <= Op && Op <= AtomicRMWInst::LAST_BINOP,
3440          "Invalid binary operation!", &RMWI);
3441   visitInstruction(RMWI);
3442 }
3443 
3444 void Verifier::visitFenceInst(FenceInst &FI) {
3445   const AtomicOrdering Ordering = FI.getOrdering();
3446   Assert(Ordering == AtomicOrdering::Acquire ||
3447              Ordering == AtomicOrdering::Release ||
3448              Ordering == AtomicOrdering::AcquireRelease ||
3449              Ordering == AtomicOrdering::SequentiallyConsistent,
3450          "fence instructions may only have acquire, release, acq_rel, or "
3451          "seq_cst ordering.",
3452          &FI);
3453   visitInstruction(FI);
3454 }
3455 
3456 void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
3457   Assert(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
3458                                           EVI.getIndices()) == EVI.getType(),
3459          "Invalid ExtractValueInst operands!", &EVI);
3460 
3461   visitInstruction(EVI);
3462 }
3463 
3464 void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
3465   Assert(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
3466                                           IVI.getIndices()) ==
3467              IVI.getOperand(1)->getType(),
3468          "Invalid InsertValueInst operands!", &IVI);
3469 
3470   visitInstruction(IVI);
3471 }
3472 
3473 static Value *getParentPad(Value *EHPad) {
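       // Funclet pads and catchswitches record their parent with a 'within'
       // token: a catchpad is within its catchswitch, while a cleanuppad or
       // catchswitch is within an enclosing funclet pad, or 'none' at the top
       // level.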
3474   if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
3475     return FPI->getParentPad();
3476 
3477   return cast<CatchSwitchInst>(EHPad)->getParentPad();
3478 }
3479 
3480 void Verifier::visitEHPadPredecessors(Instruction &I) {
3481   assert(I.isEHPad());
3482 
3483   BasicBlock *BB = I.getParent();
3484   Function *F = BB->getParent();
3485 
3486   Assert(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
3487 
3488   if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
3489     // The landingpad instruction defines its parent as a landing pad block. The
3490     // landing pad block may be branched to only by the unwind edge of an
3491     // invoke.
3492     for (BasicBlock *PredBB : predecessors(BB)) {
3493       const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
3494       Assert(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
3495              "Block containing LandingPadInst must be jumped to "
3496              "only by the unwind edge of an invoke.",
3497              LPI);
3498     }
3499     return;
3500   }
3501   if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
3502     if (!pred_empty(BB))
3503       Assert(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
3504              "Block containing CatchPadInst must be jumped to "
3505              "only by its catchswitch.",
3506              CPI);
3507     Assert(BB != CPI->getCatchSwitch()->getUnwindDest(),
3508            "Catchswitch cannot unwind to one of its catchpads",
3509            CPI->getCatchSwitch(), CPI);
3510     return;
3511   }
3512 
3513   // Verify that each pred has a legal terminator with a legal to/from EH
3514   // pad relationship.
3515   Instruction *ToPad = &I;
3516   Value *ToPadParent = getParentPad(ToPad);
3517   for (BasicBlock *PredBB : predecessors(BB)) {
3518     Instruction *TI = PredBB->getTerminator();
3519     Value *FromPad;
3520     if (auto *II = dyn_cast<InvokeInst>(TI)) {
3521       Assert(II->getUnwindDest() == BB && II->getNormalDest() != BB,
3522              "EH pad must be jumped to via an unwind edge", ToPad, II);
3523       if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
3524         FromPad = Bundle->Inputs[0];
3525       else
3526         FromPad = ConstantTokenNone::get(II->getContext());
3527     } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
3528       FromPad = CRI->getOperand(0);
3529       Assert(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
3530     } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
3531       FromPad = CSI;
3532     } else {
3533       Assert(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
3534     }
3535 
3536     // The edge may exit from zero or more nested pads.
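         // Walk up the parent-pad chain from FromPad.  A legal unwind edge
         // must reach ToPadParent without passing through ToPad itself,
         // without hitting 'none' (which would mean leaving every pad), and
         // without revisiting a pad.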
3537     SmallSet<Value *, 8> Seen;
3538     for (;; FromPad = getParentPad(FromPad)) {
3539       Assert(FromPad != ToPad,
3540              "EH pad cannot handle exceptions raised within it", FromPad, TI);
3541       if (FromPad == ToPadParent) {
3542         // This is a legal unwind edge.
3543         break;
3544       }
3545       Assert(!isa<ConstantTokenNone>(FromPad),
3546              "A single unwind edge may only enter one EH pad", TI);
3547       Assert(Seen.insert(FromPad).second,
3548              "EH pad jumps through a cycle of pads", FromPad);
3549     }
3550   }
3551 }
3552 
3553 void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
3554   // The landingpad instruction is ill-formed if it doesn't have any clauses and
3555   // isn't a cleanup.
3556   Assert(LPI.getNumClauses() > 0 || LPI.isCleanup(),
3557          "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
3558 
3559   visitEHPadPredecessors(LPI);
3560 
3561   if (!LandingPadResultTy)
3562     LandingPadResultTy = LPI.getType();
3563   else
3564     Assert(LandingPadResultTy == LPI.getType(),
3565            "The landingpad instruction should have a consistent result type "
3566            "inside a function.",
3567            &LPI);
3568 
3569   Function *F = LPI.getParent()->getParent();
3570   Assert(F->hasPersonalityFn(),
3571          "LandingPadInst needs to be in a function with a personality.", &LPI);
3572 
3573   // The landingpad instruction must be the first non-PHI instruction in the
3574   // block.
3575   Assert(LPI.getParent()->getLandingPadInst() == &LPI,
3576          "LandingPadInst not the first non-PHI instruction in the block.",
3577          &LPI);
3578 
3579   for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
3580     Constant *Clause = LPI.getClause(i);
3581     if (LPI.isCatch(i)) {
3582       Assert(isa<PointerType>(Clause->getType()),
3583              "Catch operand does not have pointer type!", &LPI);
3584     } else {
3585       Assert(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
3586       Assert(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
3587              "Filter operand is not an array of constants!", &LPI);
3588     }
3589   }
3590 
3591   visitInstruction(LPI);
3592 }
3593 
3594 void Verifier::visitResumeInst(ResumeInst &RI) {
3595   Assert(RI.getFunction()->hasPersonalityFn(),
3596          "ResumeInst needs to be in a function with a personality.", &RI);
3597 
3598   if (!LandingPadResultTy)
3599     LandingPadResultTy = RI.getValue()->getType();
3600   else
3601     Assert(LandingPadResultTy == RI.getValue()->getType(),
3602            "The resume instruction should have a consistent result type "
3603            "inside a function.",
3604            &RI);
3605 
3606   visitTerminator(RI);
3607 }
3608 
3609 void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
3610   BasicBlock *BB = CPI.getParent();
3611 
3612   Function *F = BB->getParent();
3613   Assert(F->hasPersonalityFn(),
3614          "CatchPadInst needs to be in a function with a personality.", &CPI);
3615 
3616   Assert(isa<CatchSwitchInst>(CPI.getParentPad()),
3617          "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
3618          CPI.getParentPad());
3619 
3620   // The catchpad instruction must be the first non-PHI instruction in the
3621   // block.
3622   Assert(BB->getFirstNonPHI() == &CPI,
3623          "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
3624 
3625   visitEHPadPredecessors(CPI);
3626   visitFuncletPadInst(CPI);
3627 }
3628 
3629 void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
3630   Assert(isa<CatchPadInst>(CatchReturn.getOperand(0)),
3631          "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
3632          CatchReturn.getOperand(0));
3633 
3634   visitTerminator(CatchReturn);
3635 }
3636 
3637 void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
3638   BasicBlock *BB = CPI.getParent();
3639 
3640   Function *F = BB->getParent();
3641   Assert(F->hasPersonalityFn(),
3642          "CleanupPadInst needs to be in a function with a personality.", &CPI);
3643 
3644   // The cleanuppad instruction must be the first non-PHI instruction in the
3645   // block.
3646   Assert(BB->getFirstNonPHI() == &CPI,
3647          "CleanupPadInst not the first non-PHI instruction in the block.",
3648          &CPI);
3649 
3650   auto *ParentPad = CPI.getParentPad();
3651   Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
3652          "CleanupPadInst has an invalid parent.", &CPI);
3653 
3654   visitEHPadPredecessors(CPI);
3655   visitFuncletPadInst(CPI);
3656 }
3657 
3658 void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
3659   User *FirstUser = nullptr;
3660   Value *FirstUnwindPad = nullptr;
3661   SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
3662   SmallSet<FuncletPadInst *, 8> Seen;
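       // Walk FPI plus any cleanup pads nested inside it and inspect every
       // use that can unwind.  All unwind edges that exit FPI must agree on a
       // single unwind destination; FirstUser/FirstUnwindPad remember the
       // first such edge so later ones can be checked against it.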
3663 
3664   while (!Worklist.empty()) {
3665     FuncletPadInst *CurrentPad = Worklist.pop_back_val();
3666     Assert(Seen.insert(CurrentPad).second,
3667            "FuncletPadInst must not be nested within itself", CurrentPad);
3668     Value *UnresolvedAncestorPad = nullptr;
3669     for (User *U : CurrentPad->users()) {
3670       BasicBlock *UnwindDest;
3671       if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
3672         UnwindDest = CRI->getUnwindDest();
3673       } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
3674         // We allow catchswitch unwind to caller to nest
3675         // within an outer pad that unwinds somewhere else,
3676         // because catchswitch doesn't have a nounwind variant.
3677         // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
3678         if (CSI->unwindsToCaller())
3679           continue;
3680         UnwindDest = CSI->getUnwindDest();
3681       } else if (auto *II = dyn_cast<InvokeInst>(U)) {
3682         UnwindDest = II->getUnwindDest();
3683       } else if (isa<CallInst>(U)) {
3684         // Calls which don't unwind may be found inside funclet
3685         // pads that unwind somewhere else.  We don't *require*
3686         // such calls to be annotated nounwind.
3687         continue;
3688       } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
3689         // The unwind dest for a cleanup can only be found by
3690         // recursive search.  Add it to the worklist, and we'll
3691         // search for its first use that determines where it unwinds.
3692         Worklist.push_back(CPI);
3693         continue;
3694       } else {
3695         Assert(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
3696         continue;
3697       }
3698 
3699       Value *UnwindPad;
3700       bool ExitsFPI;
3701       if (UnwindDest) {
3702         UnwindPad = UnwindDest->getFirstNonPHI();
3703         if (!cast<Instruction>(UnwindPad)->isEHPad())
3704           continue;
3705         Value *UnwindParent = getParentPad(UnwindPad);
3706         // Ignore unwind edges that don't exit CurrentPad.
3707         if (UnwindParent == CurrentPad)
3708           continue;
3709         // Determine whether the original funclet pad is exited,
3710         // and if we are scanning nested pads determine how many
3711         // of them are exited so we can stop searching their
3712         // children.
3713         Value *ExitedPad = CurrentPad;
3714         ExitsFPI = false;
3715         do {
3716           if (ExitedPad == &FPI) {
3717             ExitsFPI = true;
3718             // Now we can resolve any ancestors of CurrentPad up to
3719             // FPI, but not including FPI since we need to make sure
3720             // to check all direct users of FPI for consistency.
3721             UnresolvedAncestorPad = &FPI;
3722             break;
3723           }
3724           Value *ExitedParent = getParentPad(ExitedPad);
3725           if (ExitedParent == UnwindParent) {
3726             // ExitedPad is the ancestor-most pad which this unwind
3727             // edge exits, so we can resolve up to it, meaning that
3728             // ExitedParent is the first ancestor still unresolved.
3729             UnresolvedAncestorPad = ExitedParent;
3730             break;
3731           }
3732           ExitedPad = ExitedParent;
3733         } while (!isa<ConstantTokenNone>(ExitedPad));
3734       } else {
3735         // Unwinding to caller exits all pads.
3736         UnwindPad = ConstantTokenNone::get(FPI.getContext());
3737         ExitsFPI = true;
3738         UnresolvedAncestorPad = &FPI;
3739       }
3740 
3741       if (ExitsFPI) {
3742         // This unwind edge exits FPI.  Make sure it agrees with other
3743         // such edges.
3744         if (FirstUser) {
3745           Assert(UnwindPad == FirstUnwindPad, "Unwind edges out of a funclet "
3746                                               "pad must have the same unwind "
3747                                               "dest",
3748                  &FPI, U, FirstUser);
3749         } else {
3750           FirstUser = U;
3751           FirstUnwindPad = UnwindPad;
3752           // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
3753           if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
3754               getParentPad(UnwindPad) == getParentPad(&FPI))
3755             SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
3756         }
3757       }
3758       // Make sure we visit all uses of FPI, but for nested pads stop as
3759       // soon as we know where they unwind to.
3760       if (CurrentPad != &FPI)
3761         break;
3762     }
3763     if (UnresolvedAncestorPad) {
3764       if (CurrentPad == UnresolvedAncestorPad) {
3765         // When CurrentPad is FPI itself, we don't mark it as resolved even if
3766         // we've found an unwind edge that exits it, because we need to verify
3767         // all direct uses of FPI.
3768         assert(CurrentPad == &FPI);
3769         continue;
3770       }
3771       // Pop off the worklist any nested pads that we've found an unwind
3772       // destination for.  The pads on the worklist are the uncles,
3773       // great-uncles, etc. of CurrentPad.  We've found an unwind destination
3774       // for all ancestors of CurrentPad up to but not including
3775       // UnresolvedAncestorPad.
3776       Value *ResolvedPad = CurrentPad;
3777       while (!Worklist.empty()) {
3778         Value *UnclePad = Worklist.back();
3779         Value *AncestorPad = getParentPad(UnclePad);
3780         // Walk ResolvedPad up the ancestor list until we either find the
3781         // uncle's parent or the last resolved ancestor.
3782         while (ResolvedPad != AncestorPad) {
3783           Value *ResolvedParent = getParentPad(ResolvedPad);
3784           if (ResolvedParent == UnresolvedAncestorPad) {
3785             break;
3786           }
3787           ResolvedPad = ResolvedParent;
3788         }
3789         // If the resolved ancestor search didn't find the uncle's parent,
3790         // then the uncle is not yet resolved.
3791         if (ResolvedPad != AncestorPad)
3792           break;
3793         // This uncle is resolved, so pop it from the worklist.
3794         Worklist.pop_back();
3795       }
3796     }
3797   }
3798 
3799   if (FirstUnwindPad) {
3800     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
3801       BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
3802       Value *SwitchUnwindPad;
3803       if (SwitchUnwindDest)
3804         SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
3805       else
3806         SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
3807       Assert(SwitchUnwindPad == FirstUnwindPad,
3808              "Unwind edges out of a catch must have the same unwind dest as "
3809              "the parent catchswitch",
3810              &FPI, FirstUser, CatchSwitch);
3811     }
3812   }
3813 
3814   visitInstruction(FPI);
3815 }
3816 
3817 void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
3818   BasicBlock *BB = CatchSwitch.getParent();
3819 
3820   Function *F = BB->getParent();
3821   Assert(F->hasPersonalityFn(),
3822          "CatchSwitchInst needs to be in a function with a personality.",
3823          &CatchSwitch);
3824 
3825   // The catchswitch instruction must be the first non-PHI instruction in the
3826   // block.
3827   Assert(BB->getFirstNonPHI() == &CatchSwitch,
3828          "CatchSwitchInst not the first non-PHI instruction in the block.",
3829          &CatchSwitch);
3830 
3831   auto *ParentPad = CatchSwitch.getParentPad();
3832   Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
3833          "CatchSwitchInst has an invalid parent.", ParentPad);
3834 
3835   if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
3836     Instruction *I = UnwindDest->getFirstNonPHI();
3837     Assert(I->isEHPad() && !isa<LandingPadInst>(I),
3838            "CatchSwitchInst must unwind to an EH block which is not a "
3839            "landingpad.",
3840            &CatchSwitch);
3841 
3842     // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
3843     if (getParentPad(I) == ParentPad)
3844       SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
3845   }
3846 
3847   Assert(CatchSwitch.getNumHandlers() != 0,
3848          "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
3849 
3850   for (BasicBlock *Handler : CatchSwitch.handlers()) {
3851     Assert(isa<CatchPadInst>(Handler->getFirstNonPHI()),
3852            "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
3853   }
3854 
3855   visitEHPadPredecessors(CatchSwitch);
3856   visitTerminator(CatchSwitch);
3857 }
3858 
3859 void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
3860   Assert(isa<CleanupPadInst>(CRI.getOperand(0)),
3861          "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
3862          CRI.getOperand(0));
3863 
3864   if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
3865     Instruction *I = UnwindDest->getFirstNonPHI();
3866     Assert(I->isEHPad() && !isa<LandingPadInst>(I),
3867            "CleanupReturnInst must unwind to an EH block which is not a "
3868            "landingpad.",
3869            &CRI);
3870   }
3871 
3872   visitTerminator(CRI);
3873 }
3874 
3875 void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
3876   Instruction *Op = cast<Instruction>(I.getOperand(i));
3877   // If we have an invalid invoke, don't try to compute the dominance.
3878   // We already reject it in the invoke specific checks and the dominance
3879   // computation doesn't handle multiple edges.
3880   if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
3881     if (II->getNormalDest() == II->getUnwindDest())
3882       return;
3883   }
3884 
3885   // Quick check whether the def has already been encountered in the same block.
3886   // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
3887   // uses are defined to happen on the incoming edge, not at the instruction.
3888   //
3889   // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
3890   // wrapping an SSA value, assert that we've already encountered it.  See
3891   // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
3892   if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
3893     return;
3894 
3895   const Use &U = I.getOperandUse(i);
3896   Assert(DT.dominates(Op, U),
3897          "Instruction does not dominate all uses!", Op, &I);
3898 }
3899 
3900 void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
3901   Assert(I.getType()->isPointerTy(), "dereferenceable, dereferenceable_or_null "
3902          "apply only to pointer types", &I);
3903   Assert(isa<LoadInst>(I),
3904          "dereferenceable, dereferenceable_or_null apply only to load"
3905          " instructions, use attributes for calls or invokes", &I);
3906   Assert(MD->getNumOperands() == 1, "dereferenceable, dereferenceable_or_null "
3907          "take one operand!", &I);
3908   ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
3909   Assert(CI && CI->getType()->isIntegerTy(64), "dereferenceable, "
3910          "dereferenceable_or_null metadata value must be an i64!", &I);
3911 }
3912 
3913 /// visitInstruction - Verify that an instruction is well formed.
3914 ///
3915 void Verifier::visitInstruction(Instruction &I) {
3916   BasicBlock *BB = I.getParent();
3917   Assert(BB, "Instruction not embedded in basic block!", &I);
3918 
3919   if (!isa<PHINode>(I)) {   // Check that non-phi nodes are not self referential
3920     for (User *U : I.users()) {
3921       Assert(U != (User *)&I || !DT.isReachableFromEntry(BB),
3922              "Only PHI nodes may reference their own value!", &I);
3923     }
3924   }
3925 
3926   // Check that void typed values don't have names
3927   Assert(!I.getType()->isVoidTy() || !I.hasName(),
3928          "Instruction has a name, but provides a void value!", &I);
3929 
3930   // Check that the return value of the instruction is either void or a legal
3931   // value type.
3932   Assert(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
3933          "Instruction returns a non-scalar type!", &I);
3934 
3935   // Check that the instruction doesn't produce metadata. Calls are already
3936   // checked against the callee type.
3937   Assert(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
3938          "Invalid use of metadata!", &I);
3939 
3940   // Check that all uses of the instruction, if they are instructions
3941   // themselves, actually have parent basic blocks.  If the use is not an
3942   // instruction, it is an error!
3943   for (Use &U : I.uses()) {
3944     if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
3945       Assert(Used->getParent() != nullptr,
3946              "Instruction referencing"
3947              " instruction not embedded in a basic block!",
3948              &I, Used);
3949     else {
3950       CheckFailed("Use of instruction is not an instruction!", U);
3951       return;
3952     }
3953   }
3954 
3955   // Get a pointer to the call base of the instruction if it is some form of
3956   // call.
3957   const CallBase *CBI = dyn_cast<CallBase>(&I);
3958 
3959   for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
3960     Assert(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
3961 
3962     // Check to make sure that only first-class-values are operands to
3963     // instructions.
3964     if (!I.getOperand(i)->getType()->isFirstClassType()) {
3965       Assert(false, "Instruction operands must be first-class values!", &I);
3966     }
3967 
3968     if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
3969       // Check to make sure that the "address of" an intrinsic function is never
3970       // taken.
3971       Assert(!F->isIntrinsic() ||
3972                  (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)),
3973              "Cannot take the address of an intrinsic!", &I);
3974       Assert(
3975           !F->isIntrinsic() || isa<CallInst>(I) ||
3976               F->getIntrinsicID() == Intrinsic::donothing ||
3977               F->getIntrinsicID() == Intrinsic::coro_resume ||
3978               F->getIntrinsicID() == Intrinsic::coro_destroy ||
3979               F->getIntrinsicID() == Intrinsic::experimental_patchpoint_void ||
3980               F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 ||
3981               F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint,
3982           "Cannot invoke an intrinsic other than donothing, patchpoint, "
3983           "statepoint, coro_resume or coro_destroy",
3984           &I);
3985       Assert(F->getParent() == &M, "Referencing function in another module!",
3986              &I, &M, F, F->getParent());
3987     } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
3988       Assert(OpBB->getParent() == BB->getParent(),
3989              "Referring to a basic block in another function!", &I);
3990     } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
3991       Assert(OpArg->getParent() == BB->getParent(),
3992              "Referring to an argument in another function!", &I);
3993     } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
3994       Assert(GV->getParent() == &M, "Referencing global in another module!", &I,
3995              &M, GV, GV->getParent());
3996     } else if (isa<Instruction>(I.getOperand(i))) {
3997       verifyDominatesUse(I, i);
3998     } else if (isa<InlineAsm>(I.getOperand(i))) {
3999       Assert(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
4000              "Cannot take the address of an inline asm!", &I);
4001     } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
4002       if (CE->getType()->isPtrOrPtrVectorTy() ||
4003           !DL.getNonIntegralAddressSpaces().empty()) {
4004         // If we have a ConstantExpr pointer, we need to see if it came from an
4005         // illegal bitcast.  If the datalayout string specifies non-integral
4006         // address spaces then we also need to check for illegal ptrtoint and
4007         // inttoptr expressions.
4008         visitConstantExprsRecursively(CE);
4009       }
4010     }
4011   }
4012 
4013   if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
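    // A well-formed !fpmath attachment carries a single IEEE-single accuracy
    // value in ULPs, e.g. (illustrative):
    //   %d = fdiv float %a, %b, !fpmath !0
    //   !0 = !{float 2.5}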
4014     Assert(I.getType()->isFPOrFPVectorTy(),
4015            "fpmath requires a floating point result!", &I);
4016     Assert(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
4017     if (ConstantFP *CFP0 =
4018             mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
4019       const APFloat &Accuracy = CFP0->getValueAPF();
4020       Assert(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
4021              "fpmath accuracy must have float type", &I);
4022       Assert(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
4023              "fpmath accuracy not a positive number!", &I);
4024     } else {
4025       Assert(false, "invalid fpmath accuracy!", &I);
4026     }
4027   }
4028 
4029   if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
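    // An illustrative well-formed attachment:
    //   %v = load i32, i32* %p, !range !0
    //   !0 = !{i32 0, i32 10}   ; value is known to be in [0, 10)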
4030     Assert(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
4031            "Ranges are only for loads, calls and invokes!", &I);
4032     visitRangeMetadata(I, Range, I.getType());
4033   }
4034 
4035   if (I.getMetadata(LLVMContext::MD_nonnull)) {
4036     Assert(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
4037            &I);
4038     Assert(isa<LoadInst>(I),
4039            "nonnull applies only to load instructions, use attributes"
4040            " for calls or invokes",
4041            &I);
4042   }
4043 
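  // !dereferenceable and !dereferenceable_or_null each take a single i64 byte
  // count, e.g. (illustrative):
  //   %p = load i8*, i8** %pp, !dereferenceable !0
  //   !0 = !{i64 16}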
4044   if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
4045     visitDereferenceableMetadata(I, MD);
4046 
4047   if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
4048     visitDereferenceableMetadata(I, MD);
4049 
4050   if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
4051     TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
4052 
4053   if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
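    // An illustrative well-formed attachment:
    //   %p = load i8*, i8** %pp, !align !0
    //   !0 = !{i64 8}   ; the loaded pointer is 8-byte aligned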
4054     Assert(I.getType()->isPointerTy(), "align applies only to pointer types",
4055            &I);
4056     Assert(isa<LoadInst>(I), "align applies only to load instructions, "
4057            "use attributes for calls or invokes", &I);
4058     Assert(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
4059     ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
4060     Assert(CI && CI->getType()->isIntegerTy(64),
4061            "align metadata value must be an i64!", &I);
4062     uint64_t Align = CI->getZExtValue();
4063     Assert(isPowerOf2_64(Align),
4064            "align metadata value must be a power of 2!", &I);
4065     Assert(Align <= Value::MaximumAlignment,
           "alignment is larger than the implementation-defined limit", &I);
4067   }
4068 
4069   if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
4070     AssertDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
4071     visitMDNode(*N);
4072   }
4073 
4074   if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I))
4075     verifyFragmentExpression(*DII);
4076 
4077   InstsInThisBlock.insert(&I);
4078 }
4079 
4080 /// Allow intrinsics to be verified in different ways.
4081 void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
4082   Function *IF = CS.getCalledFunction();
4083   Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!",
4084          IF);
4085 
4086   // Verify that the intrinsic prototype lines up with what the .td files
4087   // describe.
4088   FunctionType *IFTy = IF->getFunctionType();
4089   bool IsVarArg = IFTy->isVarArg();
4090 
4091   SmallVector<Intrinsic::IITDescriptor, 8> Table;
4092   getIntrinsicInfoTableEntries(ID, Table);
4093   ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
4094 
4095   SmallVector<Type *, 4> ArgTys;
4096   Assert(!Intrinsic::matchIntrinsicType(IFTy->getReturnType(),
4097                                         TableRef, ArgTys),
4098          "Intrinsic has incorrect return type!", IF);
4099   for (unsigned i = 0, e = IFTy->getNumParams(); i != e; ++i)
4100     Assert(!Intrinsic::matchIntrinsicType(IFTy->getParamType(i),
4101                                           TableRef, ArgTys),
4102            "Intrinsic has incorrect argument type!", IF);
4103 
  // Verify that the prototype's varargs-ness matches the intrinsic definition.
4105   if (IsVarArg)
4106     Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
4107            "Intrinsic was not defined with variable arguments!", IF);
4108   else
4109     Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
4110            "Callsite was not defined with variable arguments!", IF);
4111 
4112   // All descriptors should be absorbed by now.
4113   Assert(TableRef.empty(), "Intrinsic has too few arguments!", IF);
4114 
  // Now that we have the intrinsic ID and the actual argument types (and we
  // know they are legal for the intrinsic!), get the intrinsic name through
  // the usual means.  This allows us to verify the mangling of argument types
  // into the name.
4119   const std::string ExpectedName = Intrinsic::getName(ID, ArgTys);
4120   Assert(ExpectedName == IF->getName(),
4121          "Intrinsic name not mangled correctly for type arguments! "
4122          "Should be: " +
4123              ExpectedName,
4124          IF);
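  // For example (illustrative), llvm.memcpy overloaded on <i8*, i8*, i64>
  // must be named llvm.memcpy.p0i8.p0i8.i64.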
4125 
  // If the intrinsic takes MDNode arguments, verify that they are either
  // global or local to *this* function.
4128   for (Value *V : CS.args())
4129     if (auto *MD = dyn_cast<MetadataAsValue>(V))
4130       visitMetadataAsValue(*MD, CS.getCaller());
4131 
4132   switch (ID) {
4133   default:
4134     break;
4135   case Intrinsic::coro_id: {
4136     auto *InfoArg = CS.getArgOperand(3)->stripPointerCasts();
4137     if (isa<ConstantPointerNull>(InfoArg))
4138       break;
4139     auto *GV = dyn_cast<GlobalVariable>(InfoArg);
4140     Assert(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
      "info argument of llvm.coro.id must refer to an initialized "
4142       "constant");
4143     Constant *Init = GV->getInitializer();
4144     Assert(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
      "info argument of llvm.coro.id must refer to either a struct or "
4146       "an array");
4147     break;
4148   }
4149   case Intrinsic::ctlz:  // llvm.ctlz
4150   case Intrinsic::cttz:  // llvm.cttz
4151     Assert(isa<ConstantInt>(CS.getArgOperand(1)),
4152            "is_zero_undef argument of bit counting intrinsics must be a "
4153            "constant int",
4154            CS);
4155     break;
4156   case Intrinsic::experimental_constrained_fadd:
4157   case Intrinsic::experimental_constrained_fsub:
4158   case Intrinsic::experimental_constrained_fmul:
4159   case Intrinsic::experimental_constrained_fdiv:
4160   case Intrinsic::experimental_constrained_frem:
4161   case Intrinsic::experimental_constrained_fma:
4162   case Intrinsic::experimental_constrained_sqrt:
4163   case Intrinsic::experimental_constrained_pow:
4164   case Intrinsic::experimental_constrained_powi:
4165   case Intrinsic::experimental_constrained_sin:
4166   case Intrinsic::experimental_constrained_cos:
4167   case Intrinsic::experimental_constrained_exp:
4168   case Intrinsic::experimental_constrained_exp2:
4169   case Intrinsic::experimental_constrained_log:
4170   case Intrinsic::experimental_constrained_log10:
4171   case Intrinsic::experimental_constrained_log2:
4172   case Intrinsic::experimental_constrained_rint:
4173   case Intrinsic::experimental_constrained_nearbyint:
4174   case Intrinsic::experimental_constrained_maxnum:
4175   case Intrinsic::experimental_constrained_minnum:
4176   case Intrinsic::experimental_constrained_ceil:
4177   case Intrinsic::experimental_constrained_floor:
4178   case Intrinsic::experimental_constrained_round:
4179   case Intrinsic::experimental_constrained_trunc:
4180     visitConstrainedFPIntrinsic(
4181         cast<ConstrainedFPIntrinsic>(*CS.getInstruction()));
4182     break;
4183   case Intrinsic::dbg_declare: // llvm.dbg.declare
4184     Assert(isa<MetadataAsValue>(CS.getArgOperand(0)),
4185            "invalid llvm.dbg.declare intrinsic call 1", CS);
4186     visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(*CS.getInstruction()));
4187     break;
4188   case Intrinsic::dbg_addr: // llvm.dbg.addr
4189     visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(*CS.getInstruction()));
4190     break;
4191   case Intrinsic::dbg_value: // llvm.dbg.value
4192     visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(*CS.getInstruction()));
4193     break;
4194   case Intrinsic::dbg_label: // llvm.dbg.label
4195     visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(*CS.getInstruction()));
4196     break;
4197   case Intrinsic::memcpy:
4198   case Intrinsic::memmove:
4199   case Intrinsic::memset: {
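    // An illustrative well-formed call; alignment is carried by parameter
    // attributes and the trailing i1 is the isvolatile flag:
    //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %dst,
    //                                        i8* align 4 %src, i64 %n, i1 false)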
4200     const auto *MI = cast<MemIntrinsic>(CS.getInstruction());
4201     auto IsValidAlignment = [&](unsigned Alignment) -> bool {
4202       return Alignment == 0 || isPowerOf2_32(Alignment);
4203     };
4204     Assert(IsValidAlignment(MI->getDestAlignment()),
4205            "alignment of arg 0 of memory intrinsic must be 0 or a power of 2",
4206            CS);
4207     if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
4208       Assert(IsValidAlignment(MTI->getSourceAlignment()),
4209              "alignment of arg 1 of memory intrinsic must be 0 or a power of 2",
4210              CS);
4211     }
4212     Assert(isa<ConstantInt>(CS.getArgOperand(3)),
4213            "isvolatile argument of memory intrinsics must be a constant int",
4214            CS);
4215     break;
4216   }
4217   case Intrinsic::memcpy_element_unordered_atomic:
4218   case Intrinsic::memmove_element_unordered_atomic:
4219   case Intrinsic::memset_element_unordered_atomic: {
4220     const auto *AMI = cast<AtomicMemIntrinsic>(CS.getInstruction());
4221 
4222     ConstantInt *ElementSizeCI =
4223         dyn_cast<ConstantInt>(AMI->getRawElementSizeInBytes());
4224     Assert(ElementSizeCI,
4225            "element size of the element-wise unordered atomic memory "
4226            "intrinsic must be a constant int",
4227            CS);
4228     const APInt &ElementSizeVal = ElementSizeCI->getValue();
4229     Assert(ElementSizeVal.isPowerOf2(),
4230            "element size of the element-wise atomic memory intrinsic "
4231            "must be a power of 2",
4232            CS);
4233 
4234     if (auto *LengthCI = dyn_cast<ConstantInt>(AMI->getLength())) {
4235       uint64_t Length = LengthCI->getZExtValue();
4236       uint64_t ElementSize = AMI->getElementSizeInBytes();
4237       Assert((Length % ElementSize) == 0,
4238              "constant length must be a multiple of the element size in the "
4239              "element-wise atomic memory intrinsic",
4240              CS);
4241     }
4242 
4243     auto IsValidAlignment = [&](uint64_t Alignment) {
4244       return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
4245     };
4246     uint64_t DstAlignment = AMI->getDestAlignment();
4247     Assert(IsValidAlignment(DstAlignment),
4248            "incorrect alignment of the destination argument", CS);
4249     if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
4250       uint64_t SrcAlignment = AMT->getSourceAlignment();
4251       Assert(IsValidAlignment(SrcAlignment),
4252              "incorrect alignment of the source argument", CS);
4253     }
4254     break;
4255   }
4256   case Intrinsic::gcroot:
4257   case Intrinsic::gcwrite:
4258   case Intrinsic::gcread:
4259     if (ID == Intrinsic::gcroot) {
4260       AllocaInst *AI =
4261         dyn_cast<AllocaInst>(CS.getArgOperand(0)->stripPointerCasts());
4262       Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", CS);
4263       Assert(isa<Constant>(CS.getArgOperand(1)),
4264              "llvm.gcroot parameter #2 must be a constant.", CS);
4265       if (!AI->getAllocatedType()->isPointerTy()) {
4266         Assert(!isa<ConstantPointerNull>(CS.getArgOperand(1)),
4267                "llvm.gcroot parameter #1 must either be a pointer alloca, "
4268                "or argument #2 must be a non-null constant.",
4269                CS);
4270       }
4271     }
4272 
4273     Assert(CS.getParent()->getParent()->hasGC(),
4274            "Enclosing function does not use GC.", CS);
4275     break;
4276   case Intrinsic::init_trampoline:
4277     Assert(isa<Function>(CS.getArgOperand(1)->stripPointerCasts()),
4278            "llvm.init_trampoline parameter #2 must resolve to a function.",
4279            CS);
4280     break;
4281   case Intrinsic::prefetch:
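    // Operands are (address, rw, locality, cache type); rw must be 0 or 1 and
    // locality must be in [0, 3], e.g. (illustrative):
    //   call void @llvm.prefetch(i8* %ptr, i32 0, i32 3, i32 1)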
4282     Assert(isa<ConstantInt>(CS.getArgOperand(1)) &&
4283                isa<ConstantInt>(CS.getArgOperand(2)) &&
4284                cast<ConstantInt>(CS.getArgOperand(1))->getZExtValue() < 2 &&
4285                cast<ConstantInt>(CS.getArgOperand(2))->getZExtValue() < 4,
4286            "invalid arguments to llvm.prefetch", CS);
4287     break;
4288   case Intrinsic::stackprotector:
4289     Assert(isa<AllocaInst>(CS.getArgOperand(1)->stripPointerCasts()),
4290            "llvm.stackprotector parameter #2 must resolve to an alloca.", CS);
4291     break;
4292   case Intrinsic::lifetime_start:
4293   case Intrinsic::lifetime_end:
4294   case Intrinsic::invariant_start:
4295     Assert(isa<ConstantInt>(CS.getArgOperand(0)),
4296            "size argument of memory use markers must be a constant integer",
4297            CS);
4298     break;
4299   case Intrinsic::invariant_end:
4300     Assert(isa<ConstantInt>(CS.getArgOperand(1)),
4301            "llvm.invariant.end parameter #2 must be a constant integer", CS);
4302     break;
4303 
4304   case Intrinsic::localescape: {
4305     BasicBlock *BB = CS.getParent();
4306     Assert(BB == &BB->getParent()->front(),
4307            "llvm.localescape used outside of entry block", CS);
4308     Assert(!SawFrameEscape,
4309            "multiple calls to llvm.localescape in one function", CS);
4310     for (Value *Arg : CS.args()) {
4311       if (isa<ConstantPointerNull>(Arg))
4312         continue; // Null values are allowed as placeholders.
4313       auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
4314       Assert(AI && AI->isStaticAlloca(),
4315              "llvm.localescape only accepts static allocas", CS);
4316     }
4317     FrameEscapeInfo[BB->getParent()].first = CS.getNumArgOperands();
4318     SawFrameEscape = true;
4319     break;
4320   }
4321   case Intrinsic::localrecover: {
4322     Value *FnArg = CS.getArgOperand(0)->stripPointerCasts();
4323     Function *Fn = dyn_cast<Function>(FnArg);
4324     Assert(Fn && !Fn->isDeclaration(),
4325            "llvm.localrecover first "
           "argument must be a function defined in this module",
4327            CS);
4328     auto *IdxArg = dyn_cast<ConstantInt>(CS.getArgOperand(2));
4329     Assert(IdxArg, "idx argument of llvm.localrecover must be a constant int",
4330            CS);
4331     auto &Entry = FrameEscapeInfo[Fn];
4332     Entry.second = unsigned(
4333         std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
4334     break;
4335   }
4336 
4337   case Intrinsic::experimental_gc_statepoint:
4338     Assert(!CS.isInlineAsm(),
4339            "gc.statepoint support for inline assembly unimplemented", CS);
4340     Assert(CS.getParent()->getParent()->hasGC(),
4341            "Enclosing function does not use GC.", CS);
4342 
4343     verifyStatepoint(CS);
4344     break;
4345   case Intrinsic::experimental_gc_result: {
4346     Assert(CS.getParent()->getParent()->hasGC(),
4347            "Enclosing function does not use GC.", CS);
4348     // Are we tied to a statepoint properly?
4349     CallSite StatepointCS(CS.getArgOperand(0));
4350     const Function *StatepointFn =
4351       StatepointCS.getInstruction() ? StatepointCS.getCalledFunction() : nullptr;
4352     Assert(StatepointFn && StatepointFn->isDeclaration() &&
4353                StatepointFn->getIntrinsicID() ==
4354                    Intrinsic::experimental_gc_statepoint,
4355            "gc.result operand #1 must be from a statepoint", CS,
4356            CS.getArgOperand(0));
4357 
4358     // Assert that result type matches wrapped callee.
4359     const Value *Target = StatepointCS.getArgument(2);
4360     auto *PT = cast<PointerType>(Target->getType());
4361     auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
4362     Assert(CS.getType() == TargetFuncType->getReturnType(),
4363            "gc.result result type does not match wrapped callee", CS);
4364     break;
4365   }
4366   case Intrinsic::experimental_gc_relocate: {
4367     Assert(CS.getNumArgOperands() == 3, "wrong number of arguments", CS);
4368 
4369     Assert(isa<PointerType>(CS.getType()->getScalarType()),
4370            "gc.relocate must return a pointer or a vector of pointers", CS);
4371 
4372     // Check that this relocate is correctly tied to the statepoint
4373 
    // This is the case for a relocate on the unwinding path of an invoke
    // statepoint.
4375     if (LandingPadInst *LandingPad =
4376           dyn_cast<LandingPadInst>(CS.getArgOperand(0))) {
4377 
4378       const BasicBlock *InvokeBB =
4379           LandingPad->getParent()->getUniquePredecessor();
4380 
      // A landingpad relocate's block should have exactly one predecessor,
      // whose terminator is an invoke of a statepoint.
4383       Assert(InvokeBB, "safepoints should have unique landingpads",
4384              LandingPad->getParent());
4385       Assert(InvokeBB->getTerminator(), "safepoint block should be well formed",
4386              InvokeBB);
4387       Assert(isStatepoint(InvokeBB->getTerminator()),
4388              "gc relocate should be linked to a statepoint", InvokeBB);
4389     }
4390     else {
4391       // In all other cases relocate should be tied to the statepoint directly.
4392       // This covers relocates on a normal return path of invoke statepoint and
4393       // relocates of a call statepoint.
4394       auto Token = CS.getArgOperand(0);
4395       Assert(isa<Instruction>(Token) && isStatepoint(cast<Instruction>(Token)),
4396              "gc relocate is incorrectly tied to the statepoint", CS, Token);
4397     }
4398 
4399     // Verify rest of the relocate arguments.
4400 
4401     ImmutableCallSite StatepointCS(
4402         cast<GCRelocateInst>(*CS.getInstruction()).getStatepoint());
4403 
4404     // Both the base and derived must be piped through the safepoint.
4405     Value* Base = CS.getArgOperand(1);
4406     Assert(isa<ConstantInt>(Base),
4407            "gc.relocate operand #2 must be integer offset", CS);
4408 
4409     Value* Derived = CS.getArgOperand(2);
4410     Assert(isa<ConstantInt>(Derived),
4411            "gc.relocate operand #3 must be integer offset", CS);
4412 
4413     const int BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
4414     const int DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
4415     // Check the bounds
4416     Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCS.arg_size(),
4417            "gc.relocate: statepoint base index out of bounds", CS);
4418     Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCS.arg_size(),
4419            "gc.relocate: statepoint derived index out of bounds", CS);
4420 
    // Check that BaseIndex and DerivedIndex fall within the 'gc parameters'
    // section of the statepoint's argument list.
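    // The statepoint's operands are laid out as: id, num patch bytes, target,
    // num call args, flags, the call args, num transition args, the
    // transition args, num deopt args, the deopt args, and finally the gc
    // parameters that BaseIndex/DerivedIndex must point into.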
4423     Assert(StatepointCS.arg_size() > 0,
4424            "gc.statepoint: insufficient arguments");
4425     Assert(isa<ConstantInt>(StatepointCS.getArgument(3)),
           "gc.statepoint: number of call arguments must be constant integer");
4427     const unsigned NumCallArgs =
4428         cast<ConstantInt>(StatepointCS.getArgument(3))->getZExtValue();
4429     Assert(StatepointCS.arg_size() > NumCallArgs + 5,
4430            "gc.statepoint: mismatch in number of call arguments");
4431     Assert(isa<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5)),
4432            "gc.statepoint: number of transition arguments must be "
4433            "a constant integer");
4434     const int NumTransitionArgs =
4435         cast<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5))
4436             ->getZExtValue();
4437     const int DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1;
4438     Assert(isa<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart)),
4439            "gc.statepoint: number of deoptimization arguments must be "
4440            "a constant integer");
4441     const int NumDeoptArgs =
4442         cast<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart))
4443             ->getZExtValue();
4444     const int GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs;
4445     const int GCParamArgsEnd = StatepointCS.arg_size();
4446     Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd,
4447            "gc.relocate: statepoint base index doesn't fall within the "
4448            "'gc parameters' section of the statepoint call",
4449            CS);
4450     Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd,
4451            "gc.relocate: statepoint derived index doesn't fall within the "
4452            "'gc parameters' section of the statepoint call",
4453            CS);
4454 
    // The relocated value must be a pointer or a vector of pointers, but
    // gc_relocate does not need to return the same pointer type as the
    // relocated pointer.  It can be cast to the correct type later if desired.
    // However, it must have the same address space and 'vectorness' as the
    // relocated pointer.
4459     GCRelocateInst &Relocate = cast<GCRelocateInst>(*CS.getInstruction());
4460     Assert(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
4461            "gc.relocate: relocated value must be a gc pointer", CS);
4462 
4463     auto ResultType = CS.getType();
4464     auto DerivedType = Relocate.getDerivedPtr()->getType();
4465     Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(),
4466            "gc.relocate: vector relocates to vector and pointer to pointer",
4467            CS);
4468     Assert(
4469         ResultType->getPointerAddressSpace() ==
4470             DerivedType->getPointerAddressSpace(),
4471         "gc.relocate: relocating a pointer shouldn't change its address space",
4472         CS);
4473     break;
4474   }
4475   case Intrinsic::eh_exceptioncode:
4476   case Intrinsic::eh_exceptionpointer: {
4477     Assert(isa<CatchPadInst>(CS.getArgOperand(0)),
4478            "eh.exceptionpointer argument must be a catchpad", CS);
4479     break;
4480   }
4481   case Intrinsic::masked_load: {
4482     Assert(CS.getType()->isVectorTy(), "masked_load: must return a vector", CS);
4483 
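    // An illustrative well-formed call (the intrinsic is overloaded on the
    // data and pointer types, which are mangled into its name):
    //   %r = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(
    //            <4 x i32>* %p, i32 4, <4 x i1> %mask, <4 x i32> %passthru)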
4484     Value *Ptr = CS.getArgOperand(0);
4485     //Value *Alignment = CS.getArgOperand(1);
4486     Value *Mask = CS.getArgOperand(2);
4487     Value *PassThru = CS.getArgOperand(3);
4488     Assert(Mask->getType()->isVectorTy(),
4489            "masked_load: mask must be vector", CS);
4490 
4491     // DataTy is the overloaded type
4492     Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4493     Assert(DataTy == CS.getType(),
4494            "masked_load: return must match pointer type", CS);
4495     Assert(PassThru->getType() == DataTy,
4496            "masked_load: pass through and data type must match", CS);
4497     Assert(Mask->getType()->getVectorNumElements() ==
4498            DataTy->getVectorNumElements(),
4499            "masked_load: vector mask must be same length as data", CS);
4500     break;
4501   }
4502   case Intrinsic::masked_store: {
4503     Value *Val = CS.getArgOperand(0);
4504     Value *Ptr = CS.getArgOperand(1);
4505     //Value *Alignment = CS.getArgOperand(2);
4506     Value *Mask = CS.getArgOperand(3);
4507     Assert(Mask->getType()->isVectorTy(),
4508            "masked_store: mask must be vector", CS);
4509 
4510     // DataTy is the overloaded type
4511     Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4512     Assert(DataTy == Val->getType(),
4513            "masked_store: storee must match pointer type", CS);
4514     Assert(Mask->getType()->getVectorNumElements() ==
4515            DataTy->getVectorNumElements(),
4516            "masked_store: vector mask must be same length as data", CS);
4517     break;
4518   }
4519 
4520   case Intrinsic::experimental_guard: {
4521     Assert(CS.isCall(), "experimental_guard cannot be invoked", CS);
4522     Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
4523            "experimental_guard must have exactly one "
4524            "\"deopt\" operand bundle");
4525     break;
4526   }
4527 
4528   case Intrinsic::experimental_deoptimize: {
4529     Assert(CS.isCall(), "experimental_deoptimize cannot be invoked", CS);
4530     Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
4531            "experimental_deoptimize must have exactly one "
4532            "\"deopt\" operand bundle");
4533     Assert(CS.getType() == CS.getInstruction()->getFunction()->getReturnType(),
4534            "experimental_deoptimize return type must match caller return type");
4535 
4536     if (CS.isCall()) {
4537       auto *DeoptCI = CS.getInstruction();
4538       auto *RI = dyn_cast<ReturnInst>(DeoptCI->getNextNode());
4539       Assert(RI,
4540              "calls to experimental_deoptimize must be followed by a return");
4541 
4542       if (!CS.getType()->isVoidTy() && RI)
4543         Assert(RI->getReturnValue() == DeoptCI,
4544                "calls to experimental_deoptimize must be followed by a return "
4545                "of the value computed by experimental_deoptimize");
4546     }
4547 
4548     break;
4549   }
4550   case Intrinsic::sadd_sat:
4551   case Intrinsic::uadd_sat:
4552   case Intrinsic::ssub_sat:
4553   case Intrinsic::usub_sat: {
4554     Value *Op1 = CS.getArgOperand(0);
4555     Value *Op2 = CS.getArgOperand(1);
4556     Assert(Op1->getType()->isIntOrIntVectorTy(),
4557            "first operand of [us][add|sub]_sat must be an int type or vector "
4558            "of ints");
4559     Assert(Op2->getType()->isIntOrIntVectorTy(),
4560            "second operand of [us][add|sub]_sat must be an int type or vector "
4561            "of ints");
4562     break;
4563   }
4564   case Intrinsic::smul_fix: {
4565     Value *Op1 = CS.getArgOperand(0);
4566     Value *Op2 = CS.getArgOperand(1);
4567     Assert(Op1->getType()->isIntOrIntVectorTy(),
4568            "first operand of smul_fix must be an int type or vector "
4569            "of ints");
4570     Assert(Op2->getType()->isIntOrIntVectorTy(),
4571            "second operand of smul_fix must be an int type or vector "
4572            "of ints");
4573 
4574     auto *Op3 = dyn_cast<ConstantInt>(CS.getArgOperand(2));
4575     Assert(Op3, "third argument of smul_fix must be a constant integer");
4576     Assert(Op3->getType()->getBitWidth() <= 32,
4577            "third argument of smul_fix must fit within 32 bits");
4578     Assert(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
4579            "the scale of smul_fix must be less than the width of the operands");
4580     break;
4581   }
4582   };
4583 }
4584 
4585 /// Carefully grab the subprogram from a local scope.
4586 ///
/// This walks the scope chain by hand, avoiding the built-in assertions that
/// would typically fire on a broken chain.
4589 static DISubprogram *getSubprogram(Metadata *LocalScope) {
4590   if (!LocalScope)
4591     return nullptr;
4592 
4593   if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
4594     return SP;
4595 
4596   if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
4597     return getSubprogram(LB->getRawScope());
4598 
4599   // Just return null; broken scope chains are checked elsewhere.
4600   assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
4601   return nullptr;
4602 }
4603 
4604 void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
4605   unsigned NumOperands = FPI.getNumArgOperands();
4606   Assert(((NumOperands == 5 && FPI.isTernaryOp()) ||
4607           (NumOperands == 3 && FPI.isUnaryOp()) || (NumOperands == 4)),
4608            "invalid arguments for constrained FP intrinsic", &FPI);
4609   Assert(isa<MetadataAsValue>(FPI.getArgOperand(NumOperands-1)),
4610          "invalid exception behavior argument", &FPI);
4611   Assert(isa<MetadataAsValue>(FPI.getArgOperand(NumOperands-2)),
4612          "invalid rounding mode argument", &FPI);
4613   Assert(FPI.getRoundingMode() != ConstrainedFPIntrinsic::rmInvalid,
4614          "invalid rounding mode argument", &FPI);
4615   Assert(FPI.getExceptionBehavior() != ConstrainedFPIntrinsic::ebInvalid,
4616          "invalid exception behavior argument", &FPI);
4617 }
4618 
4619 void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
4620   auto *MD = cast<MetadataAsValue>(DII.getArgOperand(0))->getMetadata();
4621   AssertDI(isa<ValueAsMetadata>(MD) ||
4622              (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
4623          "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
4624   AssertDI(isa<DILocalVariable>(DII.getRawVariable()),
4625          "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
4626          DII.getRawVariable());
4627   AssertDI(isa<DIExpression>(DII.getRawExpression()),
4628          "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
4629          DII.getRawExpression());
4630 
4631   // Ignore broken !dbg attachments; they're checked elsewhere.
4632   if (MDNode *N = DII.getDebugLoc().getAsMDNode())
4633     if (!isa<DILocation>(N))
4634       return;
4635 
4636   BasicBlock *BB = DII.getParent();
4637   Function *F = BB ? BB->getParent() : nullptr;
4638 
4639   // The scopes for variables and !dbg attachments must agree.
4640   DILocalVariable *Var = DII.getVariable();
4641   DILocation *Loc = DII.getDebugLoc();
4642   AssertDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
4643            &DII, BB, F);
4644 
4645   DISubprogram *VarSP = getSubprogram(Var->getRawScope());
4646   DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
4647   if (!VarSP || !LocSP)
4648     return; // Broken scope chains are checked elsewhere.
4649 
4650   AssertDI(VarSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind +
4651                                " variable and !dbg attachment",
4652            &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
4653            Loc->getScope()->getSubprogram());
4654 
4655   // This check is redundant with one in visitLocalVariable().
4656   AssertDI(isType(Var->getRawType()), "invalid type ref", Var,
4657            Var->getRawType());
4658   if (auto *Type = dyn_cast_or_null<DIType>(Var->getRawType()))
4659     if (Type->isBlockByrefStruct())
4660       AssertDI(DII.getExpression() && DII.getExpression()->getNumElements(),
4661                "BlockByRef variable without complex expression", Var, &DII);
4662 
4663   verifyFnArgs(DII);
4664 }
4665 
4666 void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
4667   AssertDI(isa<DILabel>(DLI.getRawLabel()),
4668          "invalid llvm.dbg." + Kind + " intrinsic variable", &DLI,
4669          DLI.getRawLabel());
4670 
4671   // Ignore broken !dbg attachments; they're checked elsewhere.
4672   if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
4673     if (!isa<DILocation>(N))
4674       return;
4675 
4676   BasicBlock *BB = DLI.getParent();
4677   Function *F = BB ? BB->getParent() : nullptr;
4678 
4679   // The scopes for variables and !dbg attachments must agree.
4680   DILabel *Label = DLI.getLabel();
4681   DILocation *Loc = DLI.getDebugLoc();
4682   Assert(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
4683          &DLI, BB, F);
4684 
4685   DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
4686   DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
4687   if (!LabelSP || !LocSP)
4688     return;
4689 
4690   AssertDI(LabelSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind +
4691                              " label and !dbg attachment",
4692            &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
4693            Loc->getScope()->getSubprogram());
4694 }
4695 
4696 void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
4697   DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
4698   DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
4699 
4700   // We don't know whether this intrinsic verified correctly.
4701   if (!V || !E || !E->isValid())
4702     return;
4703 
4704   // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
4705   auto Fragment = E->getFragmentInfo();
4706   if (!Fragment)
4707     return;
4708 
4709   // The frontend helps out GDB by emitting the members of local anonymous
4710   // unions as artificial local variables with shared storage. When SROA splits
4711   // the storage for artificial local variables that are smaller than the entire
4712   // union, the overhang piece will be outside of the allotted space for the
4713   // variable and this check fails.
4714   // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
4715   if (V->isArtificial())
4716     return;
4717 
4718   verifyFragmentExpression(*V, *Fragment, &I);
4719 }
4720 
4721 template <typename ValueOrMetadata>
4722 void Verifier::verifyFragmentExpression(const DIVariable &V,
4723                                         DIExpression::FragmentInfo Fragment,
4724                                         ValueOrMetadata *Desc) {
4725   // If there's no size, the type is broken, but that should be checked
4726   // elsewhere.
4727   auto VarSize = V.getSizeInBits();
4728   if (!VarSize)
4729     return;
4730 
4731   unsigned FragSize = Fragment.SizeInBits;
4732   unsigned FragOffset = Fragment.OffsetInBits;
4733   AssertDI(FragSize + FragOffset <= *VarSize,
4734          "fragment is larger than or outside of variable", Desc, &V);
4735   AssertDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
4736 }
4737 
4738 void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
  // This function does not take the scope of non-inlined function arguments
  // into account.  Don't run it if the current function is nodebug, because it
  // may contain inlined debug intrinsics.
4742   if (!HasDebugInfo)
4743     return;
4744 
4745   // For performance reasons only check non-inlined ones.
4746   if (I.getDebugLoc()->getInlinedAt())
4747     return;
4748 
4749   DILocalVariable *Var = I.getVariable();
4750   AssertDI(Var, "dbg intrinsic without variable");
4751 
4752   unsigned ArgNo = Var->getArg();
4753   if (!ArgNo)
4754     return;
4755 
4756   // Verify there are no duplicate function argument debug info entries.
4757   // These will cause hard-to-debug assertions in the DWARF backend.
4758   if (DebugFnArgs.size() < ArgNo)
4759     DebugFnArgs.resize(ArgNo, nullptr);
4760 
4761   auto *Prev = DebugFnArgs[ArgNo - 1];
4762   DebugFnArgs[ArgNo - 1] = Var;
4763   AssertDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
4764            Prev, Var);
4765 }
4766 
4767 void Verifier::verifyCompileUnits() {
4768   // When more than one Module is imported into the same context, such as during
4769   // an LTO build before linking the modules, ODR type uniquing may cause types
4770   // to point to a different CU. This check does not make sense in this case.
4771   if (M.getContext().isODRUniquingDebugTypes())
4772     return;
4773   auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
4774   SmallPtrSet<const Metadata *, 2> Listed;
4775   if (CUs)
4776     Listed.insert(CUs->op_begin(), CUs->op_end());
4777   for (auto *CU : CUVisited)
4778     AssertDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
4779   CUVisited.clear();
4780 }
4781 
4782 void Verifier::verifyDeoptimizeCallingConvs() {
4783   if (DeoptimizeDeclarations.empty())
4784     return;
4785 
4786   const Function *First = DeoptimizeDeclarations[0];
4787   for (auto *F : makeArrayRef(DeoptimizeDeclarations).slice(1)) {
4788     Assert(First->getCallingConv() == F->getCallingConv(),
4789            "All llvm.experimental.deoptimize declarations must have the same "
4790            "calling convention",
4791            First, F);
4792   }
4793 }
4794 
4795 void Verifier::verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F) {
4796   bool HasSource = F.getSource().hasValue();
4797   if (!HasSourceDebugInfo.count(&U))
4798     HasSourceDebugInfo[&U] = HasSource;
4799   AssertDI(HasSource == HasSourceDebugInfo[&U],
4800            "inconsistent use of embedded source");
4801 }
4802 
4803 //===----------------------------------------------------------------------===//
4804 //  Implement the public interfaces to this file...
4805 //===----------------------------------------------------------------------===//
4806 
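// Typical client usage (illustrative); both entry points return true when the
// IR is broken:
//   if (verifyModule(M, &errs()))
//     report_fatal_error("broken module");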
4807 bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
4808   Function &F = const_cast<Function &>(f);
4809 
4810   // Don't use a raw_null_ostream.  Printing IR is expensive.
4811   Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
4812 
4813   // Note that this function's return value is inverted from what you would
4814   // expect of a function called "verify".
4815   return !V.verify(F);
4816 }
4817 
4818 bool llvm::verifyModule(const Module &M, raw_ostream *OS,
4819                         bool *BrokenDebugInfo) {
4820   // Don't use a raw_null_ostream.  Printing IR is expensive.
4821   Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
4822 
4823   bool Broken = false;
4824   for (const Function &F : M)
4825     Broken |= !V.verify(F);
4826 
4827   Broken |= !V.verify();
4828   if (BrokenDebugInfo)
4829     *BrokenDebugInfo = V.hasBrokenDebugInfo();
4830   // Note that this function's return value is inverted from what you would
4831   // expect of a function called "verify".
4832   return Broken;
4833 }
4834 
4835 namespace {
4836 
4837 struct VerifierLegacyPass : public FunctionPass {
4838   static char ID;
4839 
4840   std::unique_ptr<Verifier> V;
4841   bool FatalErrors = true;
4842 
4843   VerifierLegacyPass() : FunctionPass(ID) {
4844     initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
4845   }
4846   explicit VerifierLegacyPass(bool FatalErrors)
4847       : FunctionPass(ID),
4848         FatalErrors(FatalErrors) {
4849     initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
4850   }
4851 
4852   bool doInitialization(Module &M) override {
4853     V = llvm::make_unique<Verifier>(
4854         &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
4855     return false;
4856   }
4857 
4858   bool runOnFunction(Function &F) override {
4859     if (!V->verify(F) && FatalErrors) {
4860       errs() << "in function " << F.getName() << '\n';
4861       report_fatal_error("Broken function found, compilation aborted!");
4862     }
4863     return false;
4864   }
4865 
4866   bool doFinalization(Module &M) override {
4867     bool HasErrors = false;
4868     for (Function &F : M)
4869       if (F.isDeclaration())
4870         HasErrors |= !V->verify(F);
4871 
4872     HasErrors |= !V->verify();
4873     if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
4874       report_fatal_error("Broken module found, compilation aborted!");
4875     return false;
4876   }
4877 
4878   void getAnalysisUsage(AnalysisUsage &AU) const override {
4879     AU.setPreservesAll();
4880   }
4881 };
4882 
4883 } // end anonymous namespace
4884 
4885 /// Helper to issue failure from the TBAA verification
4886 template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
4887   if (Diagnostic)
4888     return Diagnostic->CheckFailed(Args...);
4889 }
4890 
4891 #define AssertTBAA(C, ...)                                                     \
4892   do {                                                                         \
4893     if (!(C)) {                                                                \
4894       CheckFailed(__VA_ARGS__);                                                \
4895       return false;                                                            \
4896     }                                                                          \
4897   } while (false)
4898 
4899 /// Verify that \p BaseNode can be used as the "base type" in the struct-path
4900 /// TBAA scheme.  This means \p BaseNode is either a scalar node, or a
4901 /// struct-type node describing an aggregate data structure (like a struct).
4902 TBAAVerifier::TBAABaseNodeSummary
4903 TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
4904                                  bool IsNewFormat) {
4905   if (BaseNode->getNumOperands() < 2) {
4906     CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
4907     return {true, ~0u};
4908   }
4909 
4910   auto Itr = TBAABaseNodes.find(BaseNode);
4911   if (Itr != TBAABaseNodes.end())
4912     return Itr->second;
4913 
4914   auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
4915   auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
4916   (void)InsertResult;
4917   assert(InsertResult.second && "We just checked!");
4918   return Result;
4919 }
4920 
4921 TBAAVerifier::TBAABaseNodeSummary
4922 TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
4923                                      bool IsNewFormat) {
4924   const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
4925 
4926   if (BaseNode->getNumOperands() == 2) {
4927     // Scalar nodes can only be accessed at offset 0.
4928     return isValidScalarTBAANode(BaseNode)
4929                ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
4930                : InvalidNode;
4931   }
4932 
4933   if (IsNewFormat) {
4934     if (BaseNode->getNumOperands() % 3 != 0) {
      CheckFailed("Access tag nodes must have a number of operands that is a "
                  "multiple of 3!", BaseNode);
4937       return InvalidNode;
4938     }
4939   } else {
4940     if (BaseNode->getNumOperands() % 2 != 1) {
4941       CheckFailed("Struct tag nodes must have an odd number of operands!",
4942                   BaseNode);
4943       return InvalidNode;
4944     }
4945   }
4946 
4947   // Check the type size field.
4948   if (IsNewFormat) {
4949     auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
4950         BaseNode->getOperand(1));
4951     if (!TypeSizeNode) {
4952       CheckFailed("Type size nodes must be constants!", &I, BaseNode);
4953       return InvalidNode;
4954     }
4955   }
4956 
4957   // Check the type name field. In the new format it can be anything.
4958   if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
4959     CheckFailed("Struct tag nodes have a string as their first operand",
4960                 BaseNode);
4961     return InvalidNode;
4962   }
4963 
4964   bool Failed = false;
4965 
4966   Optional<APInt> PrevOffset;
4967   unsigned BitWidth = ~0u;
4968 
4969   // We've already checked that BaseNode is not a degenerate root node with one
4970   // operand in \c verifyTBAABaseNode, so this loop should run at least once.
4971   unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
4972   unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
4973   for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
4974            Idx += NumOpsPerField) {
4975     const MDOperand &FieldTy = BaseNode->getOperand(Idx);
4976     const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
4977     if (!isa<MDNode>(FieldTy)) {
4978       CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
4979       Failed = true;
4980       continue;
4981     }
4982 
4983     auto *OffsetEntryCI =
4984         mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
4985     if (!OffsetEntryCI) {
4986       CheckFailed("Offset entries must be constants!", &I, BaseNode);
4987       Failed = true;
4988       continue;
4989     }
4990 
4991     if (BitWidth == ~0u)
4992       BitWidth = OffsetEntryCI->getBitWidth();
4993 
4994     if (OffsetEntryCI->getBitWidth() != BitWidth) {
4995       CheckFailed(
4996           "Bitwidth between the offsets and struct type entries must match", &I,
4997           BaseNode);
4998       Failed = true;
4999       continue;
5000     }
5001 
5002     // NB! As far as I can tell, we generate a non-strictly increasing offset
    // sequence only from structs that have zero-size bit fields.  When
5004     // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
5005     // pick the field lexically the latest in struct type metadata node.  This
5006     // mirrors the actual behavior of the alias analysis implementation.
5007     bool IsAscending =
5008         !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
5009 
5010     if (!IsAscending) {
5011       CheckFailed("Offsets must be increasing!", &I, BaseNode);
5012       Failed = true;
5013     }
5014 
5015     PrevOffset = OffsetEntryCI->getValue();
5016 
5017     if (IsNewFormat) {
5018       auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
5019           BaseNode->getOperand(Idx + 2));
5020       if (!MemberSizeNode) {
5021         CheckFailed("Member size entries must be constants!", &I, BaseNode);
5022         Failed = true;
5023         continue;
5024       }
5025     }
5026   }
5027 
5028   return Failed ? InvalidNode
5029                 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
5030 }
5031 
5032 static bool IsRootTBAANode(const MDNode *MD) {
5033   return MD->getNumOperands() < 2;
5034 }
5035 
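// A scalar TBAA type node consists of a string name, a link to its parent
// type, and optionally a constant-zero third operand, e.g. (illustrative):
//   !1 = !{!"int", !2, i64 0}
//   !2 = !{!"omnipotent char", !3, i64 0}
//   !3 = !{!"Simple C/C++ TBAA"}   ; the root node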
5036 static bool IsScalarTBAANodeImpl(const MDNode *MD,
5037                                  SmallPtrSetImpl<const MDNode *> &Visited) {
5038   if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
5039     return false;
5040 
5041   if (!isa<MDString>(MD->getOperand(0)))
5042     return false;
5043 
5044   if (MD->getNumOperands() == 3) {
5045     auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
5046     if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
5047       return false;
5048   }
5049 
5050   auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
5051   return Parent && Visited.insert(Parent).second &&
5052          (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
5053 }
5054 
5055 bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
5056   auto ResultIt = TBAAScalarNodes.find(MD);
5057   if (ResultIt != TBAAScalarNodes.end())
5058     return ResultIt->second;
5059 
5060   SmallPtrSet<const MDNode *, 4> Visited;
5061   bool Result = IsScalarTBAANodeImpl(MD, Visited);
5062   auto InsertResult = TBAAScalarNodes.insert({MD, Result});
5063   (void)InsertResult;
5064   assert(InsertResult.second && "Just checked!");
5065 
5066   return Result;
5067 }
5068 
5069 /// Returns the field node at the offset \p Offset in \p BaseNode.  Update \p
5070 /// Offset in place to be the offset within the field node returned.
5071 ///
5072 /// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
5073 MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
5074                                                    const MDNode *BaseNode,
5075                                                    APInt &Offset,
5076                                                    bool IsNewFormat) {
5077   assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
5078 
5079   // Scalar nodes have only one possible "field" -- their parent in the access
5080   // hierarchy.  Offset must be zero at this point, but our caller is supposed
5081   // to Assert that.
5082   if (BaseNode->getNumOperands() == 2)
5083     return cast<MDNode>(BaseNode->getOperand(1));
5084 
5085   unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
5086   unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
5087   for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
5088            Idx += NumOpsPerField) {
5089     auto *OffsetEntryCI =
5090         mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
5091     if (OffsetEntryCI->getValue().ugt(Offset)) {
5092       if (Idx == FirstFieldOpNo) {
5093         CheckFailed("Could not find TBAA parent in struct type node", &I,
5094                     BaseNode, &Offset);
5095         return nullptr;
5096       }
5097 
5098       unsigned PrevIdx = Idx - NumOpsPerField;
5099       auto *PrevOffsetEntryCI =
5100           mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
5101       Offset -= PrevOffsetEntryCI->getValue();
5102       return cast<MDNode>(BaseNode->getOperand(PrevIdx));
5103     }
5104   }
5105 
5106   unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
5107   auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
5108       BaseNode->getOperand(LastIdx + 1));
5109   Offset -= LastOffsetEntryCI->getValue();
5110   return cast<MDNode>(BaseNode->getOperand(LastIdx));
5111 }
5112 
5113 static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
5114   if (!Type || Type->getNumOperands() < 3)
5115     return false;
5116 
  // In the new format, type nodes have a reference to the parent type as
  // their first operand.
5119   MDNode *Parent = dyn_cast_or_null<MDNode>(Type->getOperand(0));
5120   if (!Parent)
5121     return false;
5122 
5123   return true;
5124 }
5125 
5126 bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
5127   AssertTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
5128                  isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
5129                  isa<AtomicCmpXchgInst>(I),
5130              "This instruction shall not have a TBAA access tag!", &I);
5131 
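  // A struct-path access tag has the shape (base type, access type, offset,
  // [is-immutable]), e.g. (illustrative):
  //   !0 = !{!1, !2, i64 0}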
5132   bool IsStructPathTBAA =
5133       isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
5134 
5135   AssertTBAA(
5136       IsStructPathTBAA,
5137       "Old-style TBAA is no longer allowed, use struct-path TBAA instead", &I);
5138 
5139   MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
5140   MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
5141 
5142   bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
5143 
5144   if (IsNewFormat) {
5145     AssertTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
5146                "Access tag metadata must have either 4 or 5 operands", &I, MD);
5147   } else {
5148     AssertTBAA(MD->getNumOperands() < 5,
5149                "Struct tag metadata must have either 3 or 4 operands", &I, MD);
5150   }
5151 
5152   // Check the access size field.
5153   if (IsNewFormat) {
5154     auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
5155         MD->getOperand(3));
5156     AssertTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
5157   }
5158 
5159   // Check the immutability flag.
5160   unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
5161   if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
5162     auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
5163         MD->getOperand(ImmutabilityFlagOpNo));
5164     AssertTBAA(IsImmutableCI,
5165                "Immutability tag on struct tag metadata must be a constant",
5166                &I, MD);
5167     AssertTBAA(
5168         IsImmutableCI->isZero() || IsImmutableCI->isOne(),
5169         "Immutability part of the struct tag metadata must be either 0 or 1",
5170         &I, MD);
5171   }
5172 
5173   AssertTBAA(BaseNode && AccessType,
5174              "Malformed struct tag metadata: base and access-type "
5175              "should be non-null and point to Metadata nodes",
5176              &I, MD, BaseNode, AccessType);
5177 
5178   if (!IsNewFormat) {
5179     AssertTBAA(isValidScalarTBAANode(AccessType),
5180                "Access type node must be a valid scalar type", &I, MD,
5181                AccessType);
5182   }
5183 
5184   auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
5185   AssertTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
5186 
5187   APInt Offset = OffsetCI->getValue();
5188   bool SeenAccessTypeInPath = false;
5189 
5190   SmallPtrSet<MDNode *, 4> StructPath;
5191 
5192   for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
5193        BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
5194                                                IsNewFormat)) {
5195     if (!StructPath.insert(BaseNode).second) {
5196       CheckFailed("Cycle detected in struct path", &I, MD);
5197       return false;
5198     }
5199 
5200     bool Invalid;
5201     unsigned BaseNodeBitWidth;
5202     std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
5203                                                              IsNewFormat);
5204 
5205     // If the base node is invalid in itself, then we've already printed all the
5206     // errors we wanted to print.
5207     if (Invalid)
5208       return false;
5209 
5210     SeenAccessTypeInPath |= BaseNode == AccessType;
5211 
5212     if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
5213       AssertTBAA(Offset == 0, "Offset not zero at the point of scalar access",
5214                  &I, MD, &Offset);
5215 
5216     AssertTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
5217                    (BaseNodeBitWidth == 0 && Offset == 0) ||
5218                    (IsNewFormat && BaseNodeBitWidth == ~0u),
5219                "Access bit-width not the same as description bit-width", &I, MD,
5220                BaseNodeBitWidth, Offset.getBitWidth());
5221 
5222     if (IsNewFormat && SeenAccessTypeInPath)
5223       break;
5224   }
5225 
5226   AssertTBAA(SeenAccessTypeInPath, "Did not see access type in access path!",
5227              &I, MD);
5228   return true;
5229 }
5230 
5231 char VerifierLegacyPass::ID = 0;
5232 INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
5233 
5234 FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
5235   return new VerifierLegacyPass(FatalErrors);
5236 }
5237 
5238 AnalysisKey VerifierAnalysis::Key;
5239 VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
5240                                                ModuleAnalysisManager &) {
5241   Result Res;
5242   Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
5243   return Res;
5244 }
5245 
5246 VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
5247                                                FunctionAnalysisManager &) {
5248   return { llvm::verifyFunction(F, &dbgs()), false };
5249 }
5250 
5251 PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
5252   auto Res = AM.getResult<VerifierAnalysis>(M);
5253   if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
5254     report_fatal_error("Broken module found, compilation aborted!");
5255 
5256   return PreservedAnalyses::all();
5257 }
5258 
5259 PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
5260   auto res = AM.getResult<VerifierAnalysis>(F);
5261   if (res.IRBroken && FatalErrors)
5262     report_fatal_error("Broken function found, compilation aborted!");
5263 
5264   return PreservedAnalyses::all();
5265 }
5266