1 //===-- Verifier.cpp - Implement the Module Verifier ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the function verifier interface, which can be used for
10 // some sanity checking of input to the system.
11 //
12 // Note that this does not provide full `Java style' security and verification;
13 // instead it just tries to ensure that code is well-formed, for example:
14 //
15 //  * Both of a binary operator's parameters are of the same type
16 //  * Verify that the indices of mem access instructions match other operands
17 //  * Verify that arithmetic and other operations are only performed on
18 //    first-class types, e.g. that shifts & logicals only happen on integrals.
19 //  * All of the constants in a switch statement are of the correct type
20 //  * The code is in valid SSA form
21 //  * It should be illegal to put a label into any other type (like a structure)
22 //    or to return one. [except constant arrays!]
23 //  * Only phi nodes can be self-referential: 'add i32 %0, %0 ; <int>:0' is bad
24 //  * PHI nodes must have an entry for each predecessor, with no extras.
25 //  * PHI nodes must be the first thing in a basic block, all grouped together
26 //  * PHI nodes must have at least one entry
27 //  * All basic blocks should only end with terminator insts, not contain them
28 //  * The entry node to a function must not have predecessors
29 //  * All Instructions must be embedded into a basic block
30 //  * Functions cannot take a void-typed parameter
31 //  * Verify that a function's argument list agrees with its declared type.
32 //  * It is illegal to specify a name for a void value.
33 //  * It is illegal to have an internal global value with no initializer
34 //  * It is illegal to have a ret instruction that returns a value that does not
35 //    agree with the function return value type.
36 //  * Function call argument types match the function prototype
37 //  * A landing pad is defined by a landingpad instruction, and can be jumped to
38 //    only by the unwind edge of an invoke instruction.
39 //  * A landingpad instruction must be the first non-PHI instruction in the
40 //    block.
41 //  * Landingpad instructions must be in a function with a personality function.
42 //  * All other things that are tested by asserts spread about the code...
43 //
44 //===----------------------------------------------------------------------===//
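
// Illustrative usage sketch (not part of the original file): clients normally
// reach this verifier through the entry points declared in llvm/IR/Verifier.h:
//
//   #include "llvm/IR/Verifier.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   // Both helpers return true if errors were found; diagnostics, when a
//   // stream is supplied, are printed to it.
//   bool ModBroken = llvm::verifyModule(M, &llvm::errs());
//   bool FnBroken  = llvm::verifyFunction(F, &llvm::errs());
//
// M and F above are a hypothetical Module and Function already in memory.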
45 
46 #include "llvm/IR/Verifier.h"
47 #include "llvm/ADT/APFloat.h"
48 #include "llvm/ADT/APInt.h"
49 #include "llvm/ADT/ArrayRef.h"
50 #include "llvm/ADT/DenseMap.h"
51 #include "llvm/ADT/MapVector.h"
52 #include "llvm/ADT/Optional.h"
53 #include "llvm/ADT/STLExtras.h"
54 #include "llvm/ADT/SmallPtrSet.h"
55 #include "llvm/ADT/SmallSet.h"
56 #include "llvm/ADT/SmallVector.h"
57 #include "llvm/ADT/StringExtras.h"
58 #include "llvm/ADT/StringMap.h"
59 #include "llvm/ADT/StringRef.h"
60 #include "llvm/ADT/Twine.h"
61 #include "llvm/ADT/ilist.h"
62 #include "llvm/BinaryFormat/Dwarf.h"
63 #include "llvm/IR/Argument.h"
64 #include "llvm/IR/Attributes.h"
65 #include "llvm/IR/BasicBlock.h"
66 #include "llvm/IR/CFG.h"
67 #include "llvm/IR/CallingConv.h"
68 #include "llvm/IR/Comdat.h"
69 #include "llvm/IR/Constant.h"
70 #include "llvm/IR/ConstantRange.h"
71 #include "llvm/IR/Constants.h"
72 #include "llvm/IR/DataLayout.h"
73 #include "llvm/IR/DebugInfo.h"
74 #include "llvm/IR/DebugInfoMetadata.h"
75 #include "llvm/IR/DebugLoc.h"
76 #include "llvm/IR/DerivedTypes.h"
77 #include "llvm/IR/Dominators.h"
78 #include "llvm/IR/Function.h"
79 #include "llvm/IR/GlobalAlias.h"
80 #include "llvm/IR/GlobalValue.h"
81 #include "llvm/IR/GlobalVariable.h"
82 #include "llvm/IR/InlineAsm.h"
83 #include "llvm/IR/InstVisitor.h"
84 #include "llvm/IR/InstrTypes.h"
85 #include "llvm/IR/Instruction.h"
86 #include "llvm/IR/Instructions.h"
87 #include "llvm/IR/IntrinsicInst.h"
88 #include "llvm/IR/Intrinsics.h"
89 #include "llvm/IR/IntrinsicsWebAssembly.h"
90 #include "llvm/IR/LLVMContext.h"
91 #include "llvm/IR/Metadata.h"
92 #include "llvm/IR/Module.h"
93 #include "llvm/IR/ModuleSlotTracker.h"
94 #include "llvm/IR/PassManager.h"
95 #include "llvm/IR/Statepoint.h"
96 #include "llvm/IR/Type.h"
97 #include "llvm/IR/Use.h"
98 #include "llvm/IR/User.h"
99 #include "llvm/IR/Value.h"
100 #include "llvm/InitializePasses.h"
101 #include "llvm/Pass.h"
102 #include "llvm/Support/AtomicOrdering.h"
103 #include "llvm/Support/Casting.h"
104 #include "llvm/Support/CommandLine.h"
105 #include "llvm/Support/Debug.h"
106 #include "llvm/Support/ErrorHandling.h"
107 #include "llvm/Support/MathExtras.h"
108 #include "llvm/Support/raw_ostream.h"
109 #include <algorithm>
110 #include <cassert>
111 #include <cstdint>
112 #include <memory>
113 #include <string>
114 #include <utility>
115 
116 using namespace llvm;
117 
118 namespace llvm {
119 
120 struct VerifierSupport {
121   raw_ostream *OS;
122   const Module &M;
123   ModuleSlotTracker MST;
124   Triple TT;
125   const DataLayout &DL;
126   LLVMContext &Context;
127 
128   /// Track the brokenness of the module while recursively visiting.
129   bool Broken = false;
130   /// Broken debug info can be "recovered" from by stripping the debug info.
131   bool BrokenDebugInfo = false;
132   /// Whether to treat broken debug info as an error.
133   bool TreatBrokenDebugInfoAsError = true;
134 
135   explicit VerifierSupport(raw_ostream *OS, const Module &M)
136       : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
137         Context(M.getContext()) {}
138 
139 private:
140   void Write(const Module *M) {
141     *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
142   }
143 
144   void Write(const Value *V) {
145     if (V)
146       Write(*V);
147   }
148 
149   void Write(const Value &V) {
150     if (isa<Instruction>(V)) {
151       V.print(*OS, MST);
152       *OS << '\n';
153     } else {
154       V.printAsOperand(*OS, true, MST);
155       *OS << '\n';
156     }
157   }
158 
159   void Write(const Metadata *MD) {
160     if (!MD)
161       return;
162     MD->print(*OS, MST, &M);
163     *OS << '\n';
164   }
165 
166   template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
167     Write(MD.get());
168   }
169 
170   void Write(const NamedMDNode *NMD) {
171     if (!NMD)
172       return;
173     NMD->print(*OS, MST);
174     *OS << '\n';
175   }
176 
177   void Write(Type *T) {
178     if (!T)
179       return;
180     *OS << ' ' << *T;
181   }
182 
183   void Write(const Comdat *C) {
184     if (!C)
185       return;
186     *OS << *C;
187   }
188 
189   void Write(const APInt *AI) {
190     if (!AI)
191       return;
192     *OS << *AI << '\n';
193   }
194 
195   void Write(const unsigned i) { *OS << i << '\n'; }
196 
197   template <typename T> void Write(ArrayRef<T> Vs) {
198     for (const T &V : Vs)
199       Write(V);
200   }
201 
202   template <typename T1, typename... Ts>
203   void WriteTs(const T1 &V1, const Ts &... Vs) {
204     Write(V1);
205     WriteTs(Vs...);
206   }
207 
208   template <typename... Ts> void WriteTs() {}
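  // WriteTs(V1, ..., Vn) peels off one value per call, so it effectively
  // expands to Write(V1); ...; Write(Vn). The empty overload above
  // terminates the recursion.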
209 
210 public:
211   /// A check failed, so print out the condition and the message.
212   ///
213   /// This provides a nice place to put a breakpoint if you want to see why
214   /// something is not correct.
215   void CheckFailed(const Twine &Message) {
216     if (OS)
217       *OS << Message << '\n';
218     Broken = true;
219   }
220 
221   /// A check failed (with values to print).
222   ///
223   /// This calls the Message-only version so that the above is easier to set a
224   /// breakpoint on.
225   template <typename T1, typename... Ts>
226   void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
227     CheckFailed(Message);
228     if (OS)
229       WriteTs(V1, Vs...);
230   }
231 
232   /// A debug info check failed.
233   void DebugInfoCheckFailed(const Twine &Message) {
234     if (OS)
235       *OS << Message << '\n';
236     Broken |= TreatBrokenDebugInfoAsError;
237     BrokenDebugInfo = true;
238   }
239 
240   /// A debug info check failed (with values to print).
241   template <typename T1, typename... Ts>
242   void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
243                             const Ts &... Vs) {
244     DebugInfoCheckFailed(Message);
245     if (OS)
246       WriteTs(V1, Vs...);
247   }
248 };
249 
250 } // namespace llvm
251 
252 namespace {
253 
254 class Verifier : public InstVisitor<Verifier>, VerifierSupport {
255   friend class InstVisitor<Verifier>;
256 
257   DominatorTree DT;
258 
259   /// When verifying a basic block, keep track of all of the
260   /// instructions we have seen so far.
261   ///
262   /// This allows us to do efficient dominance checks for the case when an
263   /// instruction has an operand that is an instruction in the same block.
264   SmallPtrSet<Instruction *, 16> InstsInThisBlock;
265 
266   /// Keep track of the metadata nodes that have been checked already.
267   SmallPtrSet<const Metadata *, 32> MDNodes;
268 
269   /// Keep track of which DISubprogram is attached to which function.
270   DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;
271 
272   /// Track all DICompileUnits visited.
273   SmallPtrSet<const Metadata *, 2> CUVisited;
274 
275   /// The result type for a landingpad.
276   Type *LandingPadResultTy;
277 
278   /// Whether we've seen a call to @llvm.localescape in this function
279   /// already.
280   bool SawFrameEscape;
281 
282   /// Whether the current function has a DISubprogram attached to it.
283   bool HasDebugInfo = false;
284 
285   /// Whether source was present on the first DIFile encountered in each CU.
286   DenseMap<const DICompileUnit *, bool> HasSourceDebugInfo;
287 
288   /// Stores the count of how many objects were passed to llvm.localescape for a
289   /// given function and the largest index passed to llvm.localrecover.
290   DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
291 
292   // Maps catchswitches and cleanuppads that unwind to siblings to the
293   // terminators that indicate the unwind, used to detect cycles therein.
294   MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
295 
296   /// Cache of constants visited in search of ConstantExprs.
297   SmallPtrSet<const Constant *, 32> ConstantExprVisited;
298 
299   /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
300   SmallVector<const Function *, 4> DeoptimizeDeclarations;
301 
302   // Used when verifying that a GlobalValue is only used in this module.
303   // This set avoids visiting uses twice: we can arrive at a user more than
304   // once if it has multiple operands, and in particular very large constant
305   // expressions can lead us to a particular user many times.
306   SmallPtrSet<const Value *, 32> GlobalValueVisited;
307 
308   // Keeps track of duplicate function argument debug info.
309   SmallVector<const DILocalVariable *, 16> DebugFnArgs;
310 
311   TBAAVerifier TBAAVerifyHelper;
312 
313   void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
314 
315 public:
316   explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
317                     const Module &M)
318       : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
319         SawFrameEscape(false), TBAAVerifyHelper(this) {
320     TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
321   }
322 
323   bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
324 
325   bool verify(const Function &F) {
326     assert(F.getParent() == &M &&
327            "An instance of this class only works with a specific module!");
328 
329     // First ensure the function is well-enough formed to compute dominance
330     // information, and directly compute a dominance tree. We don't rely on the
331     // pass manager to provide this as it isolates us from a potentially
332     // out-of-date dominator tree and makes it significantly more complex to run
333     // this code outside of a pass manager.
334     // FIXME: It's really gross that we have to cast away constness here.
335     if (!F.empty())
336       DT.recalculate(const_cast<Function &>(F));
337 
338     for (const BasicBlock &BB : F) {
339       if (!BB.empty() && BB.back().isTerminator())
340         continue;
341 
342       if (OS) {
343         *OS << "Basic Block in function '" << F.getName()
344             << "' does not have terminator!\n";
345         BB.printAsOperand(*OS, true, MST);
346         *OS << "\n";
347       }
348       return false;
349     }
350 
351     Broken = false;
352     // FIXME: We strip const here because the inst visitor strips const.
353     visit(const_cast<Function &>(F));
354     verifySiblingFuncletUnwinds();
355     InstsInThisBlock.clear();
356     DebugFnArgs.clear();
357     LandingPadResultTy = nullptr;
358     SawFrameEscape = false;
359     SiblingFuncletInfo.clear();
360 
361     return !Broken;
362   }
363 
364   /// Verify the module that this instance of \c Verifier was initialized with.
365   bool verify() {
366     Broken = false;
367 
368     // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
369     for (const Function &F : M)
370       if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
371         DeoptimizeDeclarations.push_back(&F);
372 
373     // Now that we've visited every function, verify that we never asked to
374     // recover a frame index that wasn't escaped.
375     verifyFrameRecoverIndices();
376     for (const GlobalVariable &GV : M.globals())
377       visitGlobalVariable(GV);
378 
379     for (const GlobalAlias &GA : M.aliases())
380       visitGlobalAlias(GA);
381 
382     for (const NamedMDNode &NMD : M.named_metadata())
383       visitNamedMDNode(NMD);
384 
385     for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
386       visitComdat(SMEC.getValue());
387 
388     visitModuleFlags(M);
389     visitModuleIdents(M);
390     visitModuleCommandLines(M);
391 
392     verifyCompileUnits();
393 
394     verifyDeoptimizeCallingConvs();
395     DISubprogramAttachments.clear();
396     return !Broken;
397   }
398 
399 private:
400   /// Whether a metadata node is allowed to be, or contain, a DILocation.
401   enum class AreDebugLocsAllowed { No, Yes };
402 
403   // Verification methods...
404   void visitGlobalValue(const GlobalValue &GV);
405   void visitGlobalVariable(const GlobalVariable &GV);
406   void visitGlobalAlias(const GlobalAlias &GA);
407   void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
408   void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
409                            const GlobalAlias &A, const Constant &C);
410   void visitNamedMDNode(const NamedMDNode &NMD);
411   void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
412   void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
413   void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
414   void visitComdat(const Comdat &C);
415   void visitModuleIdents(const Module &M);
416   void visitModuleCommandLines(const Module &M);
417   void visitModuleFlags(const Module &M);
418   void visitModuleFlag(const MDNode *Op,
419                        DenseMap<const MDString *, const MDNode *> &SeenIDs,
420                        SmallVectorImpl<const MDNode *> &Requirements);
421   void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
422   void visitFunction(const Function &F);
423   void visitBasicBlock(BasicBlock &BB);
424   void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
425   void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
426   void visitProfMetadata(Instruction &I, MDNode *MD);
427 
428   template <class Ty> bool isValidMetadataArray(const MDTuple &N);
429 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
430 #include "llvm/IR/Metadata.def"
431   void visitDIScope(const DIScope &N);
432   void visitDIVariable(const DIVariable &N);
433   void visitDILexicalBlockBase(const DILexicalBlockBase &N);
434   void visitDITemplateParameter(const DITemplateParameter &N);
435 
436   void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
437 
438   // InstVisitor overrides...
439   using InstVisitor<Verifier>::visit;
440   void visit(Instruction &I);
441 
442   void visitTruncInst(TruncInst &I);
443   void visitZExtInst(ZExtInst &I);
444   void visitSExtInst(SExtInst &I);
445   void visitFPTruncInst(FPTruncInst &I);
446   void visitFPExtInst(FPExtInst &I);
447   void visitFPToUIInst(FPToUIInst &I);
448   void visitFPToSIInst(FPToSIInst &I);
449   void visitUIToFPInst(UIToFPInst &I);
450   void visitSIToFPInst(SIToFPInst &I);
451   void visitIntToPtrInst(IntToPtrInst &I);
452   void visitPtrToIntInst(PtrToIntInst &I);
453   void visitBitCastInst(BitCastInst &I);
454   void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
455   void visitPHINode(PHINode &PN);
456   void visitCallBase(CallBase &Call);
457   void visitUnaryOperator(UnaryOperator &U);
458   void visitBinaryOperator(BinaryOperator &B);
459   void visitICmpInst(ICmpInst &IC);
460   void visitFCmpInst(FCmpInst &FC);
461   void visitExtractElementInst(ExtractElementInst &EI);
462   void visitInsertElementInst(InsertElementInst &EI);
463   void visitShuffleVectorInst(ShuffleVectorInst &EI);
464   void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
465   void visitCallInst(CallInst &CI);
466   void visitInvokeInst(InvokeInst &II);
467   void visitGetElementPtrInst(GetElementPtrInst &GEP);
468   void visitLoadInst(LoadInst &LI);
469   void visitStoreInst(StoreInst &SI);
470   void verifyDominatesUse(Instruction &I, unsigned i);
471   void visitInstruction(Instruction &I);
472   void visitTerminator(Instruction &I);
473   void visitBranchInst(BranchInst &BI);
474   void visitReturnInst(ReturnInst &RI);
475   void visitSwitchInst(SwitchInst &SI);
476   void visitIndirectBrInst(IndirectBrInst &BI);
477   void visitCallBrInst(CallBrInst &CBI);
478   void visitSelectInst(SelectInst &SI);
479   void visitUserOp1(Instruction &I);
480   void visitUserOp2(Instruction &I) { visitUserOp1(I); }
481   void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
482   void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
483   void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
484   void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
485   void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
486   void visitAtomicRMWInst(AtomicRMWInst &RMWI);
487   void visitFenceInst(FenceInst &FI);
488   void visitAllocaInst(AllocaInst &AI);
489   void visitExtractValueInst(ExtractValueInst &EVI);
490   void visitInsertValueInst(InsertValueInst &IVI);
491   void visitEHPadPredecessors(Instruction &I);
492   void visitLandingPadInst(LandingPadInst &LPI);
493   void visitResumeInst(ResumeInst &RI);
494   void visitCatchPadInst(CatchPadInst &CPI);
495   void visitCatchReturnInst(CatchReturnInst &CatchReturn);
496   void visitCleanupPadInst(CleanupPadInst &CPI);
497   void visitFuncletPadInst(FuncletPadInst &FPI);
498   void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
499   void visitCleanupReturnInst(CleanupReturnInst &CRI);
500 
501   void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
502   void verifySwiftErrorValue(const Value *SwiftErrorVal);
503   void verifyMustTailCall(CallInst &CI);
504   bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
505                         unsigned ArgNo, std::string &Suffix);
506   bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
507   void verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
508                             const Value *V);
509   void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
510   void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
511                            const Value *V, bool IsIntrinsic);
512   void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
513 
514   void visitConstantExprsRecursively(const Constant *EntryC);
515   void visitConstantExpr(const ConstantExpr *CE);
516   void verifyStatepoint(const CallBase &Call);
517   void verifyFrameRecoverIndices();
518   void verifySiblingFuncletUnwinds();
519 
520   void verifyFragmentExpression(const DbgVariableIntrinsic &I);
521   template <typename ValueOrMetadata>
522   void verifyFragmentExpression(const DIVariable &V,
523                                 DIExpression::FragmentInfo Fragment,
524                                 ValueOrMetadata *Desc);
525   void verifyFnArgs(const DbgVariableIntrinsic &I);
526   void verifyNotEntryValue(const DbgVariableIntrinsic &I);
527 
528   /// Module-level debug info verification...
529   void verifyCompileUnits();
530 
531   /// Module-level verification that all @llvm.experimental.deoptimize
532   /// declarations share the same calling convention.
533   void verifyDeoptimizeCallingConvs();
534 
535   /// Verify all-or-nothing property of DIFile source attribute within a CU.
536   void verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F);
537 };
538 
539 } // end anonymous namespace
540 
541 /// We know that cond should be true; if not, print an error message.
542 #define Assert(C, ...) \
543   do { if (!(C)) { CheckFailed(__VA_ARGS__); return; } } while (false)
544 
545 /// We know that a debug info condition should be true; if not, print
546 /// an error message.
547 #define AssertDI(C, ...) \
548   do { if (!(C)) { DebugInfoCheckFailed(__VA_ARGS__); return; } } while (false)
549 
550 void Verifier::visit(Instruction &I) {
551   for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
552     Assert(I.getOperand(i) != nullptr, "Operand is null", &I);
553   InstVisitor<Verifier>::visit(I);
554 }
555 
556 // Helper to recursively iterate over indirect users. By
557 // returning false, the callback can ask to stop recursing
558 // further.
559 static void forEachUser(const Value *User,
560                         SmallPtrSet<const Value *, 32> &Visited,
561                         llvm::function_ref<bool(const Value *)> Callback) {
562   if (!Visited.insert(User).second)
563     return;
564   for (const Value *TheNextUser : User->materialized_users())
565     if (Callback(TheNextUser))
566       forEachUser(TheNextUser, Visited, Callback);
567 }
568 
569 void Verifier::visitGlobalValue(const GlobalValue &GV) {
570   Assert(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(),
571          "Global is external, but doesn't have external or weak linkage!", &GV);
572 
573   Assert(GV.getAlignment() <= Value::MaximumAlignment,
574          "huge alignment values are unsupported", &GV);
575   Assert(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
576          "Only global variables can have appending linkage!", &GV);
577 
578   if (GV.hasAppendingLinkage()) {
579     const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
580     Assert(GVar && GVar->getValueType()->isArrayTy(),
581            "Only global arrays can have appending linkage!", GVar);
582   }
583 
584   if (GV.isDeclarationForLinker())
585     Assert(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
586 
587   if (GV.hasDLLImportStorageClass()) {
588     Assert(!GV.isDSOLocal(),
589            "GlobalValue with DLLImport Storage is dso_local!", &GV);
590 
591     Assert((GV.isDeclaration() && GV.hasExternalLinkage()) ||
592                GV.hasAvailableExternallyLinkage(),
593            "Global is marked as dllimport, but not external", &GV);
594   }
595 
596   if (GV.isImplicitDSOLocal())
597     Assert(GV.isDSOLocal(),
598            "GlobalValue with local linkage or non-default "
599            "visibility must be dso_local!",
600            &GV);
601 
602   forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
603     if (const Instruction *I = dyn_cast<Instruction>(V)) {
604       if (!I->getParent() || !I->getParent()->getParent())
605         CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
606                     I);
607       else if (I->getParent()->getParent()->getParent() != &M)
608         CheckFailed("Global is referenced in a different module!", &GV, &M, I,
609                     I->getParent()->getParent(),
610                     I->getParent()->getParent()->getParent());
611       return false;
612     } else if (const Function *F = dyn_cast<Function>(V)) {
613       if (F->getParent() != &M)
614         CheckFailed("Global is used by function in a different module", &GV, &M,
615                     F, F->getParent());
616       return false;
617     }
618     return true;
619   });
620 }
621 
622 void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
623   if (GV.hasInitializer()) {
624     Assert(GV.getInitializer()->getType() == GV.getValueType(),
625            "Global variable initializer type does not match global "
626            "variable type!",
627            &GV);
628     // If the global has common linkage, it must have a zero initializer and
629     // cannot be constant.
630     if (GV.hasCommonLinkage()) {
631       Assert(GV.getInitializer()->isNullValue(),
632              "'common' global must have a zero initializer!", &GV);
633       Assert(!GV.isConstant(), "'common' global may not be marked constant!",
634              &GV);
635       Assert(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
636     }
637   }
638 
639   if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
640                        GV.getName() == "llvm.global_dtors")) {
641     Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
642            "invalid linkage for intrinsic global variable", &GV);
643     // Don't worry about emitting an error for it not being an array;
644     // visitGlobalValue will complain about appending linkage on a non-array.
645     if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
646       StructType *STy = dyn_cast<StructType>(ATy->getElementType());
647       PointerType *FuncPtrTy =
648           FunctionType::get(Type::getVoidTy(Context), false)->
649           getPointerTo(DL.getProgramAddressSpace());
650       Assert(STy &&
651                  (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
652                  STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
653                  STy->getTypeAtIndex(1) == FuncPtrTy,
654              "wrong type for intrinsic global variable", &GV);
655       Assert(STy->getNumElements() == 3,
656              "the third field of the element type is mandatory, "
657              "specify i8* null to migrate from the obsoleted 2-field form");
658       Type *ETy = STy->getTypeAtIndex(2);
659       Assert(ETy->isPointerTy() &&
660                  cast<PointerType>(ETy)->getElementType()->isIntegerTy(8),
661              "wrong type for intrinsic global variable", &GV);
662     }
663   }
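
  // For reference, a hypothetical IR declaration that satisfies the checks
  // above (using the mandatory 3-field form) would look roughly like:
  //
  //   @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }]
  //       [{ i32, void ()*, i8* } { i32 65535, void ()* @ctor, i8* null }]
  //
  // where @ctor is some constructor function and the trailing i8* is the
  // associated-data field.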
664 
665   if (GV.hasName() && (GV.getName() == "llvm.used" ||
666                        GV.getName() == "llvm.compiler.used")) {
667     Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
668            "invalid linkage for intrinsic global variable", &GV);
669     Type *GVType = GV.getValueType();
670     if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
671       PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
672       Assert(PTy, "wrong type for intrinsic global variable", &GV);
673       if (GV.hasInitializer()) {
674         const Constant *Init = GV.getInitializer();
675         const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
676         Assert(InitArray, "wrong initializer for intrinsic global variable",
677                Init);
678         for (Value *Op : InitArray->operands()) {
679           Value *V = Op->stripPointerCasts();
680           Assert(isa<GlobalVariable>(V) || isa<Function>(V) ||
681                      isa<GlobalAlias>(V),
682                  "invalid llvm.used member", V);
683           Assert(V->hasName(), "members of llvm.used must be named", V);
684         }
685       }
686     }
687   }
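
  // For reference, a hypothetical well-formed llvm.used looks roughly like:
  //
  //   @x = global i32 0
  //   @llvm.used = appending global [1 x i8*]
  //       [i8* bitcast (i32* @x to i8*)], section "llvm.metadata"
  //
  // i.e. an appending array of pointers whose elements, after stripping
  // pointer casts, are named globals, functions, or aliases.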
688 
689   // Visit any debug info attachments.
690   SmallVector<MDNode *, 1> MDs;
691   GV.getMetadata(LLVMContext::MD_dbg, MDs);
692   for (auto *MD : MDs) {
693     if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
694       visitDIGlobalVariableExpression(*GVE);
695     else
696       AssertDI(false, "!dbg attachment of global variable must be a "
697                       "DIGlobalVariableExpression");
698   }
699 
700   // Scalable vectors cannot be global variables, since we don't know
701   // the runtime size. If the global is a struct or an array containing
702   // scalable vectors, that will be caught by the isValidElementType methods
703   // in StructType or ArrayType instead.
704   Assert(!isa<ScalableVectorType>(GV.getValueType()),
705          "Globals cannot contain scalable vectors", &GV);
706 
707   if (!GV.hasInitializer()) {
708     visitGlobalValue(GV);
709     return;
710   }
711 
712   // Walk any aggregate initializers looking for bitcasts between address spaces
713   visitConstantExprsRecursively(GV.getInitializer());
714 
715   visitGlobalValue(GV);
716 }
717 
718 void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
719   SmallPtrSet<const GlobalAlias*, 4> Visited;
720   Visited.insert(&GA);
721   visitAliaseeSubExpr(Visited, GA, C);
722 }
723 
724 void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
725                                    const GlobalAlias &GA, const Constant &C) {
726   if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
727     Assert(!GV->isDeclarationForLinker(), "Alias must point to a definition",
728            &GA);
729 
730     if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
731       Assert(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
732 
733       Assert(!GA2->isInterposable(), "Alias cannot point to an interposable alias",
734              &GA);
735     } else {
736       // Only continue verifying subexpressions of GlobalAliases.
737       // Do not recurse into global initializers.
738       return;
739     }
740   }
741 
742   if (const auto *CE = dyn_cast<ConstantExpr>(&C))
743     visitConstantExprsRecursively(CE);
744 
745   for (const Use &U : C.operands()) {
746     Value *V = &*U;
747     if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
748       visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
749     else if (const auto *C2 = dyn_cast<Constant>(V))
750       visitAliaseeSubExpr(Visited, GA, *C2);
751   }
752 }
753 
754 void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
755   Assert(GlobalAlias::isValidLinkage(GA.getLinkage()),
756          "Alias should have private, internal, linkonce, weak, linkonce_odr, "
757          "weak_odr, or external linkage!",
758          &GA);
759   const Constant *Aliasee = GA.getAliasee();
760   Assert(Aliasee, "Aliasee cannot be NULL!", &GA);
761   Assert(GA.getType() == Aliasee->getType(),
762          "Alias and aliasee types should match!", &GA);
763 
764   Assert(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
765          "Aliasee should be either GlobalValue or ConstantExpr", &GA);
766 
767   visitAliaseeSubExpr(GA, *Aliasee);
768 
769   visitGlobalValue(GA);
770 }
771 
772 void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
773   // There used to be various other llvm.dbg.* nodes, but we don't support
774   // upgrading them and we want to reserve the namespace for future uses.
775   if (NMD.getName().startswith("llvm.dbg."))
776     AssertDI(NMD.getName() == "llvm.dbg.cu",
777              "unrecognized named metadata node in the llvm.dbg namespace",
778              &NMD);
779   for (const MDNode *MD : NMD.operands()) {
780     if (NMD.getName() == "llvm.dbg.cu")
781       AssertDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
782 
783     if (!MD)
784       continue;
785 
786     visitMDNode(*MD, AreDebugLocsAllowed::Yes);
787   }
788 }
789 
790 void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
791   // Only visit each node once.  Metadata can be mutually recursive, so this
792   // avoids infinite recursion here, as well as being an optimization.
793   if (!MDNodes.insert(&MD).second)
794     return;
795 
796   switch (MD.getMetadataID()) {
797   default:
798     llvm_unreachable("Invalid MDNode subclass");
799   case Metadata::MDTupleKind:
800     break;
801 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
802   case Metadata::CLASS##Kind:                                                  \
803     visit##CLASS(cast<CLASS>(MD));                                             \
804     break;
805 #include "llvm/IR/Metadata.def"
806   }
807 
808   for (const Metadata *Op : MD.operands()) {
809     if (!Op)
810       continue;
811     Assert(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
812            &MD, Op);
813     AssertDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
814              "DILocation not allowed within this metadata node", &MD, Op);
815     if (auto *N = dyn_cast<MDNode>(Op)) {
816       visitMDNode(*N, AllowLocs);
817       continue;
818     }
819     if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
820       visitValueAsMetadata(*V, nullptr);
821       continue;
822     }
823   }
824 
825   // Check these last, so we diagnose problems in operands first.
826   Assert(!MD.isTemporary(), "Expected no forward declarations!", &MD);
827   Assert(MD.isResolved(), "All nodes should be resolved!", &MD);
828 }
829 
830 void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
831   Assert(MD.getValue(), "Expected valid value", &MD);
832   Assert(!MD.getValue()->getType()->isMetadataTy(),
833          "Unexpected metadata round-trip through values", &MD, MD.getValue());
834 
835   auto *L = dyn_cast<LocalAsMetadata>(&MD);
836   if (!L)
837     return;
838 
839   Assert(F, "function-local metadata used outside a function", L);
840 
841   // If this was an instruction, bb, or argument, verify that it is in the
842   // function that we expect.
843   Function *ActualF = nullptr;
844   if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
845     Assert(I->getParent(), "function-local metadata not in basic block", L, I);
846     ActualF = I->getParent()->getParent();
847   } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
848     ActualF = BB->getParent();
849   else if (Argument *A = dyn_cast<Argument>(L->getValue()))
850     ActualF = A->getParent();
851   assert(ActualF && "Unimplemented function local metadata case!");
852 
853   Assert(ActualF == F, "function-local metadata used in wrong function", L);
854 }
855 
856 void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
857   Metadata *MD = MDV.getMetadata();
858   if (auto *N = dyn_cast<MDNode>(MD)) {
859     visitMDNode(*N, AreDebugLocsAllowed::No);
860     return;
861   }
862 
863   // Only visit each node once.  Metadata can be mutually recursive, so this
864   // avoids infinite recursion here, as well as being an optimization.
865   if (!MDNodes.insert(MD).second)
866     return;
867 
868   if (auto *V = dyn_cast<ValueAsMetadata>(MD))
869     visitValueAsMetadata(*V, F);
870 }
871 
872 static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
873 static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
874 static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
875 
876 void Verifier::visitDILocation(const DILocation &N) {
877   AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
878            "location requires a valid scope", &N, N.getRawScope());
879   if (auto *IA = N.getRawInlinedAt())
880     AssertDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
881   if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
882     AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
883 }
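
// For reference, a hypothetical well-formed location is
//
//   !10 = !DILocation(line: 2, column: 7, scope: !9, inlinedAt: !11)
//
// where !9 must be a DILocalScope (e.g. a distinct DISubprogram definition or
// a lexical block) and !11, if present, must itself be a DILocation.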
884 
885 void Verifier::visitGenericDINode(const GenericDINode &N) {
886   AssertDI(N.getTag(), "invalid tag", &N);
887 }
888 
889 void Verifier::visitDIScope(const DIScope &N) {
890   if (auto *F = N.getRawFile())
891     AssertDI(isa<DIFile>(F), "invalid file", &N, F);
892 }
893 
894 void Verifier::visitDISubrange(const DISubrange &N) {
895   AssertDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
896   auto Count = N.getCount();
897   AssertDI(Count, "Count must either be a signed constant or a DIVariable",
898            &N);
899   AssertDI(!Count.is<ConstantInt*>() ||
900                Count.get<ConstantInt*>()->getSExtValue() >= -1,
901            "invalid subrange count", &N);
902 }
903 
904 void Verifier::visitDIEnumerator(const DIEnumerator &N) {
905   AssertDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
906 }
907 
908 void Verifier::visitDIBasicType(const DIBasicType &N) {
909   AssertDI(N.getTag() == dwarf::DW_TAG_base_type ||
910                N.getTag() == dwarf::DW_TAG_unspecified_type,
911            "invalid tag", &N);
912   AssertDI(!(N.isBigEndian() && N.isLittleEndian()),
913            "has conflicting flags", &N);
914 }
915 
916 void Verifier::visitDIDerivedType(const DIDerivedType &N) {
917   // Common scope checks.
918   visitDIScope(N);
919 
920   AssertDI(N.getTag() == dwarf::DW_TAG_typedef ||
921                N.getTag() == dwarf::DW_TAG_pointer_type ||
922                N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
923                N.getTag() == dwarf::DW_TAG_reference_type ||
924                N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
925                N.getTag() == dwarf::DW_TAG_const_type ||
926                N.getTag() == dwarf::DW_TAG_volatile_type ||
927                N.getTag() == dwarf::DW_TAG_restrict_type ||
928                N.getTag() == dwarf::DW_TAG_atomic_type ||
929                N.getTag() == dwarf::DW_TAG_member ||
930                N.getTag() == dwarf::DW_TAG_inheritance ||
931                N.getTag() == dwarf::DW_TAG_friend,
932            "invalid tag", &N);
933   if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
934     AssertDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
935              N.getRawExtraData());
936   }
937 
938   AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
939   AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
940            N.getRawBaseType());
941 
942   if (N.getDWARFAddressSpace()) {
943     AssertDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
944                  N.getTag() == dwarf::DW_TAG_reference_type ||
945                  N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
946              "DWARF address space only applies to pointer or reference types",
947              &N);
948   }
949 }
950 
951 /// Detect mutually exclusive flags.
952 static bool hasConflictingReferenceFlags(unsigned Flags) {
953   return ((Flags & DINode::FlagLValueReference) &&
954           (Flags & DINode::FlagRValueReference)) ||
955          ((Flags & DINode::FlagTypePassByValue) &&
956           (Flags & DINode::FlagTypePassByReference));
957 }
958 
959 void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
960   auto *Params = dyn_cast<MDTuple>(&RawParams);
961   AssertDI(Params, "invalid template params", &N, &RawParams);
962   for (Metadata *Op : Params->operands()) {
963     AssertDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
964              &N, Params, Op);
965   }
966 }
967 
968 void Verifier::visitDICompositeType(const DICompositeType &N) {
969   // Common scope checks.
970   visitDIScope(N);
971 
972   AssertDI(N.getTag() == dwarf::DW_TAG_array_type ||
973                N.getTag() == dwarf::DW_TAG_structure_type ||
974                N.getTag() == dwarf::DW_TAG_union_type ||
975                N.getTag() == dwarf::DW_TAG_enumeration_type ||
976                N.getTag() == dwarf::DW_TAG_class_type ||
977                N.getTag() == dwarf::DW_TAG_variant_part,
978            "invalid tag", &N);
979 
980   AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
981   AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
982            N.getRawBaseType());
983 
984   AssertDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
985            "invalid composite elements", &N, N.getRawElements());
986   AssertDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
987            N.getRawVTableHolder());
988   AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
989            "invalid reference flags", &N);
990   unsigned DIBlockByRefStruct = 1 << 4;
991   AssertDI((N.getFlags() & DIBlockByRefStruct) == 0,
992            "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
993 
994   if (N.isVector()) {
995     const DINodeArray Elements = N.getElements();
996     AssertDI(Elements.size() == 1 &&
997              Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
998              "invalid vector, expected one element of type subrange", &N);
999   }
1000 
1001   if (auto *Params = N.getRawTemplateParams())
1002     visitTemplateParams(N, *Params);
1003 
1004   if (N.getTag() == dwarf::DW_TAG_class_type ||
1005       N.getTag() == dwarf::DW_TAG_union_type) {
1006     AssertDI(N.getFile() && !N.getFile()->getFilename().empty(),
1007              "class/union requires a filename", &N, N.getFile());
1008   }
1009 
1010   if (auto *D = N.getRawDiscriminator()) {
1011     AssertDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1012              "discriminator can only appear on variant part");
1013   }
1014 }
1015 
1016 void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1017   AssertDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1018   if (auto *Types = N.getRawTypeArray()) {
1019     AssertDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1020     for (Metadata *Ty : N.getTypeArray()->operands()) {
1021       AssertDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1022     }
1023   }
1024   AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
1025            "invalid reference flags", &N);
1026 }
1027 
1028 void Verifier::visitDIFile(const DIFile &N) {
1029   AssertDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1030   Optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1031   if (Checksum) {
1032     AssertDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1033              "invalid checksum kind", &N);
1034     size_t Size;
1035     switch (Checksum->Kind) {
1036     case DIFile::CSK_MD5:
1037       Size = 32;
1038       break;
1039     case DIFile::CSK_SHA1:
1040       Size = 40;
1041       break;
1042     case DIFile::CSK_SHA256:
1043       Size = 64;
1044       break;
1045     }
1046     AssertDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1047     AssertDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1048              "invalid checksum", &N);
1049   }
1050 }
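
// For reference, the checksum lengths above are the hex-digest lengths of the
// corresponding algorithms. A hypothetical well-formed file node is
//
//   !1 = !DIFile(filename: "a.c", directory: "/tmp",
//                checksumkind: CSK_MD5,
//                checksum: "595f44fec1e92a71d3e9e77456ba80d1")
//
// where the checksum string is exactly 32 hex digits for MD5 (40 for SHA1,
// 64 for SHA256).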
1051 
1052 void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1053   AssertDI(N.isDistinct(), "compile units must be distinct", &N);
1054   AssertDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1055 
1056   // Don't bother verifying the compilation directory or producer string
1057   // as those could be empty.
1058   AssertDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1059            N.getRawFile());
1060   AssertDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1061            N.getFile());
1062 
1063   verifySourceDebugInfo(N, *N.getFile());
1064 
1065   AssertDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1066            "invalid emission kind", &N);
1067 
1068   if (auto *Array = N.getRawEnumTypes()) {
1069     AssertDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1070     for (Metadata *Op : N.getEnumTypes()->operands()) {
1071       auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1072       AssertDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1073                "invalid enum type", &N, N.getEnumTypes(), Op);
1074     }
1075   }
1076   if (auto *Array = N.getRawRetainedTypes()) {
1077     AssertDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1078     for (Metadata *Op : N.getRetainedTypes()->operands()) {
1079       AssertDI(Op && (isa<DIType>(Op) ||
1080                       (isa<DISubprogram>(Op) &&
1081                        !cast<DISubprogram>(Op)->isDefinition())),
1082                "invalid retained type", &N, Op);
1083     }
1084   }
1085   if (auto *Array = N.getRawGlobalVariables()) {
1086     AssertDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1087     for (Metadata *Op : N.getGlobalVariables()->operands()) {
1088       AssertDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1089                "invalid global variable ref", &N, Op);
1090     }
1091   }
1092   if (auto *Array = N.getRawImportedEntities()) {
1093     AssertDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1094     for (Metadata *Op : N.getImportedEntities()->operands()) {
1095       AssertDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1096                &N, Op);
1097     }
1098   }
1099   if (auto *Array = N.getRawMacros()) {
1100     AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1101     for (Metadata *Op : N.getMacros()->operands()) {
1102       AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1103     }
1104   }
1105   CUVisited.insert(&N);
1106 }
1107 
1108 void Verifier::visitDISubprogram(const DISubprogram &N) {
1109   AssertDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1110   AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1111   if (auto *F = N.getRawFile())
1112     AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1113   else
1114     AssertDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1115   if (auto *T = N.getRawType())
1116     AssertDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1117   AssertDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1118            N.getRawContainingType());
1119   if (auto *Params = N.getRawTemplateParams())
1120     visitTemplateParams(N, *Params);
1121   if (auto *S = N.getRawDeclaration())
1122     AssertDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1123              "invalid subprogram declaration", &N, S);
1124   if (auto *RawNode = N.getRawRetainedNodes()) {
1125     auto *Node = dyn_cast<MDTuple>(RawNode);
1126     AssertDI(Node, "invalid retained nodes list", &N, RawNode);
1127     for (Metadata *Op : Node->operands()) {
1128       AssertDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op)),
1129                "invalid retained nodes, expected DILocalVariable or DILabel",
1130                &N, Node, Op);
1131     }
1132   }
1133   AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
1134            "invalid reference flags", &N);
1135 
1136   auto *Unit = N.getRawUnit();
1137   if (N.isDefinition()) {
1138     // Subprogram definitions (not part of the type hierarchy).
1139     AssertDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1140     AssertDI(Unit, "subprogram definitions must have a compile unit", &N);
1141     AssertDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1142     if (N.getFile())
1143       verifySourceDebugInfo(*N.getUnit(), *N.getFile());
1144   } else {
1145     // Subprogram declarations (part of the type hierarchy).
1146     AssertDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1147   }
1148 
1149   if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1150     auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1151     AssertDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1152     for (Metadata *Op : ThrownTypes->operands())
1153       AssertDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1154                Op);
1155   }
1156 
1157   if (N.areAllCallsDescribed())
1158     AssertDI(N.isDefinition(),
1159              "DIFlagAllCallsDescribed must be attached to a definition");
1160 }
1161 
1162 void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1163   AssertDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1164   AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1165            "invalid local scope", &N, N.getRawScope());
1166   if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1167     AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1168 }
1169 
1170 void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1171   visitDILexicalBlockBase(N);
1172 
1173   AssertDI(N.getLine() || !N.getColumn(),
1174            "cannot have column info without line info", &N);
1175 }
1176 
1177 void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1178   visitDILexicalBlockBase(N);
1179 }
1180 
1181 void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1182   AssertDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1183   if (auto *S = N.getRawScope())
1184     AssertDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1185   if (auto *S = N.getRawDecl())
1186     AssertDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1187 }
1188 
1189 void Verifier::visitDINamespace(const DINamespace &N) {
1190   AssertDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1191   if (auto *S = N.getRawScope())
1192     AssertDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1193 }
1194 
1195 void Verifier::visitDIMacro(const DIMacro &N) {
1196   AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1197                N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1198            "invalid macinfo type", &N);
1199   AssertDI(!N.getName().empty(), "anonymous macro", &N);
1200   if (!N.getValue().empty()) {
1201     assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1202   }
1203 }
1204 
1205 void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1206   AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1207            "invalid macinfo type", &N);
1208   if (auto *F = N.getRawFile())
1209     AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1210 
1211   if (auto *Array = N.getRawElements()) {
1212     AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1213     for (Metadata *Op : N.getElements()->operands()) {
1214       AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1215     }
1216   }
1217 }
1218 
1219 void Verifier::visitDIModule(const DIModule &N) {
1220   AssertDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1221   AssertDI(!N.getName().empty(), "anonymous module", &N);
1222 }
1223 
1224 void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1225   AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1226 }
1227 
1228 void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1229   visitDITemplateParameter(N);
1230 
1231   AssertDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1232            &N);
1233 }
1234 
1235 void Verifier::visitDITemplateValueParameter(
1236     const DITemplateValueParameter &N) {
1237   visitDITemplateParameter(N);
1238 
1239   AssertDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1240                N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1241                N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1242            "invalid tag", &N);
1243 }
1244 
1245 void Verifier::visitDIVariable(const DIVariable &N) {
1246   if (auto *S = N.getRawScope())
1247     AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
1248   if (auto *F = N.getRawFile())
1249     AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1250 }
1251 
1252 void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1253   // Checks common to all variables.
1254   visitDIVariable(N);
1255 
1256   AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1257   AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1258   AssertDI(N.getType(), "missing global variable type", &N);
1259   if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1260     AssertDI(isa<DIDerivedType>(Member),
1261              "invalid static data member declaration", &N, Member);
1262   }
1263 }
1264 
1265 void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1266   // Checks common to all variables.
1267   visitDIVariable(N);
1268 
1269   AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1270   AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1271   AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1272            "local variable requires a valid scope", &N, N.getRawScope());
1273   if (auto Ty = N.getType())
1274     AssertDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1275 }
1276 
1277 void Verifier::visitDILabel(const DILabel &N) {
1278   if (auto *S = N.getRawScope())
1279     AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
1280   if (auto *F = N.getRawFile())
1281     AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1282 
1283   AssertDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1284   AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1285            "label requires a valid scope", &N, N.getRawScope());
1286 }
1287 
1288 void Verifier::visitDIExpression(const DIExpression &N) {
1289   AssertDI(N.isValid(), "invalid expression", &N);
1290 }
1291 
1292 void Verifier::visitDIGlobalVariableExpression(
1293     const DIGlobalVariableExpression &GVE) {
1294   AssertDI(GVE.getVariable(), "missing variable");
1295   if (auto *Var = GVE.getVariable())
1296     visitDIGlobalVariable(*Var);
1297   if (auto *Expr = GVE.getExpression()) {
1298     visitDIExpression(*Expr);
1299     if (auto Fragment = Expr->getFragmentInfo())
1300       verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1301   }
1302 }
1303 
1304 void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1305   AssertDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1306   if (auto *T = N.getRawType())
1307     AssertDI(isType(T), "invalid type ref", &N, T);
1308   if (auto *F = N.getRawFile())
1309     AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1310 }
1311 
1312 void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1313   AssertDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1314                N.getTag() == dwarf::DW_TAG_imported_declaration,
1315            "invalid tag", &N);
1316   if (auto *S = N.getRawScope())
1317     AssertDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1318   AssertDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1319            N.getRawEntity());
1320 }
1321 
1322 void Verifier::visitComdat(const Comdat &C) {
1323   // In COFF the Module is invalid if the GlobalValue has private linkage.
1324   // Entities with private linkage don't have entries in the symbol table.
1325   if (TT.isOSBinFormatCOFF())
1326     if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1327       Assert(!GV->hasPrivateLinkage(),
1328              "comdat global value has private linkage", GV);
1329 }
1330 
1331 void Verifier::visitModuleIdents(const Module &M) {
1332   const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1333   if (!Idents)
1334     return;
1335 
1336   // llvm.ident takes a list of metadata entries. Each entry holds exactly one
1337   // string. Scan each llvm.ident entry and make sure this requirement is met.
1338   for (const MDNode *N : Idents->operands()) {
1339     Assert(N->getNumOperands() == 1,
1340            "incorrect number of operands in llvm.ident metadata", N);
1341     Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
1342            ("invalid value for llvm.ident metadata entry operand "
1343             "(the operand should be a string)"),
1344            N->getOperand(0));
1345   }
1346 }
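
// For reference, a hypothetical well-formed llvm.ident looks like:
//
//   !llvm.ident = !{!0}
//   !0 = !{!"some compiler version 1.0"}
//
// i.e. each operand of the named node is itself a node with a single string.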
1347 
1348 void Verifier::visitModuleCommandLines(const Module &M) {
1349   const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1350   if (!CommandLines)
1351     return;
1352 
  // llvm.commandline takes a list of metadata entries; each entry contains a
  // single string. Scan each llvm.commandline entry and make sure that this
  // requirement is met.
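  // For example (illustrative):
  //   !llvm.commandline = !{!0}
  //   !0 = !{!"<the command line used to produce this module>"}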
1356   for (const MDNode *N : CommandLines->operands()) {
1357     Assert(N->getNumOperands() == 1,
1358            "incorrect number of operands in llvm.commandline metadata", N);
1359     Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
1360            ("invalid value for llvm.commandline metadata entry operand"
1361             "(the operand should be a string)"),
1362            N->getOperand(0));
1363   }
1364 }
1365 
1366 void Verifier::visitModuleFlags(const Module &M) {
1367   const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1368   if (!Flags) return;
1369 
1370   // Scan each flag, and track the flags and requirements.
1371   DenseMap<const MDString*, const MDNode*> SeenIDs;
1372   SmallVector<const MDNode*, 16> Requirements;
1373   for (const MDNode *MDN : Flags->operands())
1374     visitModuleFlag(MDN, SeenIDs, Requirements);
1375 
  // Check that the requirements in the module are satisfied.
1377   for (const MDNode *Requirement : Requirements) {
1378     const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1379     const Metadata *ReqValue = Requirement->getOperand(1);
1380 
1381     const MDNode *Op = SeenIDs.lookup(Flag);
1382     if (!Op) {
1383       CheckFailed("invalid requirement on flag, flag is not present in module",
1384                   Flag);
1385       continue;
1386     }
1387 
1388     if (Op->getOperand(2) != ReqValue) {
1389       CheckFailed(("invalid requirement on flag, "
1390                    "flag does not have the required value"),
1391                   Flag);
1392       continue;
1393     }
1394   }
1395 }
1396 
1397 void
1398 Verifier::visitModuleFlag(const MDNode *Op,
1399                           DenseMap<const MDString *, const MDNode *> &SeenIDs,
1400                           SmallVectorImpl<const MDNode *> &Requirements) {
  // Each module flag should have three operands: the merge behavior (a
  // constant int), the flag ID (an MDString), and the value.
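  // For example (illustrative):
  //   !{i32 1, !"wchar_size", i32 4}
  // Here i32 1 is the Error merge behavior, "wchar_size" is the flag ID, and
  // i32 4 is the value.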
1403   Assert(Op->getNumOperands() == 3,
1404          "incorrect number of operands in module flag", Op);
1405   Module::ModFlagBehavior MFB;
1406   if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1407     Assert(
1408         mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1409         "invalid behavior operand in module flag (expected constant integer)",
1410         Op->getOperand(0));
1411     Assert(false,
1412            "invalid behavior operand in module flag (unexpected constant)",
1413            Op->getOperand(0));
1414   }
1415   MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1416   Assert(ID, "invalid ID operand in module flag (expected metadata string)",
1417          Op->getOperand(1));
1418 
1419   // Sanity check the values for behaviors with additional requirements.
1420   switch (MFB) {
1421   case Module::Error:
1422   case Module::Warning:
1423   case Module::Override:
1424     // These behavior types accept any value.
1425     break;
1426 
1427   case Module::Max: {
1428     Assert(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1429            "invalid value for 'max' module flag (expected constant integer)",
1430            Op->getOperand(2));
1431     break;
1432   }
1433 
1434   case Module::Require: {
    // The value should itself be an MDNode with two operands: a flag ID (an
    // MDString) and a value.
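    // For example (illustrative):
    //   !{i32 3, !"some-flag-check", !{!"other-flag", i32 1}}
    // requires the module flag "other-flag" to be present with value i32 1.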
1437     MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1438     Assert(Value && Value->getNumOperands() == 2,
1439            "invalid value for 'require' module flag (expected metadata pair)",
1440            Op->getOperand(2));
1441     Assert(isa<MDString>(Value->getOperand(0)),
1442            ("invalid value for 'require' module flag "
1443             "(first value operand should be a string)"),
1444            Value->getOperand(0));
1445 
    // Append it to the list of requirements, to be checked once all module
    // flags have been scanned.
1448     Requirements.push_back(Value);
1449     break;
1450   }
1451 
1452   case Module::Append:
1453   case Module::AppendUnique: {
    // These behavior types require the operand to be an MDNode.
1455     Assert(isa<MDNode>(Op->getOperand(2)),
1456            "invalid value for 'append'-type module flag "
1457            "(expected a metadata node)",
1458            Op->getOperand(2));
1459     break;
1460   }
1461   }
1462 
1463   // Unless this is a "requires" flag, check the ID is unique.
1464   if (MFB != Module::Require) {
1465     bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1466     Assert(Inserted,
1467            "module flag identifiers must be unique (or of 'require' type)", ID);
1468   }
1469 
1470   if (ID->getString() == "wchar_size") {
    ConstantInt *Value =
        mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1473     Assert(Value, "wchar_size metadata requires constant integer argument");
1474   }
1475 
1476   if (ID->getString() == "Linker Options") {
1477     // If the llvm.linker.options named metadata exists, we assume that the
1478     // bitcode reader has upgraded the module flag. Otherwise the flag might
1479     // have been created by a client directly.
1480     Assert(M.getNamedMetadata("llvm.linker.options"),
1481            "'Linker Options' named metadata no longer supported");
1482   }
1483 
1484   if (ID->getString() == "SemanticInterposition") {
1485     ConstantInt *Value =
1486         mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1487     Assert(Value,
1488            "SemanticInterposition metadata requires constant integer argument");
1489   }
1490 
1491   if (ID->getString() == "CG Profile") {
1492     for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1493       visitModuleFlagCGProfileEntry(MDO);
1494   }
1495 }
1496 
1497 void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
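  // Each "CG Profile" entry is expected to be a triple of the form
  // (illustrative):
  //   !{void ()* @caller, void ()* @callee, i64 <count>}
  // where either function operand may be null.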
1498   auto CheckFunction = [&](const MDOperand &FuncMDO) {
1499     if (!FuncMDO)
1500       return;
1501     auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1502     Assert(F && isa<Function>(F->getValue()), "expected a Function or null",
1503            FuncMDO);
1504   };
1505   auto Node = dyn_cast_or_null<MDNode>(MDO);
1506   Assert(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1507   CheckFunction(Node->getOperand(0));
1508   CheckFunction(Node->getOperand(1));
1509   auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1510   Assert(Count && Count->getType()->isIntegerTy(),
1511          "expected an integer constant", Node->getOperand(2));
1512 }
1513 
1514 /// Return true if this attribute kind only applies to functions.
1515 static bool isFuncOnlyAttr(Attribute::AttrKind Kind) {
1516   switch (Kind) {
1517   case Attribute::NoMerge:
1518   case Attribute::NoReturn:
1519   case Attribute::NoSync:
1520   case Attribute::WillReturn:
1521   case Attribute::NoCfCheck:
1522   case Attribute::NoUnwind:
1523   case Attribute::NoInline:
1524   case Attribute::AlwaysInline:
1525   case Attribute::OptimizeForSize:
1526   case Attribute::StackProtect:
1527   case Attribute::StackProtectReq:
1528   case Attribute::StackProtectStrong:
1529   case Attribute::SafeStack:
1530   case Attribute::ShadowCallStack:
1531   case Attribute::NoRedZone:
1532   case Attribute::NoImplicitFloat:
1533   case Attribute::Naked:
1534   case Attribute::InlineHint:
1535   case Attribute::StackAlignment:
1536   case Attribute::UWTable:
1537   case Attribute::NonLazyBind:
1538   case Attribute::ReturnsTwice:
1539   case Attribute::SanitizeAddress:
1540   case Attribute::SanitizeHWAddress:
1541   case Attribute::SanitizeMemTag:
1542   case Attribute::SanitizeThread:
1543   case Attribute::SanitizeMemory:
1544   case Attribute::MinSize:
1545   case Attribute::NoDuplicate:
1546   case Attribute::Builtin:
1547   case Attribute::NoBuiltin:
1548   case Attribute::Cold:
1549   case Attribute::OptForFuzzing:
1550   case Attribute::OptimizeNone:
1551   case Attribute::JumpTable:
1552   case Attribute::Convergent:
1553   case Attribute::ArgMemOnly:
1554   case Attribute::NoRecurse:
1555   case Attribute::InaccessibleMemOnly:
1556   case Attribute::InaccessibleMemOrArgMemOnly:
1557   case Attribute::AllocSize:
1558   case Attribute::SpeculativeLoadHardening:
1559   case Attribute::Speculatable:
1560   case Attribute::StrictFP:
1561     return true;
1562   default:
1563     break;
1564   }
1565   return false;
1566 }
1567 
1568 /// Return true if this is a function attribute that can also appear on
1569 /// arguments.
1570 static bool isFuncOrArgAttr(Attribute::AttrKind Kind) {
1571   return Kind == Attribute::ReadOnly || Kind == Attribute::WriteOnly ||
1572          Kind == Attribute::ReadNone || Kind == Attribute::NoFree ||
1573          Kind == Attribute::Preallocated;
1574 }
1575 
1576 void Verifier::verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
1577                                     const Value *V) {
1578   for (Attribute A : Attrs) {
1579     if (A.isStringAttribute())
1580       continue;
1581 
1582     if (A.isIntAttribute() !=
1583         Attribute::doesAttrKindHaveArgument(A.getKindAsEnum())) {
1584       CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
1585                   V);
1586       return;
1587     }
1588 
1589     if (isFuncOnlyAttr(A.getKindAsEnum())) {
1590       if (!IsFunction) {
1591         CheckFailed("Attribute '" + A.getAsString() +
1592                         "' only applies to functions!",
1593                     V);
1594         return;
1595       }
1596     } else if (IsFunction && !isFuncOrArgAttr(A.getKindAsEnum())) {
1597       CheckFailed("Attribute '" + A.getAsString() +
1598                       "' does not apply to functions!",
1599                   V);
1600       return;
1601     }
1602   }
1603 }
1604 
// verifyParameterAttrs - Check the given attributes for an argument or return
1606 // value of the specified type.  The value V is printed in error messages.
1607 void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1608                                     const Value *V) {
1609   if (!Attrs.hasAttributes())
1610     return;
1611 
1612   verifyAttributeTypes(Attrs, /*IsFunction=*/false, V);
1613 
1614   if (Attrs.hasAttribute(Attribute::ImmArg)) {
1615     Assert(Attrs.getNumAttributes() == 1,
1616            "Attribute 'immarg' is incompatible with other attributes", V);
1617   }
1618 
1619   // Check for mutually incompatible attributes.  Only inreg is compatible with
1620   // sret.
1621   unsigned AttrCount = 0;
1622   AttrCount += Attrs.hasAttribute(Attribute::ByVal);
1623   AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
1624   AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
1625   AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
1626                Attrs.hasAttribute(Attribute::InReg);
1627   AttrCount += Attrs.hasAttribute(Attribute::Nest);
1628   Assert(AttrCount <= 1,
1629          "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
1630          "and 'sret' are incompatible!",
1631          V);
1632 
1633   Assert(!(Attrs.hasAttribute(Attribute::InAlloca) &&
1634            Attrs.hasAttribute(Attribute::ReadOnly)),
1635          "Attributes "
1636          "'inalloca and readonly' are incompatible!",
1637          V);
1638 
1639   Assert(!(Attrs.hasAttribute(Attribute::StructRet) &&
1640            Attrs.hasAttribute(Attribute::Returned)),
1641          "Attributes "
1642          "'sret and returned' are incompatible!",
1643          V);
1644 
1645   Assert(!(Attrs.hasAttribute(Attribute::ZExt) &&
1646            Attrs.hasAttribute(Attribute::SExt)),
1647          "Attributes "
1648          "'zeroext and signext' are incompatible!",
1649          V);
1650 
1651   Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1652            Attrs.hasAttribute(Attribute::ReadOnly)),
1653          "Attributes "
1654          "'readnone and readonly' are incompatible!",
1655          V);
1656 
1657   Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1658            Attrs.hasAttribute(Attribute::WriteOnly)),
1659          "Attributes "
1660          "'readnone and writeonly' are incompatible!",
1661          V);
1662 
1663   Assert(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
1664            Attrs.hasAttribute(Attribute::WriteOnly)),
1665          "Attributes "
1666          "'readonly and writeonly' are incompatible!",
1667          V);
1668 
1669   Assert(!(Attrs.hasAttribute(Attribute::NoInline) &&
1670            Attrs.hasAttribute(Attribute::AlwaysInline)),
1671          "Attributes "
1672          "'noinline and alwaysinline' are incompatible!",
1673          V);
1674 
1675   if (Attrs.hasAttribute(Attribute::ByVal) && Attrs.getByValType()) {
1676     Assert(Attrs.getByValType() == cast<PointerType>(Ty)->getElementType(),
1677            "Attribute 'byval' type does not match parameter!", V);
1678   }
1679 
1680   if (Attrs.hasAttribute(Attribute::Preallocated)) {
1681     Assert(Attrs.getPreallocatedType() ==
1682                cast<PointerType>(Ty)->getElementType(),
1683            "Attribute 'preallocated' type does not match parameter!", V);
1684   }
1685 
1686   AttrBuilder IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
1687   Assert(!AttrBuilder(Attrs).overlaps(IncompatibleAttrs),
1688          "Wrong types for attribute: " +
1689              AttributeSet::get(Context, IncompatibleAttrs).getAsString(),
1690          V);
1691 
1692   if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
1693     SmallPtrSet<Type*, 4> Visited;
1694     if (!PTy->getElementType()->isSized(&Visited)) {
1695       Assert(!Attrs.hasAttribute(Attribute::ByVal) &&
1696                  !Attrs.hasAttribute(Attribute::InAlloca) &&
1697                  !Attrs.hasAttribute(Attribute::Preallocated),
1698              "Attributes 'byval', 'inalloca', and 'preallocated' do not "
1699              "support unsized types!",
1700              V);
1701     }
1702     if (!isa<PointerType>(PTy->getElementType()))
1703       Assert(!Attrs.hasAttribute(Attribute::SwiftError),
1704              "Attribute 'swifterror' only applies to parameters "
1705              "with pointer to pointer type!",
1706              V);
1707   } else {
1708     Assert(!Attrs.hasAttribute(Attribute::ByVal),
1709            "Attribute 'byval' only applies to parameters with pointer type!",
1710            V);
1711     Assert(!Attrs.hasAttribute(Attribute::SwiftError),
1712            "Attribute 'swifterror' only applies to parameters "
1713            "with pointer type!",
1714            V);
1715   }
1716 }
1717 
1718 // Check parameter attributes against a function type.
1719 // The value V is printed in error messages.
1720 void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
1721                                    const Value *V, bool IsIntrinsic) {
1722   if (Attrs.isEmpty())
1723     return;
1724 
1725   bool SawNest = false;
1726   bool SawReturned = false;
1727   bool SawSRet = false;
1728   bool SawSwiftSelf = false;
1729   bool SawSwiftError = false;
1730 
1731   // Verify return value attributes.
1732   AttributeSet RetAttrs = Attrs.getRetAttributes();
1733   Assert((!RetAttrs.hasAttribute(Attribute::ByVal) &&
1734           !RetAttrs.hasAttribute(Attribute::Nest) &&
1735           !RetAttrs.hasAttribute(Attribute::StructRet) &&
1736           !RetAttrs.hasAttribute(Attribute::NoCapture) &&
1737           !RetAttrs.hasAttribute(Attribute::NoFree) &&
1738           !RetAttrs.hasAttribute(Attribute::Returned) &&
1739           !RetAttrs.hasAttribute(Attribute::InAlloca) &&
1740           !RetAttrs.hasAttribute(Attribute::Preallocated) &&
1741           !RetAttrs.hasAttribute(Attribute::SwiftSelf) &&
1742           !RetAttrs.hasAttribute(Attribute::SwiftError)),
1743          "Attributes 'byval', 'inalloca', 'preallocated', 'nest', 'sret', "
1744          "'nocapture', 'nofree', "
1745          "'returned', 'swiftself', and 'swifterror' do not apply to return "
1746          "values!",
1747          V);
1748   Assert((!RetAttrs.hasAttribute(Attribute::ReadOnly) &&
1749           !RetAttrs.hasAttribute(Attribute::WriteOnly) &&
1750           !RetAttrs.hasAttribute(Attribute::ReadNone)),
1751          "Attribute '" + RetAttrs.getAsString() +
1752              "' does not apply to function returns",
1753          V);
1754   verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
1755 
1756   // Verify parameter attributes.
1757   for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
1758     Type *Ty = FT->getParamType(i);
1759     AttributeSet ArgAttrs = Attrs.getParamAttributes(i);
1760 
1761     if (!IsIntrinsic) {
1762       Assert(!ArgAttrs.hasAttribute(Attribute::ImmArg),
1763              "immarg attribute only applies to intrinsics",V);
1764     }
1765 
1766     verifyParameterAttrs(ArgAttrs, Ty, V);
1767 
1768     if (ArgAttrs.hasAttribute(Attribute::Nest)) {
1769       Assert(!SawNest, "More than one parameter has attribute nest!", V);
1770       SawNest = true;
1771     }
1772 
1773     if (ArgAttrs.hasAttribute(Attribute::Returned)) {
1774       Assert(!SawReturned, "More than one parameter has attribute returned!",
1775              V);
1776       Assert(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
1777              "Incompatible argument and return types for 'returned' attribute",
1778              V);
1779       SawReturned = true;
1780     }
1781 
1782     if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
1783       Assert(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
1784       Assert(i == 0 || i == 1,
1785              "Attribute 'sret' is not on first or second parameter!", V);
1786       SawSRet = true;
1787     }
1788 
1789     if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
1790       Assert(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
1791       SawSwiftSelf = true;
1792     }
1793 
1794     if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
1795       Assert(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!",
1796              V);
1797       SawSwiftError = true;
1798     }
1799 
1800     if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
1801       Assert(i == FT->getNumParams() - 1,
1802              "inalloca isn't on the last parameter!", V);
1803     }
1804   }
1805 
1806   if (!Attrs.hasAttributes(AttributeList::FunctionIndex))
1807     return;
1808 
1809   verifyAttributeTypes(Attrs.getFnAttributes(), /*IsFunction=*/true, V);
1810 
1811   Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1812            Attrs.hasFnAttribute(Attribute::ReadOnly)),
1813          "Attributes 'readnone and readonly' are incompatible!", V);
1814 
1815   Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1816            Attrs.hasFnAttribute(Attribute::WriteOnly)),
1817          "Attributes 'readnone and writeonly' are incompatible!", V);
1818 
1819   Assert(!(Attrs.hasFnAttribute(Attribute::ReadOnly) &&
1820            Attrs.hasFnAttribute(Attribute::WriteOnly)),
1821          "Attributes 'readonly and writeonly' are incompatible!", V);
1822 
1823   Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1824            Attrs.hasFnAttribute(Attribute::InaccessibleMemOrArgMemOnly)),
1825          "Attributes 'readnone and inaccessiblemem_or_argmemonly' are "
1826          "incompatible!",
1827          V);
1828 
1829   Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1830            Attrs.hasFnAttribute(Attribute::InaccessibleMemOnly)),
1831          "Attributes 'readnone and inaccessiblememonly' are incompatible!", V);
1832 
1833   Assert(!(Attrs.hasFnAttribute(Attribute::NoInline) &&
1834            Attrs.hasFnAttribute(Attribute::AlwaysInline)),
1835          "Attributes 'noinline and alwaysinline' are incompatible!", V);
1836 
1837   if (Attrs.hasFnAttribute(Attribute::OptimizeNone)) {
1838     Assert(Attrs.hasFnAttribute(Attribute::NoInline),
1839            "Attribute 'optnone' requires 'noinline'!", V);
1840 
1841     Assert(!Attrs.hasFnAttribute(Attribute::OptimizeForSize),
1842            "Attributes 'optsize and optnone' are incompatible!", V);
1843 
1844     Assert(!Attrs.hasFnAttribute(Attribute::MinSize),
1845            "Attributes 'minsize and optnone' are incompatible!", V);
1846   }
1847 
1848   if (Attrs.hasFnAttribute(Attribute::JumpTable)) {
1849     const GlobalValue *GV = cast<GlobalValue>(V);
1850     Assert(GV->hasGlobalUnnamedAddr(),
1851            "Attribute 'jumptable' requires 'unnamed_addr'", V);
1852   }
1853 
1854   if (Attrs.hasFnAttribute(Attribute::AllocSize)) {
1855     std::pair<unsigned, Optional<unsigned>> Args =
1856         Attrs.getAllocSizeArgs(AttributeList::FunctionIndex);
1857 
1858     auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
1859       if (ParamNo >= FT->getNumParams()) {
1860         CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
1861         return false;
1862       }
1863 
1864       if (!FT->getParamType(ParamNo)->isIntegerTy()) {
1865         CheckFailed("'allocsize' " + Name +
1866                         " argument must refer to an integer parameter",
1867                     V);
1868         return false;
1869       }
1870 
1871       return true;
1872     };
1873 
1874     if (!CheckParam("element size", Args.first))
1875       return;
1876 
1877     if (Args.second && !CheckParam("number of elements", *Args.second))
1878       return;
1879   }
1880 
1881   if (Attrs.hasFnAttribute("frame-pointer")) {
1882     StringRef FP = Attrs.getAttribute(AttributeList::FunctionIndex,
1883                                       "frame-pointer").getValueAsString();
1884     if (FP != "all" && FP != "non-leaf" && FP != "none")
1885       CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
1886   }
1887 
1888   if (Attrs.hasFnAttribute("patchable-function-prefix")) {
1889     StringRef S = Attrs
1890                       .getAttribute(AttributeList::FunctionIndex,
1891                                     "patchable-function-prefix")
1892                       .getValueAsString();
1893     unsigned N;
1894     if (S.getAsInteger(10, N))
1895       CheckFailed(
1896           "\"patchable-function-prefix\" takes an unsigned integer: " + S, V);
1897   }
1898   if (Attrs.hasFnAttribute("patchable-function-entry")) {
1899     StringRef S = Attrs
1900                       .getAttribute(AttributeList::FunctionIndex,
1901                                     "patchable-function-entry")
1902                       .getValueAsString();
1903     unsigned N;
1904     if (S.getAsInteger(10, N))
1905       CheckFailed(
1906           "\"patchable-function-entry\" takes an unsigned integer: " + S, V);
1907   }
1908 }
1909 
1910 void Verifier::verifyFunctionMetadata(
1911     ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
1912   for (const auto &Pair : MDs) {
1913     if (Pair.first == LLVMContext::MD_prof) {
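      // A function-level !prof attachment is expected to look like
      // (illustrative):
      //   !{!"function_entry_count", i64 2048}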
1914       MDNode *MD = Pair.second;
1915       Assert(MD->getNumOperands() >= 2,
1916              "!prof annotations should have no less than 2 operands", MD);
1917 
1918       // Check first operand.
1919       Assert(MD->getOperand(0) != nullptr, "first operand should not be null",
1920              MD);
1921       Assert(isa<MDString>(MD->getOperand(0)),
1922              "expected string with name of the !prof annotation", MD);
1923       MDString *MDS = cast<MDString>(MD->getOperand(0));
1924       StringRef ProfName = MDS->getString();
1925       Assert(ProfName.equals("function_entry_count") ||
1926                  ProfName.equals("synthetic_function_entry_count"),
1927              "first operand should be 'function_entry_count'"
1928              " or 'synthetic_function_entry_count'",
1929              MD);
1930 
1931       // Check second operand.
1932       Assert(MD->getOperand(1) != nullptr, "second operand should not be null",
1933              MD);
1934       Assert(isa<ConstantAsMetadata>(MD->getOperand(1)),
1935              "expected integer argument to function_entry_count", MD);
1936     }
1937   }
1938 }
1939 
1940 void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
1941   if (!ConstantExprVisited.insert(EntryC).second)
1942     return;
1943 
1944   SmallVector<const Constant *, 16> Stack;
1945   Stack.push_back(EntryC);
1946 
1947   while (!Stack.empty()) {
1948     const Constant *C = Stack.pop_back_val();
1949 
1950     // Check this constant expression.
1951     if (const auto *CE = dyn_cast<ConstantExpr>(C))
1952       visitConstantExpr(CE);
1953 
1954     if (const auto *GV = dyn_cast<GlobalValue>(C)) {
      // Global values get visited separately, but we do need to make sure
      // that the global value is in the correct module.
1957       Assert(GV->getParent() == &M, "Referencing global in another module!",
1958              EntryC, &M, GV, GV->getParent());
1959       continue;
1960     }
1961 
1962     // Visit all sub-expressions.
1963     for (const Use &U : C->operands()) {
1964       const auto *OpC = dyn_cast<Constant>(U);
1965       if (!OpC)
1966         continue;
1967       if (!ConstantExprVisited.insert(OpC).second)
1968         continue;
1969       Stack.push_back(OpC);
1970     }
1971   }
1972 }
1973 
1974 void Verifier::visitConstantExpr(const ConstantExpr *CE) {
1975   if (CE->getOpcode() == Instruction::BitCast)
1976     Assert(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
1977                                  CE->getType()),
1978            "Invalid bitcast", CE);
1979 
1980   if (CE->getOpcode() == Instruction::IntToPtr ||
1981       CE->getOpcode() == Instruction::PtrToInt) {
1982     auto *PtrTy = CE->getOpcode() == Instruction::IntToPtr
1983                       ? CE->getType()
1984                       : CE->getOperand(0)->getType();
1985     StringRef Msg = CE->getOpcode() == Instruction::IntToPtr
1986                         ? "inttoptr not supported for non-integral pointers"
1987                         : "ptrtoint not supported for non-integral pointers";
1988     Assert(
1989         !DL.isNonIntegralPointerType(cast<PointerType>(PtrTy->getScalarType())),
1990         Msg);
1991   }
1992 }
1993 
1994 bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
1995   // There shouldn't be more attribute sets than there are parameters plus the
1996   // function and return value.
1997   return Attrs.getNumAttrSets() <= Params + 2;
1998 }
1999 
2000 /// Verify that statepoint intrinsic is well formed.
2001 void Verifier::verifyStatepoint(const CallBase &Call) {
2002   assert(Call.getCalledFunction() &&
2003          Call.getCalledFunction()->getIntrinsicID() ==
2004              Intrinsic::experimental_gc_statepoint);
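
  // Operand layout assumed by the checks below (illustrative):
  //   <id>, <num patch bytes>, <target>, <num call args>, <flags>,
  //   <call args>..., <num transition args>, <transition args>...,
  //   <num deopt args>, <deopt args>..., <gc args>...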
2005 
2006   Assert(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
2007              !Call.onlyAccessesArgMemory(),
2008          "gc.statepoint must read and write all memory to preserve "
2009          "reordering restrictions required by safepoint semantics",
2010          Call);
2011 
2012   const int64_t NumPatchBytes =
2013       cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2014   assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2015   Assert(NumPatchBytes >= 0,
2016          "gc.statepoint number of patchable bytes must be "
2017          "positive",
2018          Call);
2019 
2020   const Value *Target = Call.getArgOperand(2);
2021   auto *PT = dyn_cast<PointerType>(Target->getType());
2022   Assert(PT && PT->getElementType()->isFunctionTy(),
2023          "gc.statepoint callee must be of function pointer type", Call, Target);
2024   FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());
2025 
  const int NumCallArgs =
      cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2027   Assert(NumCallArgs >= 0,
2028          "gc.statepoint number of arguments to underlying call "
2029          "must be positive",
2030          Call);
2031   const int NumParams = (int)TargetFuncType->getNumParams();
2032   if (TargetFuncType->isVarArg()) {
2033     Assert(NumCallArgs >= NumParams,
2034            "gc.statepoint mismatch in number of vararg call args", Call);
2035 
2036     // TODO: Remove this limitation
2037     Assert(TargetFuncType->getReturnType()->isVoidTy(),
2038            "gc.statepoint doesn't support wrapping non-void "
2039            "vararg functions yet",
2040            Call);
2041   } else
2042     Assert(NumCallArgs == NumParams,
2043            "gc.statepoint mismatch in number of call args", Call);
2044 
  const uint64_t Flags =
      cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2047   Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2048          "unknown flag used in gc.statepoint flags argument", Call);
2049 
2050   // Verify that the types of the call parameter arguments match
2051   // the type of the wrapped callee.
2052   AttributeList Attrs = Call.getAttributes();
2053   for (int i = 0; i < NumParams; i++) {
2054     Type *ParamType = TargetFuncType->getParamType(i);
2055     Type *ArgType = Call.getArgOperand(5 + i)->getType();
2056     Assert(ArgType == ParamType,
2057            "gc.statepoint call argument does not match wrapped "
2058            "function type",
2059            Call);
2060 
2061     if (TargetFuncType->isVarArg()) {
2062       AttributeSet ArgAttrs = Attrs.getParamAttributes(5 + i);
2063       Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
2064              "Attribute 'sret' cannot be used for vararg call arguments!",
2065              Call);
2066     }
2067   }
2068 
2069   const int EndCallArgsInx = 4 + NumCallArgs;
2070 
2071   const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2072   Assert(isa<ConstantInt>(NumTransitionArgsV),
2073          "gc.statepoint number of transition arguments "
2074          "must be constant integer",
2075          Call);
2076   const int NumTransitionArgs =
2077       cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2078   Assert(NumTransitionArgs >= 0,
2079          "gc.statepoint number of transition arguments must be positive", Call);
2080   const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2081 
2082   const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2083   Assert(isa<ConstantInt>(NumDeoptArgsV),
2084          "gc.statepoint number of deoptimization arguments "
2085          "must be constant integer",
2086          Call);
2087   const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2088   Assert(NumDeoptArgs >= 0,
2089          "gc.statepoint number of deoptimization arguments "
2090          "must be positive",
2091          Call);
2092 
2093   const int ExpectedNumArgs =
2094       7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs;
2095   Assert(ExpectedNumArgs <= (int)Call.arg_size(),
2096          "gc.statepoint too few arguments according to length fields", Call);
2097 
  // Check that the only uses of this gc.statepoint are gc.result or
  // gc.relocate calls that are tied to this statepoint and thus part of the
  // same statepoint sequence.
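  // For example (illustrative), a legal use looks like:
  //   %tok = call token @llvm.experimental.gc.statepoint.<mangled>(...)
  //   %res = call <ty> @llvm.experimental.gc.result.<mangled>(token %tok)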
2101   for (const User *U : Call.users()) {
2102     const CallInst *UserCall = dyn_cast<const CallInst>(U);
2103     Assert(UserCall, "illegal use of statepoint token", Call, U);
2104     if (!UserCall)
2105       continue;
2106     Assert(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2107            "gc.result or gc.relocate are the only value uses "
2108            "of a gc.statepoint",
2109            Call, U);
2110     if (isa<GCResultInst>(UserCall)) {
2111       Assert(UserCall->getArgOperand(0) == &Call,
2112              "gc.result connected to wrong gc.statepoint", Call, UserCall);
    } else if (isa<GCRelocateInst>(UserCall)) {
2114       Assert(UserCall->getArgOperand(0) == &Call,
2115              "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2116     }
2117   }
2118 
2119   // Note: It is legal for a single derived pointer to be listed multiple
2120   // times.  It's non-optimal, but it is legal.  It can also happen after
2121   // insertion if we strip a bitcast away.
2122   // Note: It is really tempting to check that each base is relocated and
2123   // that a derived pointer is never reused as a base pointer.  This turns
2124   // out to be problematic since optimizations run after safepoint insertion
2125   // can recognize equality properties that the insertion logic doesn't know
  // about.  See example statepoint.ll in the verifier subdirectory.
2127 }
2128 
2129 void Verifier::verifyFrameRecoverIndices() {
2130   for (auto &Counts : FrameEscapeInfo) {
2131     Function *F = Counts.first;
2132     unsigned EscapedObjectCount = Counts.second.first;
2133     unsigned MaxRecoveredIndex = Counts.second.second;
2134     Assert(MaxRecoveredIndex <= EscapedObjectCount,
2135            "all indices passed to llvm.localrecover must be less than the "
2136            "number of arguments passed to llvm.localescape in the parent "
2137            "function",
2138            F);
2139   }
2140 }
2141 
2142 static Instruction *getSuccPad(Instruction *Terminator) {
2143   BasicBlock *UnwindDest;
2144   if (auto *II = dyn_cast<InvokeInst>(Terminator))
2145     UnwindDest = II->getUnwindDest();
2146   else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2147     UnwindDest = CSI->getUnwindDest();
2148   else
2149     UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2150   return UnwindDest->getFirstNonPHI();
2151 }
2152 
2153 void Verifier::verifySiblingFuncletUnwinds() {
2154   SmallPtrSet<Instruction *, 8> Visited;
2155   SmallPtrSet<Instruction *, 8> Active;
2156   for (const auto &Pair : SiblingFuncletInfo) {
2157     Instruction *PredPad = Pair.first;
2158     if (Visited.count(PredPad))
2159       continue;
2160     Active.insert(PredPad);
2161     Instruction *Terminator = Pair.second;
2162     do {
2163       Instruction *SuccPad = getSuccPad(Terminator);
2164       if (Active.count(SuccPad)) {
2165         // Found a cycle; report error
2166         Instruction *CyclePad = SuccPad;
2167         SmallVector<Instruction *, 8> CycleNodes;
2168         do {
2169           CycleNodes.push_back(CyclePad);
2170           Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2171           if (CycleTerminator != CyclePad)
2172             CycleNodes.push_back(CycleTerminator);
2173           CyclePad = getSuccPad(CycleTerminator);
2174         } while (CyclePad != SuccPad);
2175         Assert(false, "EH pads can't handle each other's exceptions",
2176                ArrayRef<Instruction *>(CycleNodes));
2177       }
2178       // Don't re-walk a node we've already checked
2179       if (!Visited.insert(SuccPad).second)
2180         break;
2181       // Walk to this successor if it has a map entry.
2182       PredPad = SuccPad;
2183       auto TermI = SiblingFuncletInfo.find(PredPad);
2184       if (TermI == SiblingFuncletInfo.end())
2185         break;
2186       Terminator = TermI->second;
2187       Active.insert(PredPad);
2188     } while (true);
2189     // Each node only has one successor, so we've walked all the active
2190     // nodes' successors.
2191     Active.clear();
2192   }
2193 }
2194 
2195 // visitFunction - Verify that a function is ok.
2196 //
2197 void Verifier::visitFunction(const Function &F) {
2198   visitGlobalValue(F);
2199 
2200   // Check function arguments.
2201   FunctionType *FT = F.getFunctionType();
2202   unsigned NumArgs = F.arg_size();
2203 
2204   Assert(&Context == &F.getContext(),
2205          "Function context does not match Module context!", &F);
2206 
2207   Assert(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2208   Assert(FT->getNumParams() == NumArgs,
2209          "# formal arguments must match # of arguments for function type!", &F,
2210          FT);
2211   Assert(F.getReturnType()->isFirstClassType() ||
2212              F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2213          "Functions cannot return aggregate values!", &F);
2214 
2215   Assert(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2216          "Invalid struct return type!", &F);
2217 
2218   AttributeList Attrs = F.getAttributes();
2219 
2220   Assert(verifyAttributeCount(Attrs, FT->getNumParams()),
2221          "Attribute after last parameter!", &F);
2222 
2223   bool isLLVMdotName = F.getName().size() >= 5 &&
2224                        F.getName().substr(0, 5) == "llvm.";
2225 
2226   // Check function attributes.
2227   verifyFunctionAttrs(FT, Attrs, &F, isLLVMdotName);
2228 
  // On function declarations/definitions, we do not support the builtin
  // attribute. We do not check this in verifyFunctionAttrs since that is
  // checking for Attributes that can/cannot ever be on functions.
2232   Assert(!Attrs.hasFnAttribute(Attribute::Builtin),
2233          "Attribute 'builtin' can only be applied to a callsite.", &F);
2234 
  // Check that this function meets the restrictions on this calling
  // convention. Sometimes varargs is used for perfectly forwarding thunks, so
  // some of these restrictions can be lifted.
2238   switch (F.getCallingConv()) {
2239   default:
2240   case CallingConv::C:
2241     break;
2242   case CallingConv::AMDGPU_KERNEL:
2243   case CallingConv::SPIR_KERNEL:
2244     Assert(F.getReturnType()->isVoidTy(),
2245            "Calling convention requires void return type", &F);
2246     LLVM_FALLTHROUGH;
2247   case CallingConv::AMDGPU_VS:
2248   case CallingConv::AMDGPU_HS:
2249   case CallingConv::AMDGPU_GS:
2250   case CallingConv::AMDGPU_PS:
2251   case CallingConv::AMDGPU_CS:
2252     Assert(!F.hasStructRetAttr(),
2253            "Calling convention does not allow sret", &F);
2254     LLVM_FALLTHROUGH;
2255   case CallingConv::Fast:
2256   case CallingConv::Cold:
2257   case CallingConv::Intel_OCL_BI:
2258   case CallingConv::PTX_Kernel:
2259   case CallingConv::PTX_Device:
2260     Assert(!F.isVarArg(), "Calling convention does not support varargs or "
2261                           "perfect forwarding!",
2262            &F);
2263     break;
2264   }
2265 
2266   // Check that the argument values match the function type for this function...
2267   unsigned i = 0;
2268   for (const Argument &Arg : F.args()) {
2269     Assert(Arg.getType() == FT->getParamType(i),
2270            "Argument value does not match function argument type!", &Arg,
2271            FT->getParamType(i));
2272     Assert(Arg.getType()->isFirstClassType(),
2273            "Function arguments must have first-class types!", &Arg);
2274     if (!isLLVMdotName) {
2275       Assert(!Arg.getType()->isMetadataTy(),
2276              "Function takes metadata but isn't an intrinsic", &Arg, &F);
2277       Assert(!Arg.getType()->isTokenTy(),
2278              "Function takes token but isn't an intrinsic", &Arg, &F);
2279     }
2280 
2281     // Check that swifterror argument is only used by loads and stores.
2282     if (Attrs.hasParamAttribute(i, Attribute::SwiftError)) {
2283       verifySwiftErrorValue(&Arg);
2284     }
2285     ++i;
2286   }
2287 
2288   if (!isLLVMdotName)
2289     Assert(!F.getReturnType()->isTokenTy(),
2290            "Functions returns a token but isn't an intrinsic", &F);
2291 
2292   // Get the function metadata attachments.
2293   SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
2294   F.getAllMetadata(MDs);
2295   assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2296   verifyFunctionMetadata(MDs);
2297 
2298   // Check validity of the personality function
2299   if (F.hasPersonalityFn()) {
2300     auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2301     if (Per)
2302       Assert(Per->getParent() == F.getParent(),
2303              "Referencing personality function in another module!",
2304              &F, F.getParent(), Per, Per->getParent());
2305   }
2306 
2307   if (F.isMaterializable()) {
2308     // Function has a body somewhere we can't see.
2309     Assert(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2310            MDs.empty() ? nullptr : MDs.front().second);
2311   } else if (F.isDeclaration()) {
2312     for (const auto &I : MDs) {
2313       // This is used for call site debug information.
2314       AssertDI(I.first != LLVMContext::MD_dbg ||
2315                    !cast<DISubprogram>(I.second)->isDistinct(),
2316                "function declaration may only have a unique !dbg attachment",
2317                &F);
2318       Assert(I.first != LLVMContext::MD_prof,
2319              "function declaration may not have a !prof attachment", &F);
2320 
2321       // Verify the metadata itself.
2322       visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
2323     }
2324     Assert(!F.hasPersonalityFn(),
2325            "Function declaration shouldn't have a personality routine", &F);
2326   } else {
2327     // Verify that this function (which has a body) is not named "llvm.*".  It
2328     // is not legal to define intrinsics.
2329     Assert(!isLLVMdotName, "llvm intrinsics cannot be defined!", &F);
2330 
2331     // Check the entry node
2332     const BasicBlock *Entry = &F.getEntryBlock();
2333     Assert(pred_empty(Entry),
2334            "Entry block to function must not have predecessors!", Entry);
2335 
2336     // The address of the entry block cannot be taken, unless it is dead.
2337     if (Entry->hasAddressTaken()) {
2338       Assert(!BlockAddress::lookup(Entry)->isConstantUsed(),
2339              "blockaddress may not be used with the entry block!", Entry);
2340     }
2341 
2342     unsigned NumDebugAttachments = 0, NumProfAttachments = 0;
2343     // Visit metadata attachments.
2344     for (const auto &I : MDs) {
2345       // Verify that the attachment is legal.
2346       auto AllowLocs = AreDebugLocsAllowed::No;
2347       switch (I.first) {
2348       default:
2349         break;
2350       case LLVMContext::MD_dbg: {
2351         ++NumDebugAttachments;
2352         AssertDI(NumDebugAttachments == 1,
2353                  "function must have a single !dbg attachment", &F, I.second);
2354         AssertDI(isa<DISubprogram>(I.second),
2355                  "function !dbg attachment must be a subprogram", &F, I.second);
2356         auto *SP = cast<DISubprogram>(I.second);
2357         const Function *&AttachedTo = DISubprogramAttachments[SP];
2358         AssertDI(!AttachedTo || AttachedTo == &F,
2359                  "DISubprogram attached to more than one function", SP, &F);
2360         AttachedTo = &F;
2361         AllowLocs = AreDebugLocsAllowed::Yes;
2362         break;
2363       }
2364       case LLVMContext::MD_prof:
2365         ++NumProfAttachments;
2366         Assert(NumProfAttachments == 1,
2367                "function must have a single !prof attachment", &F, I.second);
2368         break;
2369       }
2370 
2371       // Verify the metadata itself.
2372       visitMDNode(*I.second, AllowLocs);
2373     }
2374   }
2375 
2376   // If this function is actually an intrinsic, verify that it is only used in
2377   // direct call/invokes, never having its "address taken".
2378   // Only do this if the module is materialized, otherwise we don't have all the
2379   // uses.
2380   if (F.getIntrinsicID() && F.getParent()->isMaterialized()) {
2381     const User *U;
2382     if (F.hasAddressTaken(&U))
2383       Assert(false, "Invalid user of intrinsic instruction!", U);
2384   }
2385 
2386   auto *N = F.getSubprogram();
2387   HasDebugInfo = (N != nullptr);
2388   if (!HasDebugInfo)
2389     return;
2390 
  // Check that all !dbg attachments lead back to N.
2392   //
2393   // FIXME: Check this incrementally while visiting !dbg attachments.
2394   // FIXME: Only check when N is the canonical subprogram for F.
2395   SmallPtrSet<const MDNode *, 32> Seen;
2396   auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
2397     // Be careful about using DILocation here since we might be dealing with
2398     // broken code (this is the Verifier after all).
2399     const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
2400     if (!DL)
2401       return;
2402     if (!Seen.insert(DL).second)
2403       return;
2404 
2405     Metadata *Parent = DL->getRawScope();
2406     AssertDI(Parent && isa<DILocalScope>(Parent),
2407              "DILocation's scope must be a DILocalScope", N, &F, &I, DL,
2408              Parent);
2409 
2410     DILocalScope *Scope = DL->getInlinedAtScope();
2411     Assert(Scope, "Failed to find DILocalScope", DL);
2412 
2413     if (!Seen.insert(Scope).second)
2414       return;
2415 
2416     DISubprogram *SP = Scope->getSubprogram();
2417 
    // Scope and SP could be the same MDNode and we don't want to skip
    // validation in that case.
2420     if (SP && ((Scope != SP) && !Seen.insert(SP).second))
2421       return;
2422 
2423     AssertDI(SP->describes(&F),
2424              "!dbg attachment points at wrong subprogram for function", N, &F,
2425              &I, DL, Scope, SP);
2426   };
2427   for (auto &BB : F)
2428     for (auto &I : BB) {
2429       VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
2430       // The llvm.loop annotations also contain two DILocations.
2431       if (auto MD = I.getMetadata(LLVMContext::MD_loop))
2432         for (unsigned i = 1; i < MD->getNumOperands(); ++i)
2433           VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
2434       if (BrokenDebugInfo)
2435         return;
2436     }
2437 }
2438 
// visitBasicBlock - Verify that a basic block is well formed...
2440 //
2441 void Verifier::visitBasicBlock(BasicBlock &BB) {
2442   InstsInThisBlock.clear();
2443 
2444   // Ensure that basic blocks have terminators!
2445   Assert(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
2446 
2447   // Check constraints that this basic block imposes on all of the PHI nodes in
2448   // it.
2449   if (isa<PHINode>(BB.front())) {
2450     SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB));
2451     SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
2452     llvm::sort(Preds);
2453     for (const PHINode &PN : BB.phis()) {
2454       // Ensure that PHI nodes have at least one entry!
2455       Assert(PN.getNumIncomingValues() != 0,
2456              "PHI nodes must have at least one entry.  If the block is dead, "
2457              "the PHI should be removed!",
2458              &PN);
2459       Assert(PN.getNumIncomingValues() == Preds.size(),
2460              "PHINode should have one entry for each predecessor of its "
2461              "parent basic block!",
2462              &PN);
2463 
2464       // Get and sort all incoming values in the PHI node...
2465       Values.clear();
2466       Values.reserve(PN.getNumIncomingValues());
2467       for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
2468         Values.push_back(
2469             std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
2470       llvm::sort(Values);
2471 
2472       for (unsigned i = 0, e = Values.size(); i != e; ++i) {
2473         // Check to make sure that if there is more than one entry for a
2474         // particular basic block in this PHI node, that the incoming values are
2475         // all identical.
2476         //
2477         Assert(i == 0 || Values[i].first != Values[i - 1].first ||
2478                    Values[i].second == Values[i - 1].second,
2479                "PHI node has multiple entries for the same basic block with "
2480                "different incoming values!",
2481                &PN, Values[i].first, Values[i].second, Values[i - 1].second);
2482 
2483         // Check to make sure that the predecessors and PHI node entries are
2484         // matched up.
2485         Assert(Values[i].first == Preds[i],
2486                "PHI node entries do not match predecessors!", &PN,
2487                Values[i].first, Preds[i]);
2488       }
2489     }
2490   }
2491 
2492   // Check that all instructions have their parent pointers set up correctly.
2493   for (auto &I : BB)
2494   {
2495     Assert(I.getParent() == &BB, "Instruction has bogus parent pointer!");
2496   }
2497 }
2498 
2499 void Verifier::visitTerminator(Instruction &I) {
2500   // Ensure that terminators only exist at the end of the basic block.
2501   Assert(&I == I.getParent()->getTerminator(),
2502          "Terminator found in the middle of a basic block!", I.getParent());
2503   visitInstruction(I);
2504 }
2505 
2506 void Verifier::visitBranchInst(BranchInst &BI) {
2507   if (BI.isConditional()) {
2508     Assert(BI.getCondition()->getType()->isIntegerTy(1),
2509            "Branch condition is not 'i1' type!", &BI, BI.getCondition());
2510   }
2511   visitTerminator(BI);
2512 }
2513 
2514 void Verifier::visitReturnInst(ReturnInst &RI) {
2515   Function *F = RI.getParent()->getParent();
2516   unsigned N = RI.getNumOperands();
2517   if (F->getReturnType()->isVoidTy())
2518     Assert(N == 0,
2519            "Found return instr that returns non-void in Function of void "
2520            "return type!",
2521            &RI, F->getReturnType());
2522   else
2523     Assert(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
2524            "Function return type does not match operand "
2525            "type of return inst!",
2526            &RI, F->getReturnType());
2527 
2528   // Check to make sure that the return value has necessary properties for
2529   // terminators...
2530   visitTerminator(RI);
2531 }
2532 
2533 void Verifier::visitSwitchInst(SwitchInst &SI) {
2534   // Check to make sure that all of the constants in the switch instruction
2535   // have the same type as the switched-on value.
2536   Type *SwitchTy = SI.getCondition()->getType();
2537   SmallPtrSet<ConstantInt*, 32> Constants;
2538   for (auto &Case : SI.cases()) {
2539     Assert(Case.getCaseValue()->getType() == SwitchTy,
2540            "Switch constants must all be same type as switch value!", &SI);
2541     Assert(Constants.insert(Case.getCaseValue()).second,
2542            "Duplicate integer as switch case", &SI, Case.getCaseValue());
2543   }
2544 
2545   visitTerminator(SI);
2546 }
2547 
2548 void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
2549   Assert(BI.getAddress()->getType()->isPointerTy(),
2550          "Indirectbr operand must have pointer type!", &BI);
2551   for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
2552     Assert(BI.getDestination(i)->getType()->isLabelTy(),
2553            "Indirectbr destinations must all have pointer type!", &BI);
2554 
2555   visitTerminator(BI);
2556 }
2557 
2558 void Verifier::visitCallBrInst(CallBrInst &CBI) {
2559   Assert(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!",
2560          &CBI);
2561   for (unsigned i = 0, e = CBI.getNumSuccessors(); i != e; ++i)
2562     Assert(CBI.getSuccessor(i)->getType()->isLabelTy(),
2563            "Callbr successors must all have pointer type!", &CBI);
2564   for (unsigned i = 0, e = CBI.getNumOperands(); i != e; ++i) {
2565     Assert(i >= CBI.getNumArgOperands() || !isa<BasicBlock>(CBI.getOperand(i)),
2566            "Using an unescaped label as a callbr argument!", &CBI);
2567     if (isa<BasicBlock>(CBI.getOperand(i)))
2568       for (unsigned j = i + 1; j != e; ++j)
2569         Assert(CBI.getOperand(i) != CBI.getOperand(j),
2570                "Duplicate callbr destination!", &CBI);
2571   }
2572   {
2573     SmallPtrSet<BasicBlock *, 4> ArgBBs;
2574     for (Value *V : CBI.args())
2575       if (auto *BA = dyn_cast<BlockAddress>(V))
2576         ArgBBs.insert(BA->getBasicBlock());
2577     for (BasicBlock *BB : CBI.getIndirectDests())
2578       Assert(ArgBBs.find(BB) != ArgBBs.end(),
2579              "Indirect label missing from arglist.", &CBI);
2580   }
2581 
2582   visitTerminator(CBI);
2583 }
2584 
2585 void Verifier::visitSelectInst(SelectInst &SI) {
2586   Assert(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
2587                                          SI.getOperand(2)),
2588          "Invalid operands for select instruction!", &SI);
2589 
2590   Assert(SI.getTrueValue()->getType() == SI.getType(),
2591          "Select values must have same type as select instruction!", &SI);
2592   visitInstruction(SI);
2593 }
2594 
/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
/// a pass; if any exist, it's an error.
2597 ///
2598 void Verifier::visitUserOp1(Instruction &I) {
2599   Assert(false, "User-defined operators should not live outside of a pass!", &I);
2600 }
2601 
2602 void Verifier::visitTruncInst(TruncInst &I) {
2603   // Get the source and destination types
2604   Type *SrcTy = I.getOperand(0)->getType();
2605   Type *DestTy = I.getType();
2606 
2607   // Get the size of the types in bits, we'll need this later
2608   unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2609   unsigned DestBitSize = DestTy->getScalarSizeInBits();
2610 
2611   Assert(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
2612   Assert(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
2613   Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2614          "trunc source and destination must both be a vector or neither", &I);
2615   Assert(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
2616 
2617   visitInstruction(I);
2618 }
2619 
2620 void Verifier::visitZExtInst(ZExtInst &I) {
2621   // Get the source and destination types
2622   Type *SrcTy = I.getOperand(0)->getType();
2623   Type *DestTy = I.getType();
2624 
2625   // Get the size of the types in bits, we'll need this later
2626   Assert(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
2627   Assert(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
2628   Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2629          "zext source and destination must both be a vector or neither", &I);
2630   unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2631   unsigned DestBitSize = DestTy->getScalarSizeInBits();
2632 
2633   Assert(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
2634 
2635   visitInstruction(I);
2636 }
2637 
2638 void Verifier::visitSExtInst(SExtInst &I) {
2639   // Get the source and destination types
2640   Type *SrcTy = I.getOperand(0)->getType();
2641   Type *DestTy = I.getType();
2642 
2643   // Get the size of the types in bits, we'll need this later
2644   unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2645   unsigned DestBitSize = DestTy->getScalarSizeInBits();
2646 
2647   Assert(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
2648   Assert(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
2649   Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2650          "sext source and destination must both be a vector or neither", &I);
2651   Assert(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
2652 
2653   visitInstruction(I);
2654 }
2655 
2656 void Verifier::visitFPTruncInst(FPTruncInst &I) {
2657   // Get the source and destination types
2658   Type *SrcTy = I.getOperand(0)->getType();
2659   Type *DestTy = I.getType();
2660   // Get the size of the types in bits, we'll need this later
2661   unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2662   unsigned DestBitSize = DestTy->getScalarSizeInBits();
2663 
2664   Assert(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
2665   Assert(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
2666   Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2667          "fptrunc source and destination must both be a vector or neither", &I);
2668   Assert(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
2669 
2670   visitInstruction(I);
2671 }
2672 
2673 void Verifier::visitFPExtInst(FPExtInst &I) {
2674   // Get the source and destination types
2675   Type *SrcTy = I.getOperand(0)->getType();
2676   Type *DestTy = I.getType();
2677 
2678   // Get the size of the types in bits, we'll need this later
2679   unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2680   unsigned DestBitSize = DestTy->getScalarSizeInBits();
2681 
2682   Assert(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
2683   Assert(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
2684   Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2685          "fpext source and destination must both be a vector or neither", &I);
2686   Assert(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
2687 
2688   visitInstruction(I);
2689 }
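
     // Illustrative only: fptrunc must narrow and fpext must widen.
     //   %t = fptrunc double %d to float   ; accepted: 64 > 32
     //   %x = fpext float %f to double     ; accepted: 32 < 64
     //   %b = fpext double %d to double    ; rejected: "DestTy too small for FPExt"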
2690 
2691 void Verifier::visitUIToFPInst(UIToFPInst &I) {
2692   // Get the source and destination types
2693   Type *SrcTy = I.getOperand(0)->getType();
2694   Type *DestTy = I.getType();
2695 
2696   bool SrcVec = SrcTy->isVectorTy();
2697   bool DstVec = DestTy->isVectorTy();
2698 
2699   Assert(SrcVec == DstVec,
2700          "UIToFP source and dest must both be vector or scalar", &I);
2701   Assert(SrcTy->isIntOrIntVectorTy(),
2702          "UIToFP source must be integer or integer vector", &I);
2703   Assert(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
2704          &I);
2705 
2706   if (SrcVec && DstVec)
2707     Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2708                cast<VectorType>(DestTy)->getNumElements(),
2709            "UIToFP source and dest vector length mismatch", &I);
2710 
2711   visitInstruction(I);
2712 }
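
     // Illustrative only: scalar/vector shape and vector lengths must agree.
     //   %a = uitofp i32 %x to double             ; accepted: scalar to scalar
     //   %b = uitofp <4 x i32> %v to <4 x float>  ; accepted: matching element counts
     //   %c = uitofp <4 x i32> %v to <2 x float>  ; rejected: vector length mismatch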
2713 
2714 void Verifier::visitSIToFPInst(SIToFPInst &I) {
2715   // Get the source and destination types
2716   Type *SrcTy = I.getOperand(0)->getType();
2717   Type *DestTy = I.getType();
2718 
2719   bool SrcVec = SrcTy->isVectorTy();
2720   bool DstVec = DestTy->isVectorTy();
2721 
2722   Assert(SrcVec == DstVec,
2723          "SIToFP source and dest must both be vector or scalar", &I);
2724   Assert(SrcTy->isIntOrIntVectorTy(),
2725          "SIToFP source must be integer or integer vector", &I);
2726   Assert(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
2727          &I);
2728 
2729   if (SrcVec && DstVec)
2730     Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2731                cast<VectorType>(DestTy)->getNumElements(),
2732            "SIToFP source and dest vector length mismatch", &I);
2733 
2734   visitInstruction(I);
2735 }
2736 
2737 void Verifier::visitFPToUIInst(FPToUIInst &I) {
2738   // Get the source and destination types
2739   Type *SrcTy = I.getOperand(0)->getType();
2740   Type *DestTy = I.getType();
2741 
2742   bool SrcVec = SrcTy->isVectorTy();
2743   bool DstVec = DestTy->isVectorTy();
2744 
2745   Assert(SrcVec == DstVec,
2746          "FPToUI source and dest must both be vector or scalar", &I);
2747   Assert(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector",
2748          &I);
2749   Assert(DestTy->isIntOrIntVectorTy(),
2750          "FPToUI result must be integer or integer vector", &I);
2751 
2752   if (SrcVec && DstVec)
2753     Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2754                cast<VectorType>(DestTy)->getNumElements(),
2755            "FPToUI source and dest vector length mismatch", &I);
2756 
2757   visitInstruction(I);
2758 }
2759 
2760 void Verifier::visitFPToSIInst(FPToSIInst &I) {
2761   // Get the source and destination types
2762   Type *SrcTy = I.getOperand(0)->getType();
2763   Type *DestTy = I.getType();
2764 
2765   bool SrcVec = SrcTy->isVectorTy();
2766   bool DstVec = DestTy->isVectorTy();
2767 
2768   Assert(SrcVec == DstVec,
2769          "FPToSI source and dest must both be vector or scalar", &I);
2770   Assert(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector",
2771          &I);
2772   Assert(DestTy->isIntOrIntVectorTy(),
2773          "FPToSI result must be integer or integer vector", &I);
2774 
2775   if (SrcVec && DstVec)
2776     Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2777                cast<VectorType>(DestTy)->getNumElements(),
2778            "FPToSI source and dest vector length mismatch", &I);
2779 
2780   visitInstruction(I);
2781 }
2782 
2783 void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
2784   // Get the source and destination types
2785   Type *SrcTy = I.getOperand(0)->getType();
2786   Type *DestTy = I.getType();
2787 
2788   Assert(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
2789 
2790   if (auto *PTy = dyn_cast<PointerType>(SrcTy->getScalarType()))
2791     Assert(!DL.isNonIntegralPointerType(PTy),
2792            "ptrtoint not supported for non-integral pointers");
2793 
2794   Assert(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
2795   Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
2796          &I);
2797 
2798   if (SrcTy->isVectorTy()) {
2799     VectorType *VSrc = cast<VectorType>(SrcTy);
2800     VectorType *VDest = cast<VectorType>(DestTy);
2801     Assert(VSrc->getNumElements() == VDest->getNumElements(),
2802            "PtrToInt Vector width mismatch", &I);
2803   }
2804 
2805   visitInstruction(I);
2806 }
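
     // Illustrative only (typed-pointer syntax assumed):
     //   %a = ptrtoint i32* %p to i64               ; accepted
     //   %b = ptrtoint <2 x i32*> %ps to <2 x i64>  ; accepted: vector widths match
     //   %c = ptrtoint i32* %p to float             ; rejected: result must be integral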
2807 
2808 void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
2809   // Get the source and destination types
2810   Type *SrcTy = I.getOperand(0)->getType();
2811   Type *DestTy = I.getType();
2812 
2813   Assert(SrcTy->isIntOrIntVectorTy(),
2814          "IntToPtr source must be an integral", &I);
2815   Assert(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
2816 
2817   if (auto *PTy = dyn_cast<PointerType>(DestTy->getScalarType()))
2818     Assert(!DL.isNonIntegralPointerType(PTy),
2819            "inttoptr not supported for non-integral pointers");
2820 
2821   Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
2822          &I);
2823   if (SrcTy->isVectorTy()) {
2824     VectorType *VSrc = cast<VectorType>(SrcTy);
2825     VectorType *VDest = cast<VectorType>(DestTy);
2826     Assert(VSrc->getNumElements() == VDest->getNumElements(),
2827            "IntToPtr Vector width mismatch", &I);
2828   }
2829   visitInstruction(I);
2830 }
2831 
2832 void Verifier::visitBitCastInst(BitCastInst &I) {
2833   Assert(
2834       CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
2835       "Invalid bitcast", &I);
2836   visitInstruction(I);
2837 }
2838 
2839 void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
2840   Type *SrcTy = I.getOperand(0)->getType();
2841   Type *DestTy = I.getType();
2842 
2843   Assert(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
2844          &I);
2845   Assert(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
2846          &I);
2847   Assert(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
2848          "AddrSpaceCast must be between different address spaces", &I);
2849   if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
2850     Assert(SrcVTy->getNumElements() ==
2851                cast<VectorType>(DestTy)->getNumElements(),
2852            "AddrSpaceCast vector pointer number of elements mismatch", &I);
2853   visitInstruction(I);
2854 }
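
     // Illustrative only:
     //   %a = addrspacecast i8* %p to i8 addrspace(1)*  ; accepted: 0 -> 1
     //   %b = addrspacecast i8* %p to i8*               ; rejected: same address space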
2855 
2856 /// visitPHINode - Ensure that a PHI node is well formed.
2857 ///
2858 void Verifier::visitPHINode(PHINode &PN) {
2859   // Ensure that the PHI nodes are all grouped together at the top of the block.
2860   // This can be tested by checking whether the instruction before this is
2861   // either nonexistent (because this is begin()) or is a PHI node.  If not,
2862   // then there is some other instruction before a PHI.
2863   Assert(&PN == &PN.getParent()->front() ||
2864              isa<PHINode>(--BasicBlock::iterator(&PN)),
2865          "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
2866 
2867   // Check that a PHI doesn't yield a Token.
2868   Assert(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
2869 
2870   // Check that all of the values of the PHI node have the same type as the
2871   // result, and that the incoming blocks are really basic blocks.
2872   for (Value *IncValue : PN.incoming_values()) {
2873     Assert(PN.getType() == IncValue->getType(),
2874            "PHI node operands are not the same type as the result!", &PN);
2875   }
2876 
2877   // All other PHI node constraints are checked in the visitBasicBlock method.
2878 
2879   visitInstruction(PN);
2880 }
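
     // Illustrative only: PHIs must be grouped at the top of the block and their
     // incoming values must match the result type.
     //   merge:
     //     %r = phi i32 [ 0, %a ], [ %x, %b ]  ; accepted
     //     %s = add i32 %r, 1
     //     %t = phi i32 [ 1, %a ], [ 2, %b ]   ; rejected: PHI not grouped at the top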
2881 
2882 void Verifier::visitCallBase(CallBase &Call) {
2883   Assert(Call.getCalledOperand()->getType()->isPointerTy(),
2884          "Called function must be a pointer!", Call);
2885   PointerType *FPTy = cast<PointerType>(Call.getCalledOperand()->getType());
2886 
2887   Assert(FPTy->getElementType()->isFunctionTy(),
2888          "Called function is not pointer to function type!", Call);
2889 
2890   Assert(FPTy->getElementType() == Call.getFunctionType(),
2891          "Called function is not the same type as the call!", Call);
2892 
2893   FunctionType *FTy = Call.getFunctionType();
2894 
2895   // Verify that the correct number of arguments are being passed
2896   if (FTy->isVarArg())
2897     Assert(Call.arg_size() >= FTy->getNumParams(),
2898            "Called function requires more parameters than were provided!",
2899            Call);
2900   else
2901     Assert(Call.arg_size() == FTy->getNumParams(),
2902            "Incorrect number of arguments passed to called function!", Call);
2903 
2904   // Verify that all arguments to the call match the function type.
2905   for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
2906     Assert(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
2907            "Call parameter type does not match function signature!",
2908            Call.getArgOperand(i), FTy->getParamType(i), Call);
2909 
2910   AttributeList Attrs = Call.getAttributes();
2911 
2912   Assert(verifyAttributeCount(Attrs, Call.arg_size()),
2913          "Attribute after last parameter!", Call);
2914 
2915   bool IsIntrinsic = Call.getCalledFunction() &&
2916                      Call.getCalledFunction()->getName().startswith("llvm.");
2917 
2918   Function *Callee =
2919       dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
2920 
2921   if (Attrs.hasAttribute(AttributeList::FunctionIndex, Attribute::Speculatable)) {
2922     // Don't allow speculatable on call sites, unless the underlying function
2923     // declaration is also speculatable.
2924     Assert(Callee && Callee->isSpeculatable(),
2925            "speculatable attribute may not apply to call sites", Call);
2926   }
2927 
2928   if (Attrs.hasAttribute(AttributeList::FunctionIndex,
2929                          Attribute::Preallocated)) {
2930     Assert(Call.getCalledFunction()->getIntrinsicID() ==
2931                Intrinsic::call_preallocated_arg,
2932            "preallocated as a call site attribute can only be on "
2933            "llvm.call.preallocated.arg");
2934   }
2935 
2936   // Verify call attributes.
2937   verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic);
2938 
2939   // Conservatively check the inalloca argument.
2940   // We have a bug if we can find an underlying alloca without the inalloca
2941   // attribute.
2942   if (Call.hasInAllocaArgument()) {
2943     Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
2944     if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
2945       Assert(AI->isUsedWithInAlloca(),
2946              "inalloca argument for call has mismatched alloca", AI, Call);
2947   }
2948 
2949   // For each argument of the callsite that has the swifterror attribute, make
2950   // sure the underlying alloca or parameter it comes from is marked swifterror
2951   // as well.
2952   for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
2953     if (Call.paramHasAttr(i, Attribute::SwiftError)) {
2954       Value *SwiftErrorArg = Call.getArgOperand(i);
2955       if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
2956         Assert(AI->isSwiftError(),
2957                "swifterror argument for call has mismatched alloca", AI, Call);
2958         continue;
2959       }
2960       auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
2961       Assert(ArgI,
2962              "swifterror argument should come from an alloca or parameter",
2963              SwiftErrorArg, Call);
2964       Assert(ArgI->hasSwiftErrorAttr(),
2965              "swifterror argument for call has mismatched parameter", ArgI,
2966              Call);
2967     }
2968 
2969     if (Attrs.hasParamAttribute(i, Attribute::ImmArg)) {
2970       // Don't allow immarg on call sites, unless the underlying declaration
2971       // also has the matching immarg.
2972       Assert(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
2973              "immarg may not apply only to call sites",
2974              Call.getArgOperand(i), Call);
2975     }
2976 
2977     if (Call.paramHasAttr(i, Attribute::ImmArg)) {
2978       Value *ArgVal = Call.getArgOperand(i);
2979       Assert(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
2980              "immarg operand has non-immediate parameter", ArgVal, Call);
2981     }
2982 
2983     if (Call.paramHasAttr(i, Attribute::Preallocated)) {
2984       Value *ArgVal = Call.getArgOperand(i);
2985       Assert(Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0,
2986              "preallocated operand requires a preallocated bundle", ArgVal,
2987              Call);
2988     }
2989   }
2990 
2991   if (FTy->isVarArg()) {
2992     // FIXME? is 'nest' even legal here?
2993     bool SawNest = false;
2994     bool SawReturned = false;
2995 
2996     for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
2997       if (Attrs.hasParamAttribute(Idx, Attribute::Nest))
2998         SawNest = true;
2999       if (Attrs.hasParamAttribute(Idx, Attribute::Returned))
3000         SawReturned = true;
3001     }
3002 
3003     // Check attributes on the varargs part.
3004     for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3005       Type *Ty = Call.getArgOperand(Idx)->getType();
3006       AttributeSet ArgAttrs = Attrs.getParamAttributes(Idx);
3007       verifyParameterAttrs(ArgAttrs, Ty, &Call);
3008 
3009       if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3010         Assert(!SawNest, "More than one parameter has attribute nest!", Call);
3011         SawNest = true;
3012       }
3013 
3014       if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3015         Assert(!SawReturned, "More than one parameter has attribute returned!",
3016                Call);
3017         Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3018                "Incompatible argument and return types for 'returned' "
3019                "attribute",
3020                Call);
3021         SawReturned = true;
3022       }
3023 
3024       // Statepoint intrinsic is vararg but the wrapped function may not be.
3025       // Allow sret here and check the wrapped function in verifyStatepoint.
3026       if (!Call.getCalledFunction() ||
3027           Call.getCalledFunction()->getIntrinsicID() !=
3028               Intrinsic::experimental_gc_statepoint)
3029         Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
3030                "Attribute 'sret' cannot be used for vararg call arguments!",
3031                Call);
3032 
3033       if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3034         Assert(Idx == Call.arg_size() - 1,
3035                "inalloca isn't on the last argument!", Call);
3036     }
3037   }
3038 
3039   // Verify that there's no metadata unless it's a direct call to an intrinsic.
3040   if (!IsIntrinsic) {
3041     for (Type *ParamTy : FTy->params()) {
3042       Assert(!ParamTy->isMetadataTy(),
3043              "Function has metadata parameter but isn't an intrinsic", Call);
3044       Assert(!ParamTy->isTokenTy(),
3045              "Function has token parameter but isn't an intrinsic", Call);
3046     }
3047   }
3048 
3049   // Verify that indirect calls don't return tokens.
3050   if (!Call.getCalledFunction())
3051     Assert(!FTy->getReturnType()->isTokenTy(),
3052            "Return type cannot be token for indirect call!");
3053 
3054   if (Function *F = Call.getCalledFunction())
3055     if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
3056       visitIntrinsicCall(ID, Call);
3057 
3058   // Verify that a callsite has at most one "deopt", at most one "funclet", at
3059   // most one "gc-transition", at most one "cfguardtarget",
3060   // and at most one "preallocated" operand bundle.
3061   bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3062        FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3063        FoundPreallocatedBundle = false;
3064   for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3065     OperandBundleUse BU = Call.getOperandBundleAt(i);
3066     uint32_t Tag = BU.getTagID();
3067     if (Tag == LLVMContext::OB_deopt) {
3068       Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3069       FoundDeoptBundle = true;
3070     } else if (Tag == LLVMContext::OB_gc_transition) {
3071       Assert(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3072              Call);
3073       FoundGCTransitionBundle = true;
3074     } else if (Tag == LLVMContext::OB_funclet) {
3075       Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3076       FoundFuncletBundle = true;
3077       Assert(BU.Inputs.size() == 1,
3078              "Expected exactly one funclet bundle operand", Call);
3079       Assert(isa<FuncletPadInst>(BU.Inputs.front()),
3080              "Funclet bundle operands should correspond to a FuncletPadInst",
3081              Call);
3082     } else if (Tag == LLVMContext::OB_cfguardtarget) {
3083       Assert(!FoundCFGuardTargetBundle,
3084              "Multiple CFGuardTarget operand bundles", Call);
3085       FoundCFGuardTargetBundle = true;
3086       Assert(BU.Inputs.size() == 1,
3087              "Expected exactly one cfguardtarget bundle operand", Call);
3088     } else if (Tag == LLVMContext::OB_preallocated) {
3089       Assert(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3090              Call);
3091       FoundPreallocatedBundle = true;
3092       Assert(BU.Inputs.size() == 1,
3093              "Expected exactly one preallocated bundle operand", Call);
3094       auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3095       Assert(Input &&
3096                  Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3097              "\"preallocated\" argument must be a token from "
3098              "llvm.call.preallocated.setup",
3099              Call);
3100     }
3101   }
3102 
3103   // Verify that each inlinable callsite of a debug-info-bearing function in a
3104   // debug-info-bearing function has a debug location attached to it. Failure to
3105   // do so causes assertion failures when the inliner sets up inline scope info.
3106   if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
3107       Call.getCalledFunction()->getSubprogram())
3108     AssertDI(Call.getDebugLoc(),
3109              "inlinable function call in a function with "
3110              "debug info must have a !dbg location",
3111              Call);
3112 
3113   visitInstruction(Call);
3114 }
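
     // Illustrative only, assuming a hypothetical declaration "declare void @g()":
     // a call site may carry at most one bundle of each tag checked above.
     //   call void @g() [ "deopt"(i32 0) ]                  ; accepted
     //   call void @g() [ "deopt"(i32 0), "deopt"(i32 1) ]  ; rejected: multiple deopt bundles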
3115 
3116 /// Two types are "congruent" if they are identical, or if they are both
3117 /// pointer types in the same address space (pointee types may differ).
3118 static bool isTypeCongruent(Type *L, Type *R) {
3119   if (L == R)
3120     return true;
3121   PointerType *PL = dyn_cast<PointerType>(L);
3122   PointerType *PR = dyn_cast<PointerType>(R);
3123   if (!PL || !PR)
3124     return false;
3125   return PL->getAddressSpace() == PR->getAddressSpace();
3126 }
3127 
3128 static AttrBuilder getParameterABIAttributes(int I, AttributeList Attrs) {
3129   static const Attribute::AttrKind ABIAttrs[] = {
3130       Attribute::StructRet,   Attribute::ByVal,     Attribute::InAlloca,
3131       Attribute::InReg,       Attribute::SwiftSelf, Attribute::SwiftError,
3132       Attribute::Preallocated};
3133   AttrBuilder Copy;
3134   for (auto AK : ABIAttrs) {
3135     if (Attrs.hasParamAttribute(I, AK))
3136       Copy.addAttribute(AK);
3137   }
3138   // `align` is ABI-affecting only in combination with `byval`.
3139   if (Attrs.hasParamAttribute(I, Attribute::Alignment) &&
3140       Attrs.hasParamAttribute(I, Attribute::ByVal))
3141     Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
3142   return Copy;
3143 }
3144 
3145 void Verifier::verifyMustTailCall(CallInst &CI) {
3146   Assert(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
3147 
3148   // - The caller and callee prototypes must match.  Pointer types of
3149   //   parameters or return types may differ in pointee type, but not
3150   //   address space.
3151   Function *F = CI.getParent()->getParent();
3152   FunctionType *CallerTy = F->getFunctionType();
3153   FunctionType *CalleeTy = CI.getFunctionType();
3154   if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
3155     Assert(CallerTy->getNumParams() == CalleeTy->getNumParams(),
3156            "cannot guarantee tail call due to mismatched parameter counts",
3157            &CI);
3158     for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3159       Assert(
3160           isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
3161           "cannot guarantee tail call due to mismatched parameter types", &CI);
3162     }
3163   }
3164   Assert(CallerTy->isVarArg() == CalleeTy->isVarArg(),
3165          "cannot guarantee tail call due to mismatched varargs", &CI);
3166   Assert(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
3167          "cannot guarantee tail call due to mismatched return types", &CI);
3168 
3169   // - The calling conventions of the caller and callee must match.
3170   Assert(F->getCallingConv() == CI.getCallingConv(),
3171          "cannot guarantee tail call due to mismatched calling conv", &CI);
3172 
3173   // - All ABI-impacting function attributes, such as sret, byval, inreg,
3174   //   returned, preallocated, and inalloca, must match.
3175   AttributeList CallerAttrs = F->getAttributes();
3176   AttributeList CalleeAttrs = CI.getAttributes();
3177   for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3178     AttrBuilder CallerABIAttrs = getParameterABIAttributes(I, CallerAttrs);
3179     AttrBuilder CalleeABIAttrs = getParameterABIAttributes(I, CalleeAttrs);
3180     Assert(CallerABIAttrs == CalleeABIAttrs,
3181            "cannot guarantee tail call due to mismatched ABI impacting "
3182            "function attributes",
3183            &CI, CI.getOperand(I));
3184   }
3185 
3186   // - The call must immediately precede a ret instruction, or a pointer
3187   //   bitcast followed by a ret instruction.
3188   // - The ret instruction must return the (possibly bitcasted) value
3189   //   produced by the call or void.
3190   Value *RetVal = &CI;
3191   Instruction *Next = CI.getNextNode();
3192 
3193   // Handle the optional bitcast.
3194   if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
3195     Assert(BI->getOperand(0) == RetVal,
3196            "bitcast following musttail call must use the call", BI);
3197     RetVal = BI;
3198     Next = BI->getNextNode();
3199   }
3200 
3201   // Check the return.
3202   ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
3203   Assert(Ret, "musttail call must precede a ret with an optional bitcast",
3204          &CI);
3205   Assert(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal,
3206          "musttail call result must be returned", Ret);
3207 }
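
     // Illustrative only, with a hypothetical @callee of matching prototype: the
     // musttail call must feed the ret directly (at most one pointer bitcast between).
     //   %r = musttail call i32 @callee(i32 %x)
     //   ret i32 %r           ; accepted
     // Any other intervening instruction, or returning a different value, is
     // rejected ("musttail call result must be returned").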
3208 
3209 void Verifier::visitCallInst(CallInst &CI) {
3210   visitCallBase(CI);
3211 
3212   if (CI.isMustTailCall())
3213     verifyMustTailCall(CI);
3214 }
3215 
3216 void Verifier::visitInvokeInst(InvokeInst &II) {
3217   visitCallBase(II);
3218 
3219   // Verify that the first non-PHI instruction of the unwind destination is an
3220   // exception handling instruction.
3221   Assert(
3222       II.getUnwindDest()->isEHPad(),
3223       "The unwind destination does not have an exception handling instruction!",
3224       &II);
3225 
3226   visitTerminator(II);
3227 }
3228 
3229 /// visitUnaryOperator - Check the argument to the unary operator.
3230 ///
3231 void Verifier::visitUnaryOperator(UnaryOperator &U) {
3232   Assert(U.getType() == U.getOperand(0)->getType(),
3233          "Unary operators must have same type for "
3234          "operands and result!",
3235          &U);
3236 
3237   switch (U.getOpcode()) {
3238   // Check that floating-point arithmetic operators are only used with
3239   // floating-point operands.
3240   case Instruction::FNeg:
3241     Assert(U.getType()->isFPOrFPVectorTy(),
3242            "FNeg operator only works with float types!", &U);
3243     break;
3244   default:
3245     llvm_unreachable("Unknown UnaryOperator opcode!");
3246   }
3247 
3248   visitInstruction(U);
3249 }
3250 
3251 /// visitBinaryOperator - Check that both arguments to the binary operator are
3252 /// of the same type!
3253 ///
3254 void Verifier::visitBinaryOperator(BinaryOperator &B) {
3255   Assert(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
3256          "Both operands to a binary operator are not of the same type!", &B);
3257 
3258   switch (B.getOpcode()) {
3259   // Check that integer arithmetic operators are only used with
3260   // integral operands.
3261   case Instruction::Add:
3262   case Instruction::Sub:
3263   case Instruction::Mul:
3264   case Instruction::SDiv:
3265   case Instruction::UDiv:
3266   case Instruction::SRem:
3267   case Instruction::URem:
3268     Assert(B.getType()->isIntOrIntVectorTy(),
3269            "Integer arithmetic operators only work with integral types!", &B);
3270     Assert(B.getType() == B.getOperand(0)->getType(),
3271            "Integer arithmetic operators must have same type "
3272            "for operands and result!",
3273            &B);
3274     break;
3275   // Check that floating-point arithmetic operators are only used with
3276   // floating-point operands.
3277   case Instruction::FAdd:
3278   case Instruction::FSub:
3279   case Instruction::FMul:
3280   case Instruction::FDiv:
3281   case Instruction::FRem:
3282     Assert(B.getType()->isFPOrFPVectorTy(),
3283            "Floating-point arithmetic operators only work with "
3284            "floating-point types!",
3285            &B);
3286     Assert(B.getType() == B.getOperand(0)->getType(),
3287            "Floating-point arithmetic operators must have same type "
3288            "for operands and result!",
3289            &B);
3290     break;
3291   // Check that logical operators are only used with integral operands.
3292   case Instruction::And:
3293   case Instruction::Or:
3294   case Instruction::Xor:
3295     Assert(B.getType()->isIntOrIntVectorTy(),
3296            "Logical operators only work with integral types!", &B);
3297     Assert(B.getType() == B.getOperand(0)->getType(),
3298            "Logical operators must have same type for operands and result!",
3299            &B);
3300     break;
3301   case Instruction::Shl:
3302   case Instruction::LShr:
3303   case Instruction::AShr:
3304     Assert(B.getType()->isIntOrIntVectorTy(),
3305            "Shifts only work with integral types!", &B);
3306     Assert(B.getType() == B.getOperand(0)->getType(),
3307            "Shift return type must be same as operands!", &B);
3308     break;
3309   default:
3310     llvm_unreachable("Unknown BinaryOperator opcode!");
3311   }
3312 
3313   visitInstruction(B);
3314 }
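
     // Illustrative only: each opcode class requires matching operand types.
     //   %a = add i32 %x, %y     ; accepted: integer arithmetic on integers
     //   %b = fadd float %f, %g  ; accepted: FP arithmetic on FP
     //   %c = and float %f, %g   ; rejected: logical operators need integral types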
3315 
3316 void Verifier::visitICmpInst(ICmpInst &IC) {
3317   // Check that the operands are the same type
3318   Type *Op0Ty = IC.getOperand(0)->getType();
3319   Type *Op1Ty = IC.getOperand(1)->getType();
3320   Assert(Op0Ty == Op1Ty,
3321          "Both operands to ICmp instruction are not of the same type!", &IC);
3322   // Check that the operands are the right type
3323   Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
3324          "Invalid operand types for ICmp instruction", &IC);
3325   // Check that the predicate is valid.
3326   Assert(IC.isIntPredicate(),
3327          "Invalid predicate in ICmp instruction!", &IC);
3328 
3329   visitInstruction(IC);
3330 }
3331 
3332 void Verifier::visitFCmpInst(FCmpInst &FC) {
3333   // Check that the operands are the same type
3334   Type *Op0Ty = FC.getOperand(0)->getType();
3335   Type *Op1Ty = FC.getOperand(1)->getType();
3336   Assert(Op0Ty == Op1Ty,
3337          "Both operands to FCmp instruction are not of the same type!", &FC);
3338   // Check that the operands are the right type
3339   Assert(Op0Ty->isFPOrFPVectorTy(),
3340          "Invalid operand types for FCmp instruction", &FC);
3341   // Check that the predicate is valid.
3342   Assert(FC.isFPPredicate(),
3343          "Invalid predicate in FCmp instruction!", &FC);
3344 
3345   visitInstruction(FC);
3346 }
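
     // Illustrative only:
     //   %a = icmp slt i32 %x, %y     ; accepted: integer predicate on integers
     //   %b = fcmp olt double %f, %g  ; accepted: FP predicate on FP
     //   %c = icmp eq double %f, %g   ; rejected: icmp needs integer or pointer operands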
3347 
3348 void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
3349   Assert(
3350       ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)),
3351       "Invalid extractelement operands!", &EI);
3352   visitInstruction(EI);
3353 }
3354 
3355 void Verifier::visitInsertElementInst(InsertElementInst &IE) {
3356   Assert(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
3357                                             IE.getOperand(2)),
3358          "Invalid insertelement operands!", &IE);
3359   visitInstruction(IE);
3360 }
3361 
3362 void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
3363   Assert(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
3364                                             SV.getShuffleMask()),
3365          "Invalid shufflevector operands!", &SV);
3366   visitInstruction(SV);
3367 }
3368 
3369 void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3370   Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
3371 
3372   Assert(isa<PointerType>(TargetTy),
3373          "GEP base pointer is not a pointer or a vector of pointers", &GEP);
3374   Assert(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
3375 
3376   SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
3377   Assert(all_of(
3378       Idxs, [](Value* V) { return V->getType()->isIntOrIntVectorTy(); }),
3379       "GEP indexes must be integers", &GEP);
3380   Type *ElTy =
3381       GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
3382   Assert(ElTy, "Invalid indices for GEP pointer type!", &GEP);
3383 
3384   Assert(GEP.getType()->isPtrOrPtrVectorTy() &&
3385              GEP.getResultElementType() == ElTy,
3386          "GEP is not of right type for indices!", &GEP, ElTy);
3387 
3388   if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
3389     // Additional checks for vector GEPs.
3390     unsigned GEPWidth = GEPVTy->getNumElements();
3391     if (GEP.getPointerOperandType()->isVectorTy())
3392       Assert(
3393           GEPWidth ==
3394               cast<VectorType>(GEP.getPointerOperandType())->getNumElements(),
3395           "Vector GEP result width doesn't match operand's", &GEP);
3396     for (Value *Idx : Idxs) {
3397       Type *IndexTy = Idx->getType();
3398       if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
3399         unsigned IndexWidth = IndexVTy->getNumElements();
3400         Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
3401       }
3402       Assert(IndexTy->isIntOrIntVectorTy(),
3403              "All GEP indices should be of integer type");
3404     }
3405   }
3406 
3407   if (auto *PTy = dyn_cast<PointerType>(GEP.getType())) {
3408     Assert(GEP.getAddressSpace() == PTy->getAddressSpace(),
3409            "GEP address space doesn't match type", &GEP);
3410   }
3411 
3412   visitInstruction(GEP);
3413 }
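
     // Illustrative only (typed-pointer syntax assumed):
     //   %a = getelementptr { i32, i64 }, { i32, i64 }* %s, i64 0, i32 1  ; accepted
     //   %b = getelementptr i32, <4 x i32*> %ps, <4 x i64> %offs          ; accepted: widths match
     //   %c = getelementptr i32, i32* %base, float 1.0    ; rejected: indices must be integers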
3414 
3415 static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
3416   return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
3417 }
3418 
3419 void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
3420   assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
3421          "precondition violation");
3422 
3423   unsigned NumOperands = Range->getNumOperands();
3424   Assert(NumOperands % 2 == 0, "Unfinished range!", Range);
3425   unsigned NumRanges = NumOperands / 2;
3426   Assert(NumRanges >= 1, "It should have at least one range!", Range);
3427 
3428   ConstantRange LastRange(1, true); // Dummy initial value
3429   for (unsigned i = 0; i < NumRanges; ++i) {
3430     ConstantInt *Low =
3431         mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
3432     Assert(Low, "The lower limit must be an integer!", Low);
3433     ConstantInt *High =
3434         mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
3435     Assert(High, "The upper limit must be an integer!", High);
3436     Assert(High->getType() == Low->getType() && High->getType() == Ty,
3437            "Range types must match instruction type!", &I);
3438 
3439     APInt HighV = High->getValue();
3440     APInt LowV = Low->getValue();
3441     ConstantRange CurRange(LowV, HighV);
3442     Assert(!CurRange.isEmptySet() && !CurRange.isFullSet(),
3443            "Range must not be empty or full!", Range);
3444     if (i != 0) {
3445       Assert(CurRange.intersectWith(LastRange).isEmptySet(),
3446              "Intervals are overlapping", Range);
3447       Assert(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
3448              Range);
3449       Assert(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
3450              Range);
3451     }
3452     LastRange = ConstantRange(LowV, HighV);
3453   }
3454   if (NumRanges > 2) {
3455     APInt FirstLow =
3456         mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
3457     APInt FirstHigh =
3458         mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
3459     ConstantRange FirstRange(FirstLow, FirstHigh);
3460     Assert(FirstRange.intersectWith(LastRange).isEmptySet(),
3461            "Intervals are overlapping", Range);
3462     Assert(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
3463            Range);
3464   }
3465 }
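
     // Illustrative only: a !range node is a list of [Low, High) pairs that must be
     // ordered, disjoint, and non-contiguous, e.g. on an i8 load:
     //   %v = load i8, i8* %p, !range !0
     //   !0 = !{i8 0, i8 2, i8 10, i8 20}  ; accepted: two separate ranges
     //   !1 = !{i8 0, i8 2, i8 2, i8 5}    ; rejected: "Intervals are contiguous"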
3466 
3467 void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
3468   unsigned Size = DL.getTypeSizeInBits(Ty);
3469   Assert(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
3470   Assert(!(Size & (Size - 1)),
3471          "atomic memory access' operand must have a power-of-two size", Ty, I);
3472 }
3473 
3474 void Verifier::visitLoadInst(LoadInst &LI) {
3475   PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
3476   Assert(PTy, "Load operand must be a pointer.", &LI);
3477   Type *ElTy = LI.getType();
3478   Assert(LI.getAlignment() <= Value::MaximumAlignment,
3479          "huge alignment values are unsupported", &LI);
3480   Assert(ElTy->isSized(), "loading unsized types is not allowed", &LI);
3481   if (LI.isAtomic()) {
3482     Assert(LI.getOrdering() != AtomicOrdering::Release &&
3483                LI.getOrdering() != AtomicOrdering::AcquireRelease,
3484            "Load cannot have Release ordering", &LI);
3485     Assert(LI.getAlignment() != 0,
3486            "Atomic load must specify explicit alignment", &LI);
3487     Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
3488            "atomic load operand must have integer, pointer, or floating point "
3489            "type!",
3490            ElTy, &LI);
3491     checkAtomicMemAccessSize(ElTy, &LI);
3492   } else {
3493     Assert(LI.getSyncScopeID() == SyncScope::System,
3494            "Non-atomic load cannot have SynchronizationScope specified", &LI);
3495   }
3496 
3497   visitInstruction(LI);
3498 }
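
     // Illustrative only:
     //   %a = load atomic i32, i32* %p acquire, align 4  ; accepted
     //   %b = load atomic i32, i32* %p release, align 4  ; rejected: loads cannot be release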
3499 
3500 void Verifier::visitStoreInst(StoreInst &SI) {
3501   PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
3502   Assert(PTy, "Store operand must be a pointer.", &SI);
3503   Type *ElTy = PTy->getElementType();
3504   Assert(ElTy == SI.getOperand(0)->getType(),
3505          "Stored value type does not match pointer operand type!", &SI, ElTy);
3506   Assert(SI.getAlignment() <= Value::MaximumAlignment,
3507          "huge alignment values are unsupported", &SI);
3508   Assert(ElTy->isSized(), "storing unsized types is not allowed", &SI);
3509   if (SI.isAtomic()) {
3510     Assert(SI.getOrdering() != AtomicOrdering::Acquire &&
3511                SI.getOrdering() != AtomicOrdering::AcquireRelease,
3512            "Store cannot have Acquire ordering", &SI);
3513     Assert(SI.getAlignment() != 0,
3514            "Atomic store must specify explicit alignment", &SI);
3515     Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
3516            "atomic store operand must have integer, pointer, or floating point "
3517            "type!",
3518            ElTy, &SI);
3519     checkAtomicMemAccessSize(ElTy, &SI);
3520   } else {
3521     Assert(SI.getSyncScopeID() == SyncScope::System,
3522            "Non-atomic store cannot have SynchronizationScope specified", &SI);
3523   }
3524   visitInstruction(SI);
3525 }
3526 
3527 /// Check that SwiftErrorVal is used as a swifterror argument in CS.
3528 void Verifier::verifySwiftErrorCall(CallBase &Call,
3529                                     const Value *SwiftErrorVal) {
3530   unsigned Idx = 0;
3531   for (auto I = Call.arg_begin(), E = Call.arg_end(); I != E; ++I, ++Idx) {
3532     if (*I == SwiftErrorVal) {
3533       Assert(Call.paramHasAttr(Idx, Attribute::SwiftError),
3534              "swifterror value when used in a callsite should be marked "
3535              "with swifterror attribute",
3536              SwiftErrorVal, Call);
3537     }
3538   }
3539 }
3540 
3541 void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
3542   // Check that swifterror value is only used by loads, stores, or as
3543   // a swifterror argument.
3544   for (const User *U : SwiftErrorVal->users()) {
3545     Assert(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
3546            isa<InvokeInst>(U),
3547            "swifterror value can only be loaded from, stored to, or used "
3548            "as a swifterror argument!",
3549            SwiftErrorVal, U);
3550     // If it is used by a store, check it is the second operand.
3551     if (auto StoreI = dyn_cast<StoreInst>(U))
3552       Assert(StoreI->getOperand(1) == SwiftErrorVal,
3553              "swifterror value should be the second operand when used "
3554              "by stores", SwiftErrorVal, U);
3555     if (auto *Call = dyn_cast<CallBase>(U))
3556       verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
3557   }
3558 }
3559 
3560 void Verifier::visitAllocaInst(AllocaInst &AI) {
3561   SmallPtrSet<Type*, 4> Visited;
3562   PointerType *PTy = AI.getType();
3563   // TODO: Relax this restriction?
3564   Assert(PTy->getAddressSpace() == DL.getAllocaAddrSpace(),
3565          "Allocation instruction pointer not in the stack address space!",
3566          &AI);
3567   Assert(AI.getAllocatedType()->isSized(&Visited),
3568          "Cannot allocate unsized type", &AI);
3569   Assert(AI.getArraySize()->getType()->isIntegerTy(),
3570          "Alloca array size must have integer type", &AI);
3571   Assert(AI.getAlignment() <= Value::MaximumAlignment,
3572          "huge alignment values are unsupported", &AI);
3573 
3574   if (AI.isSwiftError()) {
3575     verifySwiftErrorValue(&AI);
3576   }
3577 
3578   visitInstruction(AI);
3579 }
3580 
3581 void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
3582 
3583   // FIXME: more conditions???
3584   Assert(CXI.getSuccessOrdering() != AtomicOrdering::NotAtomic,
3585          "cmpxchg instructions must be atomic.", &CXI);
3586   Assert(CXI.getFailureOrdering() != AtomicOrdering::NotAtomic,
3587          "cmpxchg instructions must be atomic.", &CXI);
3588   Assert(CXI.getSuccessOrdering() != AtomicOrdering::Unordered,
3589          "cmpxchg instructions cannot be unordered.", &CXI);
3590   Assert(CXI.getFailureOrdering() != AtomicOrdering::Unordered,
3591          "cmpxchg instructions cannot be unordered.", &CXI);
3592   Assert(!isStrongerThan(CXI.getFailureOrdering(), CXI.getSuccessOrdering()),
3593          "cmpxchg instruction's failure ordering shall be no stronger than "
3594          "its success ordering",
3595          &CXI);
3596   Assert(CXI.getFailureOrdering() != AtomicOrdering::Release &&
3597              CXI.getFailureOrdering() != AtomicOrdering::AcquireRelease,
3598          "cmpxchg failure ordering cannot include release semantics", &CXI);
3599 
3600   PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
3601   Assert(PTy, "First cmpxchg operand must be a pointer.", &CXI);
3602   Type *ElTy = PTy->getElementType();
3603   Assert(ElTy->isIntOrPtrTy(),
3604          "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
3605   checkAtomicMemAccessSize(ElTy, &CXI);
3606   Assert(ElTy == CXI.getOperand(1)->getType(),
3607          "Expected value type does not match pointer operand type!", &CXI,
3608          ElTy);
3609   Assert(ElTy == CXI.getOperand(2)->getType(),
3610          "Stored value type does not match pointer operand type!", &CXI, ElTy);
3611   visitInstruction(CXI);
3612 }
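
     // Illustrative only: the failure ordering may be no stronger than the success
     // ordering and may not include release semantics.
     //   %ok  = cmpxchg i32* %p, i32 %old, i32 %new seq_cst acquire    ; accepted
     //   %bad = cmpxchg i32* %p, i32 %old, i32 %new monotonic seq_cst  ; rejected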
3613 
3614 void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
3615   Assert(RMWI.getOrdering() != AtomicOrdering::NotAtomic,
3616          "atomicrmw instructions must be atomic.", &RMWI);
3617   Assert(RMWI.getOrdering() != AtomicOrdering::Unordered,
3618          "atomicrmw instructions cannot be unordered.", &RMWI);
3619   auto Op = RMWI.getOperation();
3620   PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
3621   Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
3622   Type *ElTy = PTy->getElementType();
3623   if (Op == AtomicRMWInst::Xchg) {
3624     Assert(ElTy->isIntegerTy() || ElTy->isFloatingPointTy(), "atomicrmw " +
3625            AtomicRMWInst::getOperationName(Op) +
3626            " operand must have integer or floating point type!",
3627            &RMWI, ElTy);
3628   } else if (AtomicRMWInst::isFPOperation(Op)) {
3629     Assert(ElTy->isFloatingPointTy(), "atomicrmw " +
3630            AtomicRMWInst::getOperationName(Op) +
3631            " operand must have floating point type!",
3632            &RMWI, ElTy);
3633   } else {
3634     Assert(ElTy->isIntegerTy(), "atomicrmw " +
3635            AtomicRMWInst::getOperationName(Op) +
3636            " operand must have integer type!",
3637            &RMWI, ElTy);
3638   }
3639   checkAtomicMemAccessSize(ElTy, &RMWI);
3640   Assert(ElTy == RMWI.getOperand(1)->getType(),
3641          "Argument value type does not match pointer operand type!", &RMWI,
3642          ElTy);
3643   Assert(AtomicRMWInst::FIRST_BINOP <= Op && Op <= AtomicRMWInst::LAST_BINOP,
3644          "Invalid binary operation!", &RMWI);
3645   visitInstruction(RMWI);
3646 }
3647 
3648 void Verifier::visitFenceInst(FenceInst &FI) {
3649   const AtomicOrdering Ordering = FI.getOrdering();
3650   Assert(Ordering == AtomicOrdering::Acquire ||
3651              Ordering == AtomicOrdering::Release ||
3652              Ordering == AtomicOrdering::AcquireRelease ||
3653              Ordering == AtomicOrdering::SequentiallyConsistent,
3654          "fence instructions may only have acquire, release, acq_rel, or "
3655          "seq_cst ordering.",
3656          &FI);
3657   visitInstruction(FI);
3658 }
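
     // Illustrative only:
     //   fence seq_cst    ; accepted
     //   fence monotonic  ; rejected: needs acquire, release, acq_rel, or seq_cst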
3659 
3660 void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
3661   Assert(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
3662                                           EVI.getIndices()) == EVI.getType(),
3663          "Invalid ExtractValueInst operands!", &EVI);
3664 
3665   visitInstruction(EVI);
3666 }
3667 
3668 void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
3669   Assert(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
3670                                           IVI.getIndices()) ==
3671              IVI.getOperand(1)->getType(),
3672          "Invalid InsertValueInst operands!", &IVI);
3673 
3674   visitInstruction(IVI);
3675 }
3676 
3677 static Value *getParentPad(Value *EHPad) {
3678   if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
3679     return FPI->getParentPad();
3680 
3681   return cast<CatchSwitchInst>(EHPad)->getParentPad();
3682 }
3683 
3684 void Verifier::visitEHPadPredecessors(Instruction &I) {
3685   assert(I.isEHPad());
3686 
3687   BasicBlock *BB = I.getParent();
3688   Function *F = BB->getParent();
3689 
3690   Assert(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
3691 
3692   if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
3693     // The landingpad instruction defines its parent as a landing pad block. The
3694     // landing pad block may be branched to only by the unwind edge of an
3695     // invoke.
3696     for (BasicBlock *PredBB : predecessors(BB)) {
3697       const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
3698       Assert(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
3699              "Block containing LandingPadInst must be jumped to "
3700              "only by the unwind edge of an invoke.",
3701              LPI);
3702     }
3703     return;
3704   }
3705   if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
3706     if (!pred_empty(BB))
3707       Assert(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
3708              "Block containing CatchPadInst must be jumped to "
3709              "only by its catchswitch.",
3710              CPI);
3711     Assert(BB != CPI->getCatchSwitch()->getUnwindDest(),
3712            "Catchswitch cannot unwind to one of its catchpads",
3713            CPI->getCatchSwitch(), CPI);
3714     return;
3715   }
3716 
3717   // Verify that each pred has a legal terminator with a legal to/from EH
3718   // pad relationship.
3719   Instruction *ToPad = &I;
3720   Value *ToPadParent = getParentPad(ToPad);
3721   for (BasicBlock *PredBB : predecessors(BB)) {
3722     Instruction *TI = PredBB->getTerminator();
3723     Value *FromPad;
3724     if (auto *II = dyn_cast<InvokeInst>(TI)) {
3725       Assert(II->getUnwindDest() == BB && II->getNormalDest() != BB,
3726              "EH pad must be jumped to via an unwind edge", ToPad, II);
3727       if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
3728         FromPad = Bundle->Inputs[0];
3729       else
3730         FromPad = ConstantTokenNone::get(II->getContext());
3731     } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
3732       FromPad = CRI->getOperand(0);
3733       Assert(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
3734     } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
3735       FromPad = CSI;
3736     } else {
3737       Assert(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
3738     }
3739 
3740     // The edge may exit from zero or more nested pads.
3741     SmallSet<Value *, 8> Seen;
3742     for (;; FromPad = getParentPad(FromPad)) {
3743       Assert(FromPad != ToPad,
3744              "EH pad cannot handle exceptions raised within it", FromPad, TI);
3745       if (FromPad == ToPadParent) {
3746         // This is a legal unwind edge.
3747         break;
3748       }
3749       Assert(!isa<ConstantTokenNone>(FromPad),
3750              "A single unwind edge may only enter one EH pad", TI);
3751       Assert(Seen.insert(FromPad).second,
3752              "EH pad jumps through a cycle of pads", FromPad);
3753     }
3754   }
3755 }
3756 
3757 void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
3758   // The landingpad instruction is ill-formed if it doesn't have any clauses and
3759   // isn't a cleanup.
3760   Assert(LPI.getNumClauses() > 0 || LPI.isCleanup(),
3761          "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
3762 
3763   visitEHPadPredecessors(LPI);
3764 
3765   if (!LandingPadResultTy)
3766     LandingPadResultTy = LPI.getType();
3767   else
3768     Assert(LandingPadResultTy == LPI.getType(),
3769            "The landingpad instruction should have a consistent result type "
3770            "inside a function.",
3771            &LPI);
3772 
3773   Function *F = LPI.getParent()->getParent();
3774   Assert(F->hasPersonalityFn(),
3775          "LandingPadInst needs to be in a function with a personality.", &LPI);
3776 
3777   // The landingpad instruction must be the first non-PHI instruction in the
3778   // block.
3779   Assert(LPI.getParent()->getLandingPadInst() == &LPI,
3780          "LandingPadInst not the first non-PHI instruction in the block.",
3781          &LPI);
3782 
3783   for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
3784     Constant *Clause = LPI.getClause(i);
3785     if (LPI.isCatch(i)) {
3786       Assert(isa<PointerType>(Clause->getType()),
3787              "Catch operand does not have pointer type!", &LPI);
3788     } else {
3789       Assert(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
3790       Assert(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
3791              "Filter operand is not an array of constants!", &LPI);
3792     }
3793   }
3794 
3795   visitInstruction(LPI);
3796 }
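
     // Illustrative only: in a function with a personality, the first non-PHI
     // instruction of a landing pad block might be
     //   %lp = landingpad { i8*, i32 } cleanup  ; accepted: cleanup, no clauses needed
     // whereas a landingpad with no clauses that is not a cleanup is rejected.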
3797 
3798 void Verifier::visitResumeInst(ResumeInst &RI) {
3799   Assert(RI.getFunction()->hasPersonalityFn(),
3800          "ResumeInst needs to be in a function with a personality.", &RI);
3801 
3802   if (!LandingPadResultTy)
3803     LandingPadResultTy = RI.getValue()->getType();
3804   else
3805     Assert(LandingPadResultTy == RI.getValue()->getType(),
3806            "The resume instruction should have a consistent result type "
3807            "inside a function.",
3808            &RI);
3809 
3810   visitTerminator(RI);
3811 }
3812 
3813 void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
3814   BasicBlock *BB = CPI.getParent();
3815 
3816   Function *F = BB->getParent();
3817   Assert(F->hasPersonalityFn(),
3818          "CatchPadInst needs to be in a function with a personality.", &CPI);
3819 
3820   Assert(isa<CatchSwitchInst>(CPI.getParentPad()),
3821          "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
3822          CPI.getParentPad());
3823 
3824   // The catchpad instruction must be the first non-PHI instruction in the
3825   // block.
3826   Assert(BB->getFirstNonPHI() == &CPI,
3827          "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
3828 
3829   visitEHPadPredecessors(CPI);
3830   visitFuncletPadInst(CPI);
3831 }
3832 
3833 void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
3834   Assert(isa<CatchPadInst>(CatchReturn.getOperand(0)),
3835          "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
3836          CatchReturn.getOperand(0));
3837 
3838   visitTerminator(CatchReturn);
3839 }
3840 
3841 void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
3842   BasicBlock *BB = CPI.getParent();
3843 
3844   Function *F = BB->getParent();
3845   Assert(F->hasPersonalityFn(),
3846          "CleanupPadInst needs to be in a function with a personality.", &CPI);
3847 
3848   // The cleanuppad instruction must be the first non-PHI instruction in the
3849   // block.
3850   Assert(BB->getFirstNonPHI() == &CPI,
3851          "CleanupPadInst not the first non-PHI instruction in the block.",
3852          &CPI);
3853 
3854   auto *ParentPad = CPI.getParentPad();
3855   Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
3856          "CleanupPadInst has an invalid parent.", &CPI);
3857 
3858   visitEHPadPredecessors(CPI);
3859   visitFuncletPadInst(CPI);
3860 }
3861 
3862 void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
3863   User *FirstUser = nullptr;
3864   Value *FirstUnwindPad = nullptr;
3865   SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
3866   SmallSet<FuncletPadInst *, 8> Seen;
3867 
3868   while (!Worklist.empty()) {
3869     FuncletPadInst *CurrentPad = Worklist.pop_back_val();
3870     Assert(Seen.insert(CurrentPad).second,
3871            "FuncletPadInst must not be nested within itself", CurrentPad);
3872     Value *UnresolvedAncestorPad = nullptr;
3873     for (User *U : CurrentPad->users()) {
3874       BasicBlock *UnwindDest;
3875       if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
3876         UnwindDest = CRI->getUnwindDest();
3877       } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
3878         // We allow catchswitch unwind to caller to nest
3879         // within an outer pad that unwinds somewhere else,
3880         // because catchswitch doesn't have a nounwind variant.
3881         // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
3882         if (CSI->unwindsToCaller())
3883           continue;
3884         UnwindDest = CSI->getUnwindDest();
3885       } else if (auto *II = dyn_cast<InvokeInst>(U)) {
3886         UnwindDest = II->getUnwindDest();
3887       } else if (isa<CallInst>(U)) {
3888         // Calls which don't unwind may be found inside funclet
3889         // pads that unwind somewhere else.  We don't *require*
3890         // such calls to be annotated nounwind.
3891         continue;
3892       } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
3893         // The unwind dest for a cleanup can only be found by
3894         // recursive search.  Add it to the worklist, and we'll
3895         // search for its first use that determines where it unwinds.
3896         Worklist.push_back(CPI);
3897         continue;
3898       } else {
3899         Assert(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
3900         continue;
3901       }
3902 
3903       Value *UnwindPad;
3904       bool ExitsFPI;
3905       if (UnwindDest) {
3906         UnwindPad = UnwindDest->getFirstNonPHI();
3907         if (!cast<Instruction>(UnwindPad)->isEHPad())
3908           continue;
3909         Value *UnwindParent = getParentPad(UnwindPad);
3910         // Ignore unwind edges that don't exit CurrentPad.
3911         if (UnwindParent == CurrentPad)
3912           continue;
3913         // Determine whether the original funclet pad is exited,
3914         // and if we are scanning nested pads determine how many
3915         // of them are exited so we can stop searching their
3916         // children.
3917         Value *ExitedPad = CurrentPad;
3918         ExitsFPI = false;
3919         do {
3920           if (ExitedPad == &FPI) {
3921             ExitsFPI = true;
3922             // Now we can resolve any ancestors of CurrentPad up to
3923             // FPI, but not including FPI since we need to make sure
3924             // to check all direct users of FPI for consistency.
3925             UnresolvedAncestorPad = &FPI;
3926             break;
3927           }
3928           Value *ExitedParent = getParentPad(ExitedPad);
3929           if (ExitedParent == UnwindParent) {
3930             // ExitedPad is the ancestor-most pad which this unwind
3931             // edge exits, so we can resolve up to it, meaning that
3932             // ExitedParent is the first ancestor still unresolved.
3933             UnresolvedAncestorPad = ExitedParent;
3934             break;
3935           }
3936           ExitedPad = ExitedParent;
3937         } while (!isa<ConstantTokenNone>(ExitedPad));
3938       } else {
3939         // Unwinding to caller exits all pads.
3940         UnwindPad = ConstantTokenNone::get(FPI.getContext());
3941         ExitsFPI = true;
3942         UnresolvedAncestorPad = &FPI;
3943       }
3944 
3945       if (ExitsFPI) {
3946         // This unwind edge exits FPI.  Make sure it agrees with other
3947         // such edges.
3948         if (FirstUser) {
3949           Assert(UnwindPad == FirstUnwindPad, "Unwind edges out of a funclet "
3950                                               "pad must have the same unwind "
3951                                               "dest",
3952                  &FPI, U, FirstUser);
3953         } else {
3954           FirstUser = U;
3955           FirstUnwindPad = UnwindPad;
3956           // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
3957           if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
3958               getParentPad(UnwindPad) == getParentPad(&FPI))
3959             SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
3960         }
3961       }
3962       // Make sure we visit all uses of FPI, but for nested pads stop as
3963       // soon as we know where they unwind to.
3964       if (CurrentPad != &FPI)
3965         break;
3966     }
3967     if (UnresolvedAncestorPad) {
3968       if (CurrentPad == UnresolvedAncestorPad) {
3969         // When CurrentPad is FPI itself, we don't mark it as resolved even if
3970         // we've found an unwind edge that exits it, because we need to verify
3971         // all direct uses of FPI.
3972         assert(CurrentPad == &FPI);
3973         continue;
3974       }
3975       // Pop off the worklist any nested pads that we've found an unwind
3976       // destination for.  The pads on the worklist are the uncles,
3977       // great-uncles, etc. of CurrentPad.  We've found an unwind destination
3978       // for all ancestors of CurrentPad up to but not including
3979       // UnresolvedAncestorPad.
3980       Value *ResolvedPad = CurrentPad;
3981       while (!Worklist.empty()) {
3982         Value *UnclePad = Worklist.back();
3983         Value *AncestorPad = getParentPad(UnclePad);
3984         // Walk ResolvedPad up the ancestor list until we either find the
3985         // uncle's parent or the last resolved ancestor.
3986         while (ResolvedPad != AncestorPad) {
3987           Value *ResolvedParent = getParentPad(ResolvedPad);
3988           if (ResolvedParent == UnresolvedAncestorPad) {
3989             break;
3990           }
3991           ResolvedPad = ResolvedParent;
3992         }
3993         // If the resolved ancestor search didn't find the uncle's parent,
3994         // then the uncle is not yet resolved.
3995         if (ResolvedPad != AncestorPad)
3996           break;
3997         // This uncle is resolved, so pop it from the worklist.
3998         Worklist.pop_back();
3999       }
4000     }
4001   }
4002 
4003   if (FirstUnwindPad) {
4004     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
4005       BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
4006       Value *SwitchUnwindPad;
4007       if (SwitchUnwindDest)
4008         SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
4009       else
4010         SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
4011       Assert(SwitchUnwindPad == FirstUnwindPad,
4012              "Unwind edges out of a catch must have the same unwind dest as "
4013              "the parent catchswitch",
4014              &FPI, FirstUser, CatchSwitch);
4015     }
4016   }
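  // For example (illustrative IR, not from a specific test): if %cp is a
  // catchpad of a catchswitch declared with "unwind label %outer", then any
  // call/invoke or nested pad that unwinds out of %cp must also unwind to
  // %outer rather than to some other EH pad.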
4017 
4018   visitInstruction(FPI);
4019 }
4020 
4021 void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
4022   BasicBlock *BB = CatchSwitch.getParent();
4023 
4024   Function *F = BB->getParent();
4025   Assert(F->hasPersonalityFn(),
4026          "CatchSwitchInst needs to be in a function with a personality.",
4027          &CatchSwitch);
4028 
4029   // The catchswitch instruction must be the first non-PHI instruction in the
4030   // block.
4031   Assert(BB->getFirstNonPHI() == &CatchSwitch,
4032          "CatchSwitchInst not the first non-PHI instruction in the block.",
4033          &CatchSwitch);
4034 
4035   auto *ParentPad = CatchSwitch.getParentPad();
4036   Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4037          "CatchSwitchInst has an invalid parent.", ParentPad);
4038 
4039   if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
4040     Instruction *I = UnwindDest->getFirstNonPHI();
4041     Assert(I->isEHPad() && !isa<LandingPadInst>(I),
4042            "CatchSwitchInst must unwind to an EH block which is not a "
4043            "landingpad.",
4044            &CatchSwitch);
4045 
4046     // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
4047     if (getParentPad(I) == ParentPad)
4048       SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
4049   }
4050 
4051   Assert(CatchSwitch.getNumHandlers() != 0,
4052          "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
4053 
4054   for (BasicBlock *Handler : CatchSwitch.handlers()) {
4055     Assert(isa<CatchPadInst>(Handler->getFirstNonPHI()),
4056            "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
4057   }
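  // For example (illustrative IR), a minimal well-formed catchswitch:
  //   %cs = catchswitch within none [label %handler] unwind to caller
  // where %handler begins with a catchpad within %cs.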
4058 
4059   visitEHPadPredecessors(CatchSwitch);
4060   visitTerminator(CatchSwitch);
4061 }
4062 
4063 void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
4064   Assert(isa<CleanupPadInst>(CRI.getOperand(0)),
4065          "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
4066          CRI.getOperand(0));
4067 
4068   if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
4069     Instruction *I = UnwindDest->getFirstNonPHI();
4070     Assert(I->isEHPad() && !isa<LandingPadInst>(I),
4071            "CleanupReturnInst must unwind to an EH block which is not a "
4072            "landingpad.",
4073            &CRI);
4074   }
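  // For example (illustrative IR), both forms below are accepted, provided the
  // unwind destination (if any) begins with a non-landingpad EH pad:
  //   cleanupret from %cp unwind to caller
  //   cleanupret from %cp unwind label %ehcleanup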
4075 
4076   visitTerminator(CRI);
4077 }
4078 
4079 void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
4080   Instruction *Op = cast<Instruction>(I.getOperand(i));
4081   // If we have an invalid invoke, don't try to compute the dominance.
4082   // We already reject it in the invoke specific checks and the dominance
4083   // computation doesn't handle multiple edges.
4084   if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
4085     if (II->getNormalDest() == II->getUnwindDest())
4086       return;
4087   }
4088 
4089   // Quick check whether the def has already been encountered in the same block.
4090   // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
4091   // uses are defined to happen on the incoming edge, not at the instruction.
4092   //
4093   // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
4094   // wrapping an SSA value, assert that we've already encountered it.  See
4095   // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
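  // For example (illustrative IR), a single-block loop may contain
  //   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
  //   %iv.next = add i32 %iv, 1
  // where the PHI uses %iv.next even though it is defined later in the same
  // block; that use happens on the back edge, not at the PHI itself.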
4096   if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
4097     return;
4098 
4099   const Use &U = I.getOperandUse(i);
4100   Assert(DT.dominates(Op, U),
4101          "Instruction does not dominate all uses!", Op, &I);
4102 }
4103 
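// A well-formed attachment looks like the following (illustrative IR under
// typed pointers, not taken from a specific test):
//   %p = load i32*, i32** %pp, !dereferenceable !0
//   !0 = !{i64 4}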
4104 void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
4105   Assert(I.getType()->isPointerTy(), "dereferenceable, dereferenceable_or_null "
4106          "apply only to pointer types", &I);
4107   Assert((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
4108          "dereferenceable, dereferenceable_or_null apply only to load"
4109          " and inttoptr instructions, use attributes for calls or invokes", &I);
4110   Assert(MD->getNumOperands() == 1, "dereferenceable, dereferenceable_or_null "
4111          "take one operand!", &I);
4112   ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
4113   Assert(CI && CI->getType()->isIntegerTy(64), "dereferenceable, "
4114          "dereferenceable_or_null metadata value must be an i64!", &I);
4115 }
4116 
4117 void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
4118   Assert(MD->getNumOperands() >= 2,
4119          "!prof annotations should have at least 2 operands", MD);
4120 
4121   // Check first operand.
4122   Assert(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
4123   Assert(isa<MDString>(MD->getOperand(0)),
4124          "expected string with name of the !prof annotation", MD);
4125   MDString *MDS = cast<MDString>(MD->getOperand(0));
4126   StringRef ProfName = MDS->getString();
4127 
4128   // Check consistency of !prof branch_weights metadata.
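  // For example (illustrative IR), a two-successor branch carries the name
  // string plus one weight per successor:
  //   br i1 %cond, label %then, label %else, !prof !0
  //   !0 = !{!"branch_weights", i32 64, i32 4}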
4129   if (ProfName.equals("branch_weights")) {
4130     unsigned ExpectedNumOperands = 0;
4131     if (BranchInst *BI = dyn_cast<BranchInst>(&I))
4132       ExpectedNumOperands = BI->getNumSuccessors();
4133     else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
4134       ExpectedNumOperands = SI->getNumSuccessors();
4135     else if (isa<CallInst>(&I) || isa<InvokeInst>(&I))
4136       ExpectedNumOperands = 1;
4137     else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
4138       ExpectedNumOperands = IBI->getNumDestinations();
4139     else if (isa<SelectInst>(&I))
4140       ExpectedNumOperands = 2;
4141     else
4142       CheckFailed("!prof branch_weights are not allowed for this instruction",
4143                   MD);
4144 
4145     Assert(MD->getNumOperands() == 1 + ExpectedNumOperands,
4146            "Wrong number of operands", MD);
4147     for (unsigned i = 1; i < MD->getNumOperands(); ++i) {
4148       auto &MDO = MD->getOperand(i);
4149       Assert(MDO, "!prof branch_weights operand should not be null", MD);
4150       Assert(mdconst::dyn_extract<ConstantInt>(MDO),
4151              "!prof branch_weights operand is not a const int");
4152     }
4153   }
4154 }
4155 
4156 /// visitInstruction - Verify that an instruction is well formed.
4157 ///
4158 void Verifier::visitInstruction(Instruction &I) {
4159   BasicBlock *BB = I.getParent();
4160   Assert(BB, "Instruction not embedded in basic block!", &I);
4161 
4162   if (!isa<PHINode>(I)) {   // Check that non-phi nodes are not self referential
4163     for (User *U : I.users()) {
4164       Assert(U != (User *)&I || !DT.isReachableFromEntry(BB),
4165              "Only PHI nodes may reference their own value!", &I);
4166     }
4167   }
4168 
4169   // Check that void typed values don't have names
4170   Assert(!I.getType()->isVoidTy() || !I.hasName(),
4171          "Instruction has a name, but provides a void value!", &I);
4172 
4173   // Check that the return value of the instruction is either void or a legal
4174   // value type.
4175   Assert(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
4176          "Instruction returns a non-scalar type!", &I);
4177 
4178   // Check that the instruction doesn't produce metadata. Calls are already
4179   // checked against the callee type.
4180   Assert(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
4181          "Invalid use of metadata!", &I);
4182 
4183   // Check that all uses of the instruction, if they are instructions
4184   // themselves, actually have parent basic blocks.  If the use is not an
4185   // instruction, it is an error!
4186   for (Use &U : I.uses()) {
4187     if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
4188       Assert(Used->getParent() != nullptr,
4189              "Instruction referencing"
4190              " instruction not embedded in a basic block!",
4191              &I, Used);
4192     else {
4193       CheckFailed("Use of instruction is not an instruction!", U);
4194       return;
4195     }
4196   }
4197 
4198   // Get a pointer to the call base of the instruction if it is some form of
4199   // call.
4200   const CallBase *CBI = dyn_cast<CallBase>(&I);
4201 
4202   for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
4203     Assert(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
4204 
4205     // Check to make sure that only first-class-values are operands to
4206     // instructions.
4207     if (!I.getOperand(i)->getType()->isFirstClassType()) {
4208       Assert(false, "Instruction operands must be first-class values!", &I);
4209     }
4210 
4211     if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
4212       // Check to make sure that the "address of" an intrinsic function is never
4213       // taken.
4214       Assert(!F->isIntrinsic() ||
4215                  (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)),
4216              "Cannot take the address of an intrinsic!", &I);
4217       Assert(
4218           !F->isIntrinsic() || isa<CallInst>(I) ||
4219               F->getIntrinsicID() == Intrinsic::donothing ||
4220               F->getIntrinsicID() == Intrinsic::coro_resume ||
4221               F->getIntrinsicID() == Intrinsic::coro_destroy ||
4222               F->getIntrinsicID() == Intrinsic::experimental_patchpoint_void ||
4223               F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 ||
4224               F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
4225               F->getIntrinsicID() == Intrinsic::wasm_rethrow_in_catch,
4226           "Cannot invoke an intrinsic other than donothing, patchpoint, "
4227           "statepoint, coro_resume, coro_destroy or wasm_rethrow_in_catch",
4228           &I);
4229       Assert(F->getParent() == &M, "Referencing function in another module!",
4230              &I, &M, F, F->getParent());
4231     } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
4232       Assert(OpBB->getParent() == BB->getParent(),
4233              "Referring to a basic block in another function!", &I);
4234     } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
4235       Assert(OpArg->getParent() == BB->getParent(),
4236              "Referring to an argument in another function!", &I);
4237     } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
4238       Assert(GV->getParent() == &M, "Referencing global in another module!", &I,
4239              &M, GV, GV->getParent());
4240     } else if (isa<Instruction>(I.getOperand(i))) {
4241       verifyDominatesUse(I, i);
4242     } else if (isa<InlineAsm>(I.getOperand(i))) {
4243       Assert(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
4244              "Cannot take the address of an inline asm!", &I);
4245     } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
4246       if (CE->getType()->isPtrOrPtrVectorTy() ||
4247           !DL.getNonIntegralAddressSpaces().empty()) {
4248         // If we have a ConstantExpr pointer, we need to see if it came from an
4249         // illegal bitcast.  If the datalayout string specifies non-integral
4250         // address spaces then we also need to check for illegal ptrtoint and
4251         // inttoptr expressions.
4252         visitConstantExprsRecursively(CE);
4253       }
4254     }
4255   }
4256 
4257   if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
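    // For example (illustrative IR), an attachment requesting 2.5 ULPs of
    // accuracy on a floating-point operation:
    //   %r = fdiv float %a, %b, !fpmath !0
    //   !0 = !{float 2.5}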
4258     Assert(I.getType()->isFPOrFPVectorTy(),
4259            "fpmath requires a floating point result!", &I);
4260     Assert(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
4261     if (ConstantFP *CFP0 =
4262             mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
4263       const APFloat &Accuracy = CFP0->getValueAPF();
4264       Assert(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
4265              "fpmath accuracy must have float type", &I);
4266       Assert(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
4267              "fpmath accuracy not a positive number!", &I);
4268     } else {
4269       Assert(false, "invalid fpmath accuracy!", &I);
4270     }
4271   }
4272 
4273   if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
4274     Assert(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
4275            "Ranges are only for loads, calls and invokes!", &I);
4276     visitRangeMetadata(I, Range, I.getType());
4277   }
4278 
4279   if (I.getMetadata(LLVMContext::MD_nonnull)) {
4280     Assert(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
4281            &I);
4282     Assert(isa<LoadInst>(I),
4283            "nonnull applies only to load instructions, use attributes"
4284            " for calls or invokes",
4285            &I);
4286   }
4287 
4288   if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
4289     visitDereferenceableMetadata(I, MD);
4290 
4291   if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
4292     visitDereferenceableMetadata(I, MD);
4293 
4294   if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
4295     TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
4296 
4297   if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
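    // For example (illustrative IR), !align carries a single power-of-two i64
    // and may only appear on a load of a pointer:
    //   %p = load i32*, i32** %pp, !align !0
    //   !0 = !{i64 16}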
4298     Assert(I.getType()->isPointerTy(), "align applies only to pointer types",
4299            &I);
4300     Assert(isa<LoadInst>(I), "align applies only to load instructions, "
4301            "use attributes for calls or invokes", &I);
4302     Assert(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
4303     ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
4304     Assert(CI && CI->getType()->isIntegerTy(64),
4305            "align metadata value must be an i64!", &I);
4306     uint64_t Align = CI->getZExtValue();
4307     Assert(isPowerOf2_64(Align),
4308            "align metadata value must be a power of 2!", &I);
4309     Assert(Align <= Value::MaximumAlignment,
4310            "alignment is larger than implementation-defined limit", &I);
4311   }
4312 
4313   if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
4314     visitProfMetadata(I, MD);
4315 
4316   if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
4317     AssertDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
4318     visitMDNode(*N, AreDebugLocsAllowed::Yes);
4319   }
4320 
4321   if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I)) {
4322     verifyFragmentExpression(*DII);
4323     verifyNotEntryValue(*DII);
4324   }
4325 
4326   SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
4327   I.getAllMetadata(MDs);
4328   for (auto Attachment : MDs) {
4329     unsigned Kind = Attachment.first;
4330     auto AllowLocs =
4331         (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
4332             ? AreDebugLocsAllowed::Yes
4333             : AreDebugLocsAllowed::No;
4334     visitMDNode(*Attachment.second, AllowLocs);
4335   }
4336 
4337   InstsInThisBlock.insert(&I);
4338 }
4339 
4340 /// Allow intrinsics to be verified in different ways.
4341 void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
4342   Function *IF = Call.getCalledFunction();
4343   Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!",
4344          IF);
4345 
4346   // Verify that the intrinsic prototype lines up with what the .td files
4347   // describe.
4348   FunctionType *IFTy = IF->getFunctionType();
4349   bool IsVarArg = IFTy->isVarArg();
4350 
4351   SmallVector<Intrinsic::IITDescriptor, 8> Table;
4352   getIntrinsicInfoTableEntries(ID, Table);
4353   ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
4354 
4355   // Walk the descriptors to extract overloaded types.
4356   SmallVector<Type *, 4> ArgTys;
4357   Intrinsic::MatchIntrinsicTypesResult Res =
4358       Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
4359   Assert(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
4360          "Intrinsic has incorrect return type!", IF);
4361   Assert(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
4362          "Intrinsic has incorrect argument type!", IF);
4363 
4364   // Verify if the intrinsic call matches the vararg property.
4365   if (IsVarArg)
4366     Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
4367            "Intrinsic was not defined with variable arguments!", IF);
4368   else
4369     Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
4370            "Callsite was not defined with variable arguments!", IF);
4371 
4372   // All descriptors should be absorbed by now.
4373   Assert(TableRef.empty(), "Intrinsic has too few arguments!", IF);
4374 
4375   // Now that we have the intrinsic ID and the actual argument types (and we
4376   // know they are legal for the intrinsic!) get the intrinsic name through the
4377   // usual means.  This allows us to verify the mangling of argument types into
4378   // the name.
4379   const std::string ExpectedName = Intrinsic::getName(ID, ArgTys);
4380   Assert(ExpectedName == IF->getName(),
4381          "Intrinsic name not mangled correctly for type arguments! "
4382          "Should be: " +
4383              ExpectedName,
4384          IF);
4385 
4386   // If the intrinsic takes MDNode arguments, verify that they are either global
4387   // or are local to *this* function.
4388   for (Value *V : Call.args())
4389     if (auto *MD = dyn_cast<MetadataAsValue>(V))
4390       visitMetadataAsValue(*MD, Call.getCaller());
4391 
4392   switch (ID) {
4393   default:
4394     break;
4395   case Intrinsic::assume: {
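    // A rough sketch of the operand-bundle form checked below (illustrative
    // IR; bundle tags are attribute names):
    //   call void @llvm.assume(i1 true) [ "align"(i32* %p, i64 16),
    //                                     "nonnull"(i32* %q) ]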
4396     for (auto &Elem : Call.bundle_op_infos()) {
4397       Assert(Elem.Tag->getKey() == "ignore" ||
4398                  Attribute::isExistingAttribute(Elem.Tag->getKey()),
4399              "tags must be valid attribute names");
4400       Assert(Elem.End - Elem.Begin <= 2, "too many arguments");
4401       Attribute::AttrKind Kind =
4402           Attribute::getAttrKindFromName(Elem.Tag->getKey());
4403       if (Kind == Attribute::None)
4404         break;
4405       if (Attribute::doesAttrKindHaveArgument(Kind)) {
4406         Assert(Elem.End - Elem.Begin == 2,
4407                "this attribute should have 2 arguments");
4408         Assert(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
4409                "the second argument should be a constant integral value");
4410       } else if (isFuncOnlyAttr(Kind)) {
4411         Assert((Elem.End - Elem.Begin) == 0, "this attribute has no argument");
4412       } else if (!isFuncOrArgAttr(Kind)) {
4413         Assert((Elem.End - Elem.Begin) == 1,
4414                "this attribute should have one argument");
4415       }
4416     }
4417     break;
4418   }
4419   case Intrinsic::coro_id: {
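    // The operand checked here is the fourth argument of llvm.coro.id
    // (illustrative IR; @info is a hypothetical constant global, and a null
    // info pointer is also accepted):
    //   %id = call token @llvm.coro.id(i32 0, i8* null, i8* null,
    //                                  i8* bitcast ([2 x i8*]* @info to i8*))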
4420     auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
4421     if (isa<ConstantPointerNull>(InfoArg))
4422       break;
4423     auto *GV = dyn_cast<GlobalVariable>(InfoArg);
4424     Assert(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
4425       "info argument of llvm.coro.begin must refer to an initialized "
4426       "constant");
4427     Constant *Init = GV->getInitializer();
4428     Assert(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
4429       "info argument of llvm.coro.begin must refer to either a struct or "
4430       "an array");
4431     break;
4432   }
4433 #define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC)                        \
4434   case Intrinsic::INTRINSIC:
4435 #include "llvm/IR/ConstrainedOps.def"
4436     visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
4437     break;
4438   case Intrinsic::dbg_declare: // llvm.dbg.declare
4439     Assert(isa<MetadataAsValue>(Call.getArgOperand(0)),
4440            "invalid llvm.dbg.declare intrinsic call 1", Call);
4441     visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
4442     break;
4443   case Intrinsic::dbg_addr: // llvm.dbg.addr
4444     visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(Call));
4445     break;
4446   case Intrinsic::dbg_value: // llvm.dbg.value
4447     visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
4448     break;
4449   case Intrinsic::dbg_label: // llvm.dbg.label
4450     visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
4451     break;
4452   case Intrinsic::memcpy:
4453   case Intrinsic::memcpy_inline:
4454   case Intrinsic::memmove:
4455   case Intrinsic::memset: {
4456     const auto *MI = cast<MemIntrinsic>(&Call);
4457     auto IsValidAlignment = [&](unsigned Alignment) -> bool {
4458       return Alignment == 0 || isPowerOf2_32(Alignment);
4459     };
4460     Assert(IsValidAlignment(MI->getDestAlignment()),
4461            "alignment of arg 0 of memory intrinsic must be 0 or a power of 2",
4462            Call);
4463     if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
4464       Assert(IsValidAlignment(MTI->getSourceAlignment()),
4465              "alignment of arg 1 of memory intrinsic must be 0 or a power of 2",
4466              Call);
4467     }
4468 
4469     break;
4470   }
4471   case Intrinsic::memcpy_element_unordered_atomic:
4472   case Intrinsic::memmove_element_unordered_atomic:
4473   case Intrinsic::memset_element_unordered_atomic: {
4474     const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
4475 
4476     ConstantInt *ElementSizeCI =
4477         cast<ConstantInt>(AMI->getRawElementSizeInBytes());
4478     const APInt &ElementSizeVal = ElementSizeCI->getValue();
4479     Assert(ElementSizeVal.isPowerOf2(),
4480            "element size of the element-wise atomic memory intrinsic "
4481            "must be a power of 2",
4482            Call);
4483 
4484     auto IsValidAlignment = [&](uint64_t Alignment) {
4485       return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
4486     };
4487     uint64_t DstAlignment = AMI->getDestAlignment();
4488     Assert(IsValidAlignment(DstAlignment),
4489            "incorrect alignment of the destination argument", Call);
4490     if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
4491       uint64_t SrcAlignment = AMT->getSourceAlignment();
4492       Assert(IsValidAlignment(SrcAlignment),
4493              "incorrect alignment of the source argument", Call);
4494     }
4495     break;
4496   }
4497   case Intrinsic::call_preallocated_setup: {
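    // A rough sketch of the pattern verified below (illustrative IR; @use_it
    // is a hypothetical callee taking one preallocated argument):
    //   %t = call token @llvm.call.preallocated.setup(i32 1)
    //   %a = call i8* @llvm.call.preallocated.arg(token %t, i32 0)
    //            preallocated(i32)
    //   %x = bitcast i8* %a to i32*
    //   call void @use_it(i32* preallocated(i32) %x) [ "preallocated"(token %t) ]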
4498     auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
4499     Assert(NumArgs != nullptr,
4500            "llvm.call.preallocated.setup argument must be a constant");
4501     bool FoundCall = false;
4502     for (User *U : Call.users()) {
4503       auto *UseCall = dyn_cast<CallBase>(U);
4504       Assert(UseCall != nullptr,
4505              "Uses of llvm.call.preallocated.setup must be calls");
4506       const Function *Fn = UseCall->getCalledFunction();
4507       if (Fn && Fn->getIntrinsicID() == Intrinsic::call_preallocated_arg) {
4508         auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
4509         Assert(AllocArgIndex != nullptr,
4510                "llvm.call.preallocated.alloc arg index must be a constant");
4511         auto AllocArgIndexInt = AllocArgIndex->getValue();
4512         Assert(AllocArgIndexInt.sge(0) &&
4513                    AllocArgIndexInt.slt(NumArgs->getValue()),
4514                "llvm.call.preallocated.alloc arg index must be between 0 and "
4515                "corresponding "
4516                "llvm.call.preallocated.setup's argument count");
4517       } else {
4518         Assert(!FoundCall, "Can have at most one call corresponding to a "
4519                            "llvm.call.preallocated.setup");
4520         FoundCall = true;
4521         size_t NumPreallocatedArgs = 0;
4522         for (unsigned i = 0; i < UseCall->getNumArgOperands(); i++) {
4523           if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
4524             ++NumPreallocatedArgs;
4525           }
4526         }
4527         Assert(NumArgs->equalsInt(NumPreallocatedArgs),
4528                "llvm.call.preallocated.setup arg size must be equal to number "
4529                "of preallocated arguments "
4530                "at call site",
4531                Call, *UseCall);
4532         // getOperandBundle() cannot be called if more than one operand
4533         // bundle of the given kind exists. There is already a check
4534         // elsewhere for this, so skip here if we see more than one.
4535         if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
4536             1) {
4537           return;
4538         }
4539         auto PreallocatedBundle =
4540             UseCall->getOperandBundle(LLVMContext::OB_preallocated);
4541         Assert(PreallocatedBundle,
4542                "Use of llvm.call.preallocated.setup outside intrinsics "
4543                "must be in \"preallocated\" operand bundle");
4544         Assert(PreallocatedBundle->Inputs.front().get() == &Call,
4545                "preallocated bundle must have token from corresponding "
4546                "llvm.call.preallocated.setup");
4547       }
4548     }
4549     break;
4550   }
4551   case Intrinsic::call_preallocated_arg: {
4552     auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
4553     Assert(Token && Token->getCalledFunction()->getIntrinsicID() ==
4554                         Intrinsic::call_preallocated_setup,
4555            "llvm.call.preallocated.arg token argument must be a "
4556            "llvm.call.preallocated.setup");
4557     Assert(Call.hasFnAttr(Attribute::Preallocated),
4558            "llvm.call.preallocated.arg must be called with a \"preallocated\" "
4559            "call site attribute");
4560     break;
4561   }
4562   case Intrinsic::gcroot:
4563   case Intrinsic::gcwrite:
4564   case Intrinsic::gcread:
4565     if (ID == Intrinsic::gcroot) {
4566       AllocaInst *AI =
4567           dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
4568       Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
4569       Assert(isa<Constant>(Call.getArgOperand(1)),
4570              "llvm.gcroot parameter #2 must be a constant.", Call);
4571       if (!AI->getAllocatedType()->isPointerTy()) {
4572         Assert(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
4573                "llvm.gcroot parameter #1 must either be a pointer alloca, "
4574                "or argument #2 must be a non-null constant.",
4575                Call);
4576       }
4577     }
4578 
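    // For example (illustrative IR), the gcroot form checked above is only
    // legal inside a function that declares a GC strategy, e.g.
    // "define void @f() gc "statepoint-example"":
    //   %root = alloca i8*
    //   call void @llvm.gcroot(i8** %root, i8* null)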
4579     Assert(Call.getParent()->getParent()->hasGC(),
4580            "Enclosing function does not use GC.", Call);
4581     break;
4582   case Intrinsic::init_trampoline:
4583     Assert(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
4584            "llvm.init_trampoline parameter #2 must resolve to a function.",
4585            Call);
4586     break;
4587   case Intrinsic::prefetch:
4588     Assert(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2 &&
4589            cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
4590            "invalid arguments to llvm.prefetch", Call);
4591     break;
4592   case Intrinsic::stackprotector:
4593     Assert(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
4594            "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
4595     break;
4596   case Intrinsic::localescape: {
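    // For example (illustrative IR), escaping two static allocas from the
    // entry block so that llvm.localrecover can find them later:
    //   call void (...) @llvm.localescape(i32* %a, i32* %b)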
4597     BasicBlock *BB = Call.getParent();
4598     Assert(BB == &BB->getParent()->front(),
4599            "llvm.localescape used outside of entry block", Call);
4600     Assert(!SawFrameEscape,
4601            "multiple calls to llvm.localescape in one function", Call);
4602     for (Value *Arg : Call.args()) {
4603       if (isa<ConstantPointerNull>(Arg))
4604         continue; // Null values are allowed as placeholders.
4605       auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
4606       Assert(AI && AI->isStaticAlloca(),
4607              "llvm.localescape only accepts static allocas", Call);
4608     }
4609     FrameEscapeInfo[BB->getParent()].first = Call.getNumArgOperands();
4610     SawFrameEscape = true;
4611     break;
4612   }
4613   case Intrinsic::localrecover: {
4614     Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
4615     Function *Fn = dyn_cast<Function>(FnArg);
4616     Assert(Fn && !Fn->isDeclaration(),
4617            "llvm.localrecover first "
4618            "argument must be a function defined in this module",
4619            Call);
4620     auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
4621     auto &Entry = FrameEscapeInfo[Fn];
4622     Entry.second = unsigned(
4623         std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
4624     break;
4625   }
4626 
4627   case Intrinsic::experimental_gc_statepoint:
4628     if (auto *CI = dyn_cast<CallInst>(&Call))
4629       Assert(!CI->isInlineAsm(),
4630              "gc.statepoint support for inline assembly unimplemented", CI);
4631     Assert(Call.getParent()->getParent()->hasGC(),
4632            "Enclosing function does not use GC.", Call);
4633 
4634     verifyStatepoint(Call);
4635     break;
4636   case Intrinsic::experimental_gc_result: {
4637     Assert(Call.getParent()->getParent()->hasGC(),
4638            "Enclosing function does not use GC.", Call);
4639     // Are we tied to a statepoint properly?
4640     const auto *StatepointCall = dyn_cast<CallBase>(Call.getArgOperand(0));
4641     const Function *StatepointFn =
4642         StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
4643     Assert(StatepointFn && StatepointFn->isDeclaration() &&
4644                StatepointFn->getIntrinsicID() ==
4645                    Intrinsic::experimental_gc_statepoint,
4646            "gc.result operand #1 must be from a statepoint", Call,
4647            Call.getArgOperand(0));
4648 
4649     // Assert that result type matches wrapped callee.
4650     const Value *Target = StatepointCall->getArgOperand(2);
4651     auto *PT = cast<PointerType>(Target->getType());
4652     auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
4653     Assert(Call.getType() == TargetFuncType->getReturnType(),
4654            "gc.result result type does not match wrapped callee", Call);
4655     break;
4656   }
4657   case Intrinsic::experimental_gc_relocate: {
4658     Assert(Call.getNumArgOperands() == 3, "wrong number of arguments", Call);
4659 
4660     Assert(isa<PointerType>(Call.getType()->getScalarType()),
4661            "gc.relocate must return a pointer or a vector of pointers", Call);
4662 
4663     // Check that this relocate is correctly tied to the statepoint
4664 
4665     // This is case for relocate on the unwinding path of an invoke statepoint
4666     if (LandingPadInst *LandingPad =
4667             dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
4668 
4669       const BasicBlock *InvokeBB =
4670           LandingPad->getParent()->getUniquePredecessor();
4671 
4672       // Landingpad relocates should have only one predecessor, whose
4673       // terminator is an invoke statepoint.
4674       Assert(InvokeBB, "safepoints should have unique landingpads",
4675              LandingPad->getParent());
4676       Assert(InvokeBB->getTerminator(), "safepoint block should be well formed",
4677              InvokeBB);
4678       Assert(isStatepoint(InvokeBB->getTerminator()),
4679              "gc relocate should be linked to a statepoint", InvokeBB);
4680     } else {
4681       // In all other cases relocate should be tied to the statepoint directly.
4682       // This covers relocates on a normal return path of invoke statepoint and
4683       // relocates of a call statepoint.
4684       auto Token = Call.getArgOperand(0);
4685       Assert(isa<Instruction>(Token) && isStatepoint(cast<Instruction>(Token)),
4686              "gc relocate is incorrectly tied to the statepoint", Call, Token);
4687     }
4688 
4689     // Verify rest of the relocate arguments.
4690     const CallBase &StatepointCall =
4691         *cast<CallBase>(cast<GCRelocateInst>(Call).getStatepoint());
4692 
4693     // Both the base and derived must be piped through the safepoint.
4694     Value *Base = Call.getArgOperand(1);
4695     Assert(isa<ConstantInt>(Base),
4696            "gc.relocate operand #2 must be integer offset", Call);
4697 
4698     Value *Derived = Call.getArgOperand(2);
4699     Assert(isa<ConstantInt>(Derived),
4700            "gc.relocate operand #3 must be integer offset", Call);
4701 
4702     const int BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
4703     const int DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
4704     // Check the bounds
4705     Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCall.arg_size(),
4706            "gc.relocate: statepoint base index out of bounds", Call);
4707     Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCall.arg_size(),
4708            "gc.relocate: statepoint derived index out of bounds", Call);
4709 
4710     // Check that BaseIndex and DerivedIndex fall within the 'gc parameters'
4711     // section of the statepoint's argument.
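    // The index arithmetic below assumes the pre-bundle statepoint operand
    // layout (sketch, not normative):
    //   <id>, <num patch bytes>, <target>, <num call args>, <flags>,
    //   [call args...], <num transition args>, [transition args...],
    //   <num deopt args>, [deopt args...], [gc parameters...]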
4712     Assert(StatepointCall.arg_size() > 0,
4713            "gc.statepoint: insufficient arguments");
4714     Assert(isa<ConstantInt>(StatepointCall.getArgOperand(3)),
4715            "gc.statepoint: number of call arguments must be constant integer");
4716     const unsigned NumCallArgs =
4717         cast<ConstantInt>(StatepointCall.getArgOperand(3))->getZExtValue();
4718     Assert(StatepointCall.arg_size() > NumCallArgs + 5,
4719            "gc.statepoint: mismatch in number of call arguments");
4720     Assert(isa<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5)),
4721            "gc.statepoint: number of transition arguments must be "
4722            "a constant integer");
4723     const int NumTransitionArgs =
4724         cast<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5))
4725             ->getZExtValue();
4726     const int DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1;
4727     Assert(isa<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart)),
4728            "gc.statepoint: number of deoptimization arguments must be "
4729            "a constant integer");
4730     const int NumDeoptArgs =
4731         cast<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart))
4732             ->getZExtValue();
4733     const int GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs;
4734     const int GCParamArgsEnd = StatepointCall.arg_size();
4735     Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd,
4736            "gc.relocate: statepoint base index doesn't fall within the "
4737            "'gc parameters' section of the statepoint call",
4738            Call);
4739     Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd,
4740            "gc.relocate: statepoint derived index doesn't fall within the "
4741            "'gc parameters' section of the statepoint call",
4742            Call);
4743 
4744     // Relocated value must be either a pointer type or vector-of-pointer type,
4745     // but gc_relocate does not need to return the same pointer type as the
4746     // relocated pointer. It can be cast to the correct type later if desired.
4747     // However, they must have the same address space and 'vectorness'.
4748     GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
4749     Assert(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
4750            "gc.relocate: relocated value must be a gc pointer", Call);
4751 
4752     auto ResultType = Call.getType();
4753     auto DerivedType = Relocate.getDerivedPtr()->getType();
4754     Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(),
4755            "gc.relocate: vector relocates to vector and pointer to pointer",
4756            Call);
4757     Assert(
4758         ResultType->getPointerAddressSpace() ==
4759             DerivedType->getPointerAddressSpace(),
4760         "gc.relocate: relocating a pointer shouldn't change its address space",
4761         Call);
4762     break;
4763   }
4764   case Intrinsic::eh_exceptioncode:
4765   case Intrinsic::eh_exceptionpointer: {
4766     Assert(isa<CatchPadInst>(Call.getArgOperand(0)),
4767            "eh.exceptionpointer argument must be a catchpad", Call);
4768     break;
4769   }
4770   case Intrinsic::masked_load: {
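    // A rough sketch of the call shape checked below (illustrative IR under
    // typed pointers):
    //   %v = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(
    //            <4 x i32>* %p, i32 4, <4 x i1> %mask, <4 x i32> %passthru)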
4771     Assert(Call.getType()->isVectorTy(), "masked_load: must return a vector",
4772            Call);
4773 
4774     Value *Ptr = Call.getArgOperand(0);
4775     ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
4776     Value *Mask = Call.getArgOperand(2);
4777     Value *PassThru = Call.getArgOperand(3);
4778     Assert(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
4779            Call);
4780     Assert(Alignment->getValue().isPowerOf2(),
4781            "masked_load: alignment must be a power of 2", Call);
4782 
4783     // DataTy is the overloaded type
4784     Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4785     Assert(DataTy == Call.getType(),
4786            "masked_load: return must match pointer type", Call);
4787     Assert(PassThru->getType() == DataTy,
4788            "masked_load: pass through and data type must match", Call);
4789     Assert(cast<VectorType>(Mask->getType())->getNumElements() ==
4790                cast<VectorType>(DataTy)->getNumElements(),
4791            "masked_load: vector mask must be same length as data", Call);
4792     break;
4793   }
4794   case Intrinsic::masked_store: {
4795     Value *Val = Call.getArgOperand(0);
4796     Value *Ptr = Call.getArgOperand(1);
4797     ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
4798     Value *Mask = Call.getArgOperand(3);
4799     Assert(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
4800            Call);
4801     Assert(Alignment->getValue().isPowerOf2(),
4802            "masked_store: alignment must be a power of 2", Call);
4803 
4804     // DataTy is the overloaded type
4805     Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4806     Assert(DataTy == Val->getType(),
4807            "masked_store: storee must match pointer type", Call);
4808     Assert(cast<VectorType>(Mask->getType())->getNumElements() ==
4809                cast<VectorType>(DataTy)->getNumElements(),
4810            "masked_store: vector mask must be same length as data", Call);
4811     break;
4812   }
4813 
4814   case Intrinsic::masked_gather: {
4815     const APInt &Alignment =
4816         cast<ConstantInt>(Call.getArgOperand(1))->getValue();
4817     Assert(Alignment.isNullValue() || Alignment.isPowerOf2(),
4818            "masked_gather: alignment must be 0 or a power of 2", Call);
4819     break;
4820   }
4821   case Intrinsic::masked_scatter: {
4822     const APInt &Alignment =
4823         cast<ConstantInt>(Call.getArgOperand(2))->getValue();
4824     Assert(Alignment.isNullValue() || Alignment.isPowerOf2(),
4825            "masked_scatter: alignment must be 0 or a power of 2", Call);
4826     break;
4827   }
4828 
4829   case Intrinsic::experimental_guard: {
4830     Assert(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
4831     Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
4832            "experimental_guard must have exactly one "
4833            "\"deopt\" operand bundle");
4834     break;
4835   }
4836 
4837   case Intrinsic::experimental_deoptimize: {
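    // For example (illustrative IR), a well-formed use in an i32-returning
    // caller:
    //   %v = call i32 (...) @llvm.experimental.deoptimize.i32(i32 1)
    //            [ "deopt"(i32 0) ]
    //   ret i32 %v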
4838     Assert(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
4839            Call);
4840     Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
4841            "experimental_deoptimize must have exactly one "
4842            "\"deopt\" operand bundle");
4843     Assert(Call.getType() == Call.getFunction()->getReturnType(),
4844            "experimental_deoptimize return type must match caller return type");
4845 
4846     if (isa<CallInst>(Call)) {
4847       auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
4848       Assert(RI,
4849              "calls to experimental_deoptimize must be followed by a return");
4850 
4851       if (!Call.getType()->isVoidTy() && RI)
4852         Assert(RI->getReturnValue() == &Call,
4853                "calls to experimental_deoptimize must be followed by a return "
4854                "of the value computed by experimental_deoptimize");
4855     }
4856 
4857     break;
4858   }
4859   case Intrinsic::sadd_sat:
4860   case Intrinsic::uadd_sat:
4861   case Intrinsic::ssub_sat:
4862   case Intrinsic::usub_sat: {
4863     Value *Op1 = Call.getArgOperand(0);
4864     Value *Op2 = Call.getArgOperand(1);
4865     Assert(Op1->getType()->isIntOrIntVectorTy(),
4866            "first operand of [us][add|sub]_sat must be an int type or vector "
4867            "of ints");
4868     Assert(Op2->getType()->isIntOrIntVectorTy(),
4869            "second operand of [us][add|sub]_sat must be an int type or vector "
4870            "of ints");
4871     break;
4872   }
4873   case Intrinsic::smul_fix:
4874   case Intrinsic::smul_fix_sat:
4875   case Intrinsic::umul_fix:
4876   case Intrinsic::umul_fix_sat:
4877   case Intrinsic::sdiv_fix:
4878   case Intrinsic::sdiv_fix_sat:
4879   case Intrinsic::udiv_fix:
4880   case Intrinsic::udiv_fix_sat: {
4881     Value *Op1 = Call.getArgOperand(0);
4882     Value *Op2 = Call.getArgOperand(1);
4883     Assert(Op1->getType()->isIntOrIntVectorTy(),
4884            "first operand of [us][mul|div]_fix[_sat] must be an int type or "
4885            "vector of ints");
4886     Assert(Op2->getType()->isIntOrIntVectorTy(),
4887            "second operand of [us][mul|div]_fix[_sat] must be an int type or "
4888            "vector of ints");
4889 
4890     auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
4891     Assert(Op3->getType()->getBitWidth() <= 32,
4892            "third argument of [us][mul|div]_fix[_sat] must fit within 32 bits");
4893 
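    // For example (illustrative IR), a signed fixed-point multiply of two i32
    // values with 16 fractional bits; its scale (16) must be < 32:
    //   %r = call i32 @llvm.smul.fix.i32(i32 %a, i32 %b, i32 16)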
4894     if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
4895         ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
4896       Assert(
4897           Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
4898           "the scale of s[mul|div]_fix[_sat] must be less than the width of "
4899           "the operands");
4900     } else {
4901       Assert(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
4902              "the scale of u[mul|div]_fix[_sat] must be less than or equal "
4903              "to the width of the operands");
4904     }
4905     break;
4906   }
4907   case Intrinsic::lround:
4908   case Intrinsic::llround:
4909   case Intrinsic::lrint:
4910   case Intrinsic::llrint: {
4911     Type *ValTy = Call.getArgOperand(0)->getType();
4912     Type *ResultTy = Call.getType();
4913     Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
4914            "Intrinsic does not support vectors", &Call);
4915     break;
4916   }
4917   case Intrinsic::bswap: {
4918     Type *Ty = Call.getType();
4919     unsigned Size = Ty->getScalarSizeInBits();
4920     Assert(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
4921     break;
4922   }
4923   case Intrinsic::matrix_multiply:
4924   case Intrinsic::matrix_transpose:
4925   case Intrinsic::matrix_columnwise_load:
4926   case Intrinsic::matrix_columnwise_store: {
4927     ConstantInt *NumRows;
4928     ConstantInt *NumColumns;
4929     VectorType *TypeToCheck;
4930     switch (ID) {
4931     case Intrinsic::matrix_multiply:
4932       NumRows = cast<ConstantInt>(Call.getArgOperand(2));
4933       NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
4934       TypeToCheck = cast<VectorType>(Call.getType());
4935       break;
4936     case Intrinsic::matrix_transpose:
4937       NumRows = cast<ConstantInt>(Call.getArgOperand(1));
4938       NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
4939       TypeToCheck = cast<VectorType>(Call.getType());
4940       break;
4941     case Intrinsic::matrix_columnwise_load:
4942       NumRows = cast<ConstantInt>(Call.getArgOperand(2));
4943       NumColumns = cast<ConstantInt>(Call.getArgOperand(3));
4944       TypeToCheck = cast<VectorType>(Call.getType());
4945       break;
4946     case Intrinsic::matrix_columnwise_store:
4947       NumRows = cast<ConstantInt>(Call.getArgOperand(3));
4948       NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
4949       TypeToCheck = cast<VectorType>(Call.getArgOperand(0)->getType());
4950       break;
4951     default:
4952       llvm_unreachable("unexpected intrinsic");
4953     }
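    // For example (illustrative IR), a 3x2 * 2x4 multiply must produce a
    // vector with 3 * 4 = 12 elements:
    //   %r = call <12 x float> @llvm.matrix.multiply.v12f32.v6f32.v8f32(
    //            <6 x float> %a, <8 x float> %b, i32 3, i32 2, i32 4)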
4954     Assert(TypeToCheck->getNumElements() ==
4955                NumRows->getZExtValue() * NumColumns->getZExtValue(),
4956            "result of a matrix operation does not fit in the returned vector");
4957     break;
4958   }
4959   }
4960 }
4961 
4962 /// Carefully grab the subprogram from a local scope.
4963 ///
4964 /// This carefully grabs the subprogram from a local scope, avoiding the
4965 /// built-in assertions that would typically fire.
4966 static DISubprogram *getSubprogram(Metadata *LocalScope) {
4967   if (!LocalScope)
4968     return nullptr;
4969 
4970   if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
4971     return SP;
4972 
4973   if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
4974     return getSubprogram(LB->getRawScope());
4975 
4976   // Just return null; broken scope chains are checked elsewhere.
4977   assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
4978   return nullptr;
4979 }
4980 
4981 void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
4982   unsigned NumOperands;
4983   bool HasRoundingMD;
4984   switch (FPI.getIntrinsicID()) {
4985 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
4986   case Intrinsic::INTRINSIC:                                                   \
4987     NumOperands = NARG;                                                        \
4988     HasRoundingMD = ROUND_MODE;                                                \
4989     break;
4990 #include "llvm/IR/ConstrainedOps.def"
4991   default:
4992     llvm_unreachable("Invalid constrained FP intrinsic!");
4993   }
4994   NumOperands += (1 + HasRoundingMD);
4995   // Compare intrinsics carry an extra predicate metadata operand.
4996   if (isa<ConstrainedFPCmpIntrinsic>(FPI))
4997     NumOperands += 1;
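  // For example (illustrative IR), a constrained fadd carries two FP operands
  // plus rounding-mode and exception-behavior metadata:
  //   %r = call float @llvm.experimental.constrained.fadd.f32(
  //            float %a, float %b,
  //            metadata !"round.dynamic", metadata !"fpexcept.strict")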
4998   Assert((FPI.getNumArgOperands() == NumOperands),
4999          "invalid arguments for constrained FP intrinsic", &FPI);
5000 
5001   switch (FPI.getIntrinsicID()) {
5002   case Intrinsic::experimental_constrained_lrint:
5003   case Intrinsic::experimental_constrained_llrint: {
5004     Type *ValTy = FPI.getArgOperand(0)->getType();
5005     Type *ResultTy = FPI.getType();
5006     Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
5007            "Intrinsic does not support vectors", &FPI);
5008   }
5009     break;
5010 
5011   case Intrinsic::experimental_constrained_lround:
5012   case Intrinsic::experimental_constrained_llround: {
5013     Type *ValTy = FPI.getArgOperand(0)->getType();
5014     Type *ResultTy = FPI.getType();
5015     Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
5016            "Intrinsic does not support vectors", &FPI);
5017     break;
5018   }
5019 
5020   case Intrinsic::experimental_constrained_fcmp:
5021   case Intrinsic::experimental_constrained_fcmps: {
5022     auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
5023     Assert(CmpInst::isFPPredicate(Pred),
5024            "invalid predicate for constrained FP comparison intrinsic", &FPI);
5025     break;
5026   }
5027 
5028   case Intrinsic::experimental_constrained_fptosi:
5029   case Intrinsic::experimental_constrained_fptoui: {
5030     Value *Operand = FPI.getArgOperand(0);
5031     uint64_t NumSrcElem = 0;
5032     Assert(Operand->getType()->isFPOrFPVectorTy(),
5033            "Intrinsic first argument must be floating point", &FPI);
5034     if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
5035       NumSrcElem = OperandT->getNumElements();
5036     }
5037 
5038     Operand = &FPI;
5039     Assert((NumSrcElem > 0) == Operand->getType()->isVectorTy(),
5040            "Intrinsic first argument and result disagree on vector use", &FPI);
5041     Assert(Operand->getType()->isIntOrIntVectorTy(),
5042            "Intrinsic result must be an integer", &FPI);
5043     if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
5044       Assert(NumSrcElem == OperandT->getNumElements(),
5045              "Intrinsic first argument and result vector lengths must be equal",
5046              &FPI);
5047     }
5048   }
5049     break;
5050 
5051   case Intrinsic::experimental_constrained_sitofp:
5052   case Intrinsic::experimental_constrained_uitofp: {
5053     Value *Operand = FPI.getArgOperand(0);
5054     uint64_t NumSrcElem = 0;
5055     Assert(Operand->getType()->isIntOrIntVectorTy(),
5056            "Intrinsic first argument must be integer", &FPI);
5057     if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
5058       NumSrcElem = OperandT->getNumElements();
5059     }
5060 
5061     Operand = &FPI;
5062     Assert((NumSrcElem > 0) == Operand->getType()->isVectorTy(),
5063            "Intrinsic first argument and result disagree on vector use", &FPI);
5064     Assert(Operand->getType()->isFPOrFPVectorTy(),
5065            "Intrinsic result must be a floating point", &FPI);
5066     if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
5067       Assert(NumSrcElem == OperandT->getNumElements(),
5068              "Intrinsic first argument and result vector lengths must be equal",
5069              &FPI);
5070     }
5071   } break;
5072 
5073   case Intrinsic::experimental_constrained_fptrunc:
5074   case Intrinsic::experimental_constrained_fpext: {
5075     Value *Operand = FPI.getArgOperand(0);
5076     Type *OperandTy = Operand->getType();
5077     Value *Result = &FPI;
5078     Type *ResultTy = Result->getType();
5079     Assert(OperandTy->isFPOrFPVectorTy(),
5080            "Intrinsic first argument must be FP or FP vector", &FPI);
5081     Assert(ResultTy->isFPOrFPVectorTy(),
5082            "Intrinsic result must be FP or FP vector", &FPI);
5083     Assert(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
5084            "Intrinsic first argument and result disagree on vector use", &FPI);
5085     if (OperandTy->isVectorTy()) {
5086       auto *OperandVecTy = cast<VectorType>(OperandTy);
5087       auto *ResultVecTy = cast<VectorType>(ResultTy);
5088       Assert(OperandVecTy->getNumElements() == ResultVecTy->getNumElements(),
5089              "Intrinsic first argument and result vector lengths must be equal",
5090              &FPI);
5091     }
5092     if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
5093       Assert(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
5094              "Intrinsic first argument's type must be larger than result type",
5095              &FPI);
5096     } else {
5097       Assert(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
5098              "Intrinsic first argument's type must be smaller than result type",
5099              &FPI);
5100     }
5101   }
5102     break;
5103 
5104   default:
5105     break;
5106   }
5107 
5108   // If a non-metadata argument is passed in a metadata slot then the
5109   // error will be caught earlier when the incorrect argument doesn't
5110   // match the specification in the intrinsic call table. Thus, no
5111   // argument type check is needed here.
5112 
5113   Assert(FPI.getExceptionBehavior().hasValue(),
5114          "invalid exception behavior argument", &FPI);
5115   if (HasRoundingMD) {
5116     Assert(FPI.getRoundingMode().hasValue(),
5117            "invalid rounding mode argument", &FPI);
5118   }
5119 }
5120 
5121 void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
5122   auto *MD = cast<MetadataAsValue>(DII.getArgOperand(0))->getMetadata();
5123   AssertDI(isa<ValueAsMetadata>(MD) ||
5124              (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
5125          "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
5126   AssertDI(isa<DILocalVariable>(DII.getRawVariable()),
5127          "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
5128          DII.getRawVariable());
5129   AssertDI(isa<DIExpression>(DII.getRawExpression()),
5130          "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
5131          DII.getRawExpression());
5132 
5133   // Ignore broken !dbg attachments; they're checked elsewhere.
5134   if (MDNode *N = DII.getDebugLoc().getAsMDNode())
5135     if (!isa<DILocation>(N))
5136       return;
5137 
5138   BasicBlock *BB = DII.getParent();
5139   Function *F = BB ? BB->getParent() : nullptr;
5140 
5141   // The scopes for variables and !dbg attachments must agree.
5142   DILocalVariable *Var = DII.getVariable();
5143   DILocation *Loc = DII.getDebugLoc();
5144   AssertDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
5145            &DII, BB, F);
5146 
5147   DISubprogram *VarSP = getSubprogram(Var->getRawScope());
5148   DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
5149   if (!VarSP || !LocSP)
5150     return; // Broken scope chains are checked elsewhere.
5151 
5152   AssertDI(VarSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind +
5153                                " variable and !dbg attachment",
5154            &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
5155            Loc->getScope()->getSubprogram());
5156 
5157   // This check is redundant with one in visitLocalVariable().
5158   AssertDI(isType(Var->getRawType()), "invalid type ref", Var,
5159            Var->getRawType());
5160   verifyFnArgs(DII);
5161 }
5162 
5163 void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
5164   AssertDI(isa<DILabel>(DLI.getRawLabel()),
5165          "invalid llvm.dbg." + Kind + " intrinsic label", &DLI,
5166          DLI.getRawLabel());
5167 
5168   // Ignore broken !dbg attachments; they're checked elsewhere.
5169   if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
5170     if (!isa<DILocation>(N))
5171       return;
5172 
5173   BasicBlock *BB = DLI.getParent();
5174   Function *F = BB ? BB->getParent() : nullptr;
5175 
5176   // The scopes for variables and !dbg attachments must agree.
5177   DILabel *Label = DLI.getLabel();
5178   DILocation *Loc = DLI.getDebugLoc();
5179   AssertDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
5180            &DLI, BB, F);
5181 
5182   DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
5183   DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
5184   if (!LabelSP || !LocSP)
5185     return;
5186 
5187   AssertDI(LabelSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind +
5188                              " label and !dbg attachment",
5189            &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
5190            Loc->getScope()->getSubprogram());
5191 }
5192 
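// A DW_OP_LLVM_fragment expression restricts a debug intrinsic to a bit range
// of its variable, e.g. (illustrative) the low 32 bits of a 64-bit variable:
//
//   call void @llvm.dbg.value(metadata i32 %lo, metadata !10,
//       metadata !DIExpression(DW_OP_LLVM_fragment, 0, 32)), !dbg !20
//
// The checks below reject fragments that extend past the variable's size and
// fragments that redundantly cover the entire variable.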
5193 void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
5194   DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
5195   DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
5196 
5197   // We don't know whether this intrinsic verified correctly.
5198   if (!V || !E || !E->isValid())
5199     return;
5200 
5201   // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
5202   auto Fragment = E->getFragmentInfo();
5203   if (!Fragment)
5204     return;
5205 
5206   // The frontend helps out GDB by emitting the members of local anonymous
5207   // unions as artificial local variables with shared storage. When SROA splits
5208   // the storage for artificial local variables that are smaller than the entire
5209   // union, the overhang piece will be outside of the allotted space for the
5210   // variable and this check fails.
5211   // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
5212   if (V->isArtificial())
5213     return;
5214 
5215   verifyFragmentExpression(*V, *Fragment, &I);
5216 }
5217 
5218 template <typename ValueOrMetadata>
5219 void Verifier::verifyFragmentExpression(const DIVariable &V,
5220                                         DIExpression::FragmentInfo Fragment,
5221                                         ValueOrMetadata *Desc) {
5222   // If there's no size, the type is broken, but that should be checked
5223   // elsewhere.
5224   auto VarSize = V.getSizeInBits();
5225   if (!VarSize)
5226     return;
5227 
5228   unsigned FragSize = Fragment.SizeInBits;
5229   unsigned FragOffset = Fragment.OffsetInBits;
  AssertDI(FragSize + FragOffset <= *VarSize,
           "fragment is larger than or outside of variable", Desc, &V);
5232   AssertDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
5233 }
5234 
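// For illustration, this rejects two debug intrinsics in the same non-inlined
// scope that name different DILocalVariables while both claiming to describe
// function argument #1 (arg: 1); such duplicates trip hard-to-debug
// assertions in the DWARF backend.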
5235 void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
  // This function does not take the scope of non-inlined function arguments
  // into account. Don't run it if the current function is nodebug, because it
  // may contain inlined debug intrinsics.
5239   if (!HasDebugInfo)
5240     return;
5241 
5242   // For performance reasons only check non-inlined ones.
5243   if (I.getDebugLoc()->getInlinedAt())
5244     return;
5245 
5246   DILocalVariable *Var = I.getVariable();
5247   AssertDI(Var, "dbg intrinsic without variable");
5248 
5249   unsigned ArgNo = Var->getArg();
5250   if (!ArgNo)
5251     return;
5252 
5253   // Verify there are no duplicate function argument debug info entries.
5254   // These will cause hard-to-debug assertions in the DWARF backend.
5255   if (DebugFnArgs.size() < ArgNo)
5256     DebugFnArgs.resize(ArgNo, nullptr);
5257 
5258   auto *Prev = DebugFnArgs[ArgNo - 1];
5259   DebugFnArgs[ArgNo - 1] = Var;
5260   AssertDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
5261            Prev, Var);
5262 }
5263 
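// Entry-value expressions such as !DIExpression(DW_OP_LLVM_entry_value, 1)
// (illustrative) describe a parameter's value on function entry; they are
// only allowed in MIR, so the IR verifier rejects them here.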
5264 void Verifier::verifyNotEntryValue(const DbgVariableIntrinsic &I) {
5265   DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
5266 
5267   // We don't know whether this intrinsic verified correctly.
5268   if (!E || !E->isValid())
5269     return;
5270 
5271   AssertDI(!E->isEntryValue(), "Entry values are only allowed in MIR", &I);
5272 }
5273 
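// Every DICompileUnit reachable from the debug info visited so far must also
// be listed in the module-level named metadata, e.g. (illustrative):
//
//   !llvm.dbg.cu = !{!0}
//   !0 = distinct !DICompileUnit(...)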
5274 void Verifier::verifyCompileUnits() {
  // When more than one Module is imported into the same context, such as during
  // an LTO build before linking the modules, ODR type uniquing may cause types
  // to point to a different CU, so this check does not apply.
5278   if (M.getContext().isODRUniquingDebugTypes())
5279     return;
5280   auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
5281   SmallPtrSet<const Metadata *, 2> Listed;
5282   if (CUs)
5283     Listed.insert(CUs->op_begin(), CUs->op_end());
5284   for (auto *CU : CUVisited)
5285     AssertDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
5286   CUVisited.clear();
5287 }
5288 
5289 void Verifier::verifyDeoptimizeCallingConvs() {
5290   if (DeoptimizeDeclarations.empty())
5291     return;
5292 
5293   const Function *First = DeoptimizeDeclarations[0];
5294   for (auto *F : makeArrayRef(DeoptimizeDeclarations).slice(1)) {
5295     Assert(First->getCallingConv() == F->getCallingConv(),
5296            "All llvm.experimental.deoptimize declarations must have the same "
5297            "calling convention",
5298            First, F);
5299   }
5300 }
5301 
5302 void Verifier::verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F) {
5303   bool HasSource = F.getSource().hasValue();
5304   if (!HasSourceDebugInfo.count(&U))
5305     HasSourceDebugInfo[&U] = HasSource;
5306   AssertDI(HasSource == HasSourceDebugInfo[&U],
5307            "inconsistent use of embedded source");
5308 }
5309 
5310 //===----------------------------------------------------------------------===//
5311 //  Implement the public interfaces to this file...
5312 //===----------------------------------------------------------------------===//
5313 
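// A minimal usage sketch from a tool (assuming it has a Module M and wants
// the offending IR printed to stderr):
//
//   if (verifyModule(M, &errs()))
//     report_fatal_error("broken module");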
5314 bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
5315   Function &F = const_cast<Function &>(f);
5316 
5317   // Don't use a raw_null_ostream.  Printing IR is expensive.
5318   Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
5319 
5320   // Note that this function's return value is inverted from what you would
5321   // expect of a function called "verify".
5322   return !V.verify(F);
5323 }
5324 
5325 bool llvm::verifyModule(const Module &M, raw_ostream *OS,
5326                         bool *BrokenDebugInfo) {
5327   // Don't use a raw_null_ostream.  Printing IR is expensive.
5328   Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
5329 
5330   bool Broken = false;
5331   for (const Function &F : M)
5332     Broken |= !V.verify(F);
5333 
5334   Broken |= !V.verify();
5335   if (BrokenDebugInfo)
5336     *BrokenDebugInfo = V.hasBrokenDebugInfo();
5337   // Note that this function's return value is inverted from what you would
5338   // expect of a function called "verify".
5339   return Broken;
5340 }
5341 
5342 namespace {
5343 
5344 struct VerifierLegacyPass : public FunctionPass {
5345   static char ID;
5346 
5347   std::unique_ptr<Verifier> V;
5348   bool FatalErrors = true;
5349 
5350   VerifierLegacyPass() : FunctionPass(ID) {
5351     initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
5352   }
5353   explicit VerifierLegacyPass(bool FatalErrors)
5354       : FunctionPass(ID),
5355         FatalErrors(FatalErrors) {
5356     initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
5357   }
5358 
5359   bool doInitialization(Module &M) override {
5360     V = std::make_unique<Verifier>(
5361         &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
5362     return false;
5363   }
5364 
5365   bool runOnFunction(Function &F) override {
5366     if (!V->verify(F) && FatalErrors) {
5367       errs() << "in function " << F.getName() << '\n';
5368       report_fatal_error("Broken function found, compilation aborted!");
5369     }
5370     return false;
5371   }
5372 
5373   bool doFinalization(Module &M) override {
5374     bool HasErrors = false;
5375     for (Function &F : M)
5376       if (F.isDeclaration())
5377         HasErrors |= !V->verify(F);
5378 
5379     HasErrors |= !V->verify();
5380     if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
5381       report_fatal_error("Broken module found, compilation aborted!");
5382     return false;
5383   }
5384 
5385   void getAnalysisUsage(AnalysisUsage &AU) const override {
5386     AU.setPreservesAll();
5387   }
5388 };
5389 
5390 } // end anonymous namespace
5391 
/// Helper to issue a failure from the TBAA verification.
5393 template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
5394   if (Diagnostic)
5395     return Diagnostic->CheckFailed(Args...);
5396 }
5397 
5398 #define AssertTBAA(C, ...)                                                     \
5399   do {                                                                         \
5400     if (!(C)) {                                                                \
5401       CheckFailed(__VA_ARGS__);                                                \
5402       return false;                                                            \
5403     }                                                                          \
5404   } while (false)
5405 
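// For reference, an old-format (struct-path) TBAA hierarchy looks roughly
// like this (illustrative metadata numbering):
//
//   !0 = !{!"Simple C++ TBAA"}            ; root node
//   !1 = !{!"omnipotent char", !0, i64 0} ; scalar type node
//   !2 = !{!"int", !1, i64 0}             ; scalar type node
//   !3 = !{!"S", !2, i64 0, !2, i64 4}    ; struct type node: two int fields
//   !4 = !{!3, !2, i64 4}                 ; access tag: the int member at +4
//
// The helpers below validate the individual node kinds; visitTBAAMetadata
// walks a full access tag.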
5406 /// Verify that \p BaseNode can be used as the "base type" in the struct-path
5407 /// TBAA scheme.  This means \p BaseNode is either a scalar node, or a
5408 /// struct-type node describing an aggregate data structure (like a struct).
5409 TBAAVerifier::TBAABaseNodeSummary
5410 TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
5411                                  bool IsNewFormat) {
5412   if (BaseNode->getNumOperands() < 2) {
5413     CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
5414     return {true, ~0u};
5415   }
5416 
5417   auto Itr = TBAABaseNodes.find(BaseNode);
5418   if (Itr != TBAABaseNodes.end())
5419     return Itr->second;
5420 
5421   auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
5422   auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
5423   (void)InsertResult;
5424   assert(InsertResult.second && "We just checked!");
5425   return Result;
5426 }
5427 
5428 TBAAVerifier::TBAABaseNodeSummary
5429 TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
5430                                      bool IsNewFormat) {
5431   const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
5432 
5433   if (BaseNode->getNumOperands() == 2) {
5434     // Scalar nodes can only be accessed at offset 0.
5435     return isValidScalarTBAANode(BaseNode)
5436                ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
5437                : InvalidNode;
5438   }
5439 
5440   if (IsNewFormat) {
5441     if (BaseNode->getNumOperands() % 3 != 0) {
      CheckFailed("Access tag nodes must have a number of operands that is a "
                  "multiple of 3!", BaseNode);
5444       return InvalidNode;
5445     }
5446   } else {
5447     if (BaseNode->getNumOperands() % 2 != 1) {
5448       CheckFailed("Struct tag nodes must have an odd number of operands!",
5449                   BaseNode);
5450       return InvalidNode;
5451     }
5452   }
5453 
5454   // Check the type size field.
5455   if (IsNewFormat) {
5456     auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
5457         BaseNode->getOperand(1));
5458     if (!TypeSizeNode) {
5459       CheckFailed("Type size nodes must be constants!", &I, BaseNode);
5460       return InvalidNode;
5461     }
5462   }
5463 
5464   // Check the type name field. In the new format it can be anything.
5465   if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
    CheckFailed("Struct tag nodes must have a string as their first operand",
                BaseNode);
5468     return InvalidNode;
5469   }
5470 
5471   bool Failed = false;
5472 
5473   Optional<APInt> PrevOffset;
5474   unsigned BitWidth = ~0u;
5475 
5476   // We've already checked that BaseNode is not a degenerate root node with one
5477   // operand in \c verifyTBAABaseNode, so this loop should run at least once.
5478   unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
5479   unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
5480   for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
5481            Idx += NumOpsPerField) {
5482     const MDOperand &FieldTy = BaseNode->getOperand(Idx);
5483     const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
5484     if (!isa<MDNode>(FieldTy)) {
5485       CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
5486       Failed = true;
5487       continue;
5488     }
5489 
5490     auto *OffsetEntryCI =
5491         mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
5492     if (!OffsetEntryCI) {
5493       CheckFailed("Offset entries must be constants!", &I, BaseNode);
5494       Failed = true;
5495       continue;
5496     }
5497 
5498     if (BitWidth == ~0u)
5499       BitWidth = OffsetEntryCI->getBitWidth();
5500 
5501     if (OffsetEntryCI->getBitWidth() != BitWidth) {
5502       CheckFailed(
5503           "Bitwidth between the offsets and struct type entries must match", &I,
5504           BaseNode);
5505       Failed = true;
5506       continue;
5507     }
5508 
    // NB! As far as I can tell, we generate a non-strictly increasing offset
    // sequence only from structs that have zero-size bit fields.  When
    // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
    // pick the field that appears lexically last in the struct type metadata
    // node.  This mirrors the actual behavior of the alias analysis
    // implementation.
5514     bool IsAscending =
5515         !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
5516 
5517     if (!IsAscending) {
5518       CheckFailed("Offsets must be increasing!", &I, BaseNode);
5519       Failed = true;
5520     }
5521 
5522     PrevOffset = OffsetEntryCI->getValue();
5523 
5524     if (IsNewFormat) {
5525       auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
5526           BaseNode->getOperand(Idx + 2));
5527       if (!MemberSizeNode) {
5528         CheckFailed("Member size entries must be constants!", &I, BaseNode);
5529         Failed = true;
5530         continue;
5531       }
5532     }
5533   }
5534 
5535   return Failed ? InvalidNode
5536                 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
5537 }
5538 
5539 static bool IsRootTBAANode(const MDNode *MD) {
5540   return MD->getNumOperands() < 2;
5541 }
5542 
5543 static bool IsScalarTBAANodeImpl(const MDNode *MD,
5544                                  SmallPtrSetImpl<const MDNode *> &Visited) {
5545   if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
5546     return false;
5547 
5548   if (!isa<MDString>(MD->getOperand(0)))
5549     return false;
5550 
5551   if (MD->getNumOperands() == 3) {
5552     auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
5553     if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
5554       return false;
5555   }
5556 
5557   auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
5558   return Parent && Visited.insert(Parent).second &&
5559          (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
5560 }
5561 
5562 bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
5563   auto ResultIt = TBAAScalarNodes.find(MD);
5564   if (ResultIt != TBAAScalarNodes.end())
5565     return ResultIt->second;
5566 
5567   SmallPtrSet<const MDNode *, 4> Visited;
5568   bool Result = IsScalarTBAANodeImpl(MD, Visited);
5569   auto InsertResult = TBAAScalarNodes.insert({MD, Result});
5570   (void)InsertResult;
5571   assert(InsertResult.second && "Just checked!");
5572 
5573   return Result;
5574 }
5575 
/// Returns the field node at the offset \p Offset in \p BaseNode.  Updates \p
/// Offset in place to be the offset within the field node returned.
5578 ///
5579 /// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
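///
/// For example (illustrative), given the old-format struct node
///   !3 = !{!"S", !2, i64 0, !2, i64 4}
/// and \p Offset == 5, this returns the field node !2 at offset 4 and updates
/// \p Offset to 1, the remaining offset within that field.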
5580 MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
5581                                                    const MDNode *BaseNode,
5582                                                    APInt &Offset,
5583                                                    bool IsNewFormat) {
5584   assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
5585 
5586   // Scalar nodes have only one possible "field" -- their parent in the access
5587   // hierarchy.  Offset must be zero at this point, but our caller is supposed
5588   // to Assert that.
5589   if (BaseNode->getNumOperands() == 2)
5590     return cast<MDNode>(BaseNode->getOperand(1));
5591 
5592   unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
5593   unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
5594   for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
5595            Idx += NumOpsPerField) {
5596     auto *OffsetEntryCI =
5597         mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
5598     if (OffsetEntryCI->getValue().ugt(Offset)) {
5599       if (Idx == FirstFieldOpNo) {
5600         CheckFailed("Could not find TBAA parent in struct type node", &I,
5601                     BaseNode, &Offset);
5602         return nullptr;
5603       }
5604 
5605       unsigned PrevIdx = Idx - NumOpsPerField;
5606       auto *PrevOffsetEntryCI =
5607           mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
5608       Offset -= PrevOffsetEntryCI->getValue();
5609       return cast<MDNode>(BaseNode->getOperand(PrevIdx));
5610     }
5611   }
5612 
5613   unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
5614   auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
5615       BaseNode->getOperand(LastIdx + 1));
5616   Offset -= LastOffsetEntryCI->getValue();
5617   return cast<MDNode>(BaseNode->getOperand(LastIdx));
5618 }
5619 
5620 static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
5621   if (!Type || Type->getNumOperands() < 3)
5622     return false;
5623 
  // In the new format, type nodes have a reference to the parent type as their
  // first operand.
5626   MDNode *Parent = dyn_cast_or_null<MDNode>(Type->getOperand(0));
5627   if (!Parent)
5628     return false;
5629 
5630   return true;
5631 }
5632 
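// visitTBAAMetadata checks an access tag operand by operand.  As enforced
// below, old-format tags have the shape
//   !{BaseType, AccessType, Offset [, IsImmutable]}
// and new-format tags have the shape
//   !{BaseType, AccessType, Offset, Size [, IsImmutable]}
// (a sketch of the layout, not a normative definition).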
5633 bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
5634   AssertTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
5635                  isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
5636                  isa<AtomicCmpXchgInst>(I),
5637              "This instruction shall not have a TBAA access tag!", &I);
5638 
5639   bool IsStructPathTBAA =
5640       isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
5641 
5642   AssertTBAA(
5643       IsStructPathTBAA,
5644       "Old-style TBAA is no longer allowed, use struct-path TBAA instead", &I);
5645 
5646   MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
5647   MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
5648 
5649   bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
5650 
5651   if (IsNewFormat) {
5652     AssertTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
5653                "Access tag metadata must have either 4 or 5 operands", &I, MD);
5654   } else {
5655     AssertTBAA(MD->getNumOperands() < 5,
5656                "Struct tag metadata must have either 3 or 4 operands", &I, MD);
5657   }
5658 
5659   // Check the access size field.
5660   if (IsNewFormat) {
5661     auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
5662         MD->getOperand(3));
5663     AssertTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
5664   }
5665 
5666   // Check the immutability flag.
5667   unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
5668   if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
5669     auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
5670         MD->getOperand(ImmutabilityFlagOpNo));
5671     AssertTBAA(IsImmutableCI,
5672                "Immutability tag on struct tag metadata must be a constant",
5673                &I, MD);
5674     AssertTBAA(
5675         IsImmutableCI->isZero() || IsImmutableCI->isOne(),
5676         "Immutability part of the struct tag metadata must be either 0 or 1",
5677         &I, MD);
5678   }
5679 
5680   AssertTBAA(BaseNode && AccessType,
5681              "Malformed struct tag metadata: base and access-type "
5682              "should be non-null and point to Metadata nodes",
5683              &I, MD, BaseNode, AccessType);
5684 
5685   if (!IsNewFormat) {
5686     AssertTBAA(isValidScalarTBAANode(AccessType),
5687                "Access type node must be a valid scalar type", &I, MD,
5688                AccessType);
5689   }
5690 
5691   auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
5692   AssertTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
5693 
5694   APInt Offset = OffsetCI->getValue();
5695   bool SeenAccessTypeInPath = false;
5696 
5697   SmallPtrSet<MDNode *, 4> StructPath;
5698 
5699   for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
5700        BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
5701                                                IsNewFormat)) {
5702     if (!StructPath.insert(BaseNode).second) {
5703       CheckFailed("Cycle detected in struct path", &I, MD);
5704       return false;
5705     }
5706 
5707     bool Invalid;
5708     unsigned BaseNodeBitWidth;
5709     std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
5710                                                              IsNewFormat);
5711 
5712     // If the base node is invalid in itself, then we've already printed all the
5713     // errors we wanted to print.
5714     if (Invalid)
5715       return false;
5716 
5717     SeenAccessTypeInPath |= BaseNode == AccessType;
5718 
5719     if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
5720       AssertTBAA(Offset == 0, "Offset not zero at the point of scalar access",
5721                  &I, MD, &Offset);
5722 
5723     AssertTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
5724                    (BaseNodeBitWidth == 0 && Offset == 0) ||
5725                    (IsNewFormat && BaseNodeBitWidth == ~0u),
5726                "Access bit-width not the same as description bit-width", &I, MD,
5727                BaseNodeBitWidth, Offset.getBitWidth());
5728 
5729     if (IsNewFormat && SeenAccessTypeInPath)
5730       break;
5731   }
5732 
5733   AssertTBAA(SeenAccessTypeInPath, "Did not see access type in access path!",
5734              &I, MD);
5735   return true;
5736 }
5737 
5738 char VerifierLegacyPass::ID = 0;
5739 INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
5740 
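// A minimal legacy pass manager sketch (assuming the caller owns a
// legacy::PassManager named PM):
//
//   PM.add(createVerifierPass(/*FatalErrors=*/true));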
5741 FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
5742   return new VerifierLegacyPass(FatalErrors);
5743 }
5744 
5745 AnalysisKey VerifierAnalysis::Key;
5746 VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
5747                                                ModuleAnalysisManager &) {
5748   Result Res;
5749   Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
5750   return Res;
5751 }
5752 
5753 VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
5754                                                FunctionAnalysisManager &) {
5755   return { llvm::verifyFunction(F, &dbgs()), false };
5756 }
5757 
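// Under the new pass manager the verifier is scheduled like any other pass,
// e.g. (sketch, assuming an already-configured ModulePassManager MPM):
//
//   MPM.addPass(VerifierPass());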
5758 PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
5759   auto Res = AM.getResult<VerifierAnalysis>(M);
5760   if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
5761     report_fatal_error("Broken module found, compilation aborted!");
5762 
5763   return PreservedAnalyses::all();
5764 }
5765 
5766 PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
5767   auto res = AM.getResult<VerifierAnalysis>(F);
5768   if (res.IRBroken && FatalErrors)
5769     report_fatal_error("Broken function found, compilation aborted!");
5770 
5771   return PreservedAnalyses::all();
5772 }
5773