1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This provides C++ code generation targeting the Itanium C++ ABI.  The class
10 // in this file generates structures that follow the Itanium C++ ABI, which is
11 // documented at:
12 //  https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13 //  https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14 //
15 // It also supports the closely-related ARM ABI, documented at:
16 // https://developer.arm.com/documentation/ihi0041/g/
17 //
18 //===----------------------------------------------------------------------===//
19 
20 #include "CGCXXABI.h"
21 #include "CGCleanup.h"
22 #include "CGRecordLayout.h"
23 #include "CGVTables.h"
24 #include "CodeGenFunction.h"
25 #include "CodeGenModule.h"
26 #include "TargetInfo.h"
27 #include "clang/AST/Attr.h"
28 #include "clang/AST/Mangle.h"
29 #include "clang/AST/StmtCXX.h"
30 #include "clang/AST/Type.h"
31 #include "clang/CodeGen/ConstantInitBuilder.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/GlobalValue.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/Value.h"
37 #include "llvm/Support/ScopedPrinter.h"
38 
39 using namespace clang;
40 using namespace CodeGen;
41 
42 namespace {
43 class ItaniumCXXABI : public CodeGen::CGCXXABI {
44   /// VTables - All the vtables which have been defined.
45   llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
46 
47   /// All the thread wrapper functions that have been used.
48   llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
49       ThreadWrappers;
50 
51 protected:
52   bool UseARMMethodPtrABI;
53   bool UseARMGuardVarABI;
54   bool Use32BitVTableOffsetABI;
55 
56   ItaniumMangleContext &getMangleContext() {
57     return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
58   }
59 
60 public:
61   ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
62                 bool UseARMMethodPtrABI = false,
63                 bool UseARMGuardVarABI = false) :
64     CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
65     UseARMGuardVarABI(UseARMGuardVarABI),
66     Use32BitVTableOffsetABI(false) { }
67 
68   bool classifyReturnType(CGFunctionInfo &FI) const override;
69 
70   RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
71     // If C++ prohibits us from making a copy, pass by address.
72     if (!RD->canPassInRegisters())
73       return RAA_Indirect;
74     return RAA_Default;
75   }
76 
77   bool isThisCompleteObject(GlobalDecl GD) const override {
78     // The Itanium ABI has separate complete-object vs.  base-object
79     // variants of both constructors and destructors.
80     if (isa<CXXDestructorDecl>(GD.getDecl())) {
81       switch (GD.getDtorType()) {
82       case Dtor_Complete:
83       case Dtor_Deleting:
84         return true;
85 
86       case Dtor_Base:
87         return false;
88 
89       case Dtor_Comdat:
90         llvm_unreachable("emitting dtor comdat as function?");
91       }
92       llvm_unreachable("bad dtor kind");
93     }
94     if (isa<CXXConstructorDecl>(GD.getDecl())) {
95       switch (GD.getCtorType()) {
96       case Ctor_Complete:
97         return true;
98 
99       case Ctor_Base:
100         return false;
101 
102       case Ctor_CopyingClosure:
103       case Ctor_DefaultClosure:
104         llvm_unreachable("closure ctors in Itanium ABI?");
105 
106       case Ctor_Comdat:
107         llvm_unreachable("emitting ctor comdat as function?");
108       }
109       llvm_unreachable("bad dtor kind");
110     }
111 
112     // No other kinds.
113     return false;
114   }
115 
116   bool isZeroInitializable(const MemberPointerType *MPT) override;
117 
118   llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
119 
120   CGCallee
121     EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
122                                     const Expr *E,
123                                     Address This,
124                                     llvm::Value *&ThisPtrForCall,
125                                     llvm::Value *MemFnPtr,
126                                     const MemberPointerType *MPT) override;
127 
128   llvm::Value *
129     EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
130                                  Address Base,
131                                  llvm::Value *MemPtr,
132                                  const MemberPointerType *MPT) override;
133 
134   llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
135                                            const CastExpr *E,
136                                            llvm::Value *Src) override;
137   llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
138                                               llvm::Constant *Src) override;
139 
140   llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
141 
142   llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
143   llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
144                                         CharUnits offset) override;
145   llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
146   llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
147                                      CharUnits ThisAdjustment);
148 
149   llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
150                                            llvm::Value *L, llvm::Value *R,
151                                            const MemberPointerType *MPT,
152                                            bool Inequality) override;
153 
154   llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
155                                          llvm::Value *Addr,
156                                          const MemberPointerType *MPT) override;
157 
158   void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
159                                Address Ptr, QualType ElementType,
160                                const CXXDestructorDecl *Dtor) override;
161 
162   void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
163   void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
164 
165   void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
166 
167   llvm::CallInst *
168   emitTerminateForUnexpectedException(CodeGenFunction &CGF,
169                                       llvm::Value *Exn) override;
170 
171   void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
172   llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
173   CatchTypeInfo
174   getAddrOfCXXCatchHandlerType(QualType Ty,
175                                QualType CatchHandlerType) override {
176     return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
177   }
178 
179   bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
180   void EmitBadTypeidCall(CodeGenFunction &CGF) override;
181   llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
182                           Address ThisPtr,
183                           llvm::Type *StdTypeInfoPtrTy) override;
184 
185   bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
186                                           QualType SrcRecordTy) override;
187 
188   llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
189                                    QualType SrcRecordTy, QualType DestTy,
190                                    QualType DestRecordTy,
191                                    llvm::BasicBlock *CastEnd) override;
192 
193   llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
194                                      QualType SrcRecordTy,
195                                      QualType DestTy) override;
196 
197   bool EmitBadCastCall(CodeGenFunction &CGF) override;
198 
199   llvm::Value *
200     GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
201                               const CXXRecordDecl *ClassDecl,
202                               const CXXRecordDecl *BaseClassDecl) override;
203 
204   void EmitCXXConstructors(const CXXConstructorDecl *D) override;
205 
206   AddedStructorArgCounts
207   buildStructorSignature(GlobalDecl GD,
208                          SmallVectorImpl<CanQualType> &ArgTys) override;
209 
210   bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
211                               CXXDtorType DT) const override {
212     // Itanium does not emit any destructor variant as an inline thunk.
213     // Delegating may occur as an optimization, but all variants are either
214     // emitted with external linkage or as linkonce if they are inline and used.
215     return false;
216   }
217 
218   void EmitCXXDestructors(const CXXDestructorDecl *D) override;
219 
220   void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
221                                  FunctionArgList &Params) override;
222 
223   void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
224 
225   AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
226                                                const CXXConstructorDecl *D,
227                                                CXXCtorType Type,
228                                                bool ForVirtualBase,
229                                                bool Delegating) override;
230 
231   llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
232                                              const CXXDestructorDecl *DD,
233                                              CXXDtorType Type,
234                                              bool ForVirtualBase,
235                                              bool Delegating) override;
236 
237   void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
238                           CXXDtorType Type, bool ForVirtualBase,
239                           bool Delegating, Address This,
240                           QualType ThisTy) override;
241 
242   void emitVTableDefinitions(CodeGenVTables &CGVT,
243                              const CXXRecordDecl *RD) override;
244 
245   bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
246                                            CodeGenFunction::VPtr Vptr) override;
247 
248   bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
249     return true;
250   }
251 
252   llvm::Constant *
253   getVTableAddressPoint(BaseSubobject Base,
254                         const CXXRecordDecl *VTableClass) override;
255 
256   llvm::Value *getVTableAddressPointInStructor(
257       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
258       BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
259 
260   llvm::Value *getVTableAddressPointInStructorWithVTT(
261       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
262       BaseSubobject Base, const CXXRecordDecl *NearestVBase);
263 
264   llvm::Constant *
265   getVTableAddressPointForConstExpr(BaseSubobject Base,
266                                     const CXXRecordDecl *VTableClass) override;
267 
268   llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
269                                         CharUnits VPtrOffset) override;
270 
271   CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
272                                      Address This, llvm::Type *Ty,
273                                      SourceLocation Loc) override;
274 
275   llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
276                                          const CXXDestructorDecl *Dtor,
277                                          CXXDtorType DtorType, Address This,
278                                          DeleteOrMemberCallExpr E) override;
279 
280   void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
281 
282   bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
283   bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
284 
285   void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
286                        bool ReturnAdjustment) override {
287     // Allow inlining of thunks by emitting them with available_externally
288     // linkage together with vtables when needed.
289     if (ForVTable && !Thunk->hasLocalLinkage())
290       Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
291     CGM.setGVProperties(Thunk, GD);
292   }
293 
294   bool exportThunk() override { return true; }
295 
296   llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
297                                      const ThisAdjustment &TA) override;
298 
299   llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
300                                        const ReturnAdjustment &RA) override;
301 
302   size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
303                               FunctionArgList &Args) const override {
304     assert(!Args.empty() && "expected the arglist to not be empty!");
305     return Args.size() - 1;
306   }
307 
308   StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
309   StringRef GetDeletedVirtualCallName() override
310     { return "__cxa_deleted_virtual"; }
311 
312   CharUnits getArrayCookieSizeImpl(QualType elementType) override;
313   Address InitializeArrayCookie(CodeGenFunction &CGF,
314                                 Address NewPtr,
315                                 llvm::Value *NumElements,
316                                 const CXXNewExpr *expr,
317                                 QualType ElementType) override;
318   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
319                                    Address allocPtr,
320                                    CharUnits cookieSize) override;
321 
322   void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
323                        llvm::GlobalVariable *DeclPtr,
324                        bool PerformInit) override;
325   void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
326                           llvm::FunctionCallee dtor,
327                           llvm::Constant *addr) override;
328 
329   llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
330                                                 llvm::Value *Val);
331   void EmitThreadLocalInitFuncs(
332       CodeGenModule &CGM,
333       ArrayRef<const VarDecl *> CXXThreadLocals,
334       ArrayRef<llvm::Function *> CXXThreadLocalInits,
335       ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
336 
337   bool usesThreadWrapperFunction(const VarDecl *VD) const override {
338     return !isEmittedWithConstantInitializer(VD) ||
339            mayNeedDestruction(VD);
340   }
341   LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
342                                       QualType LValType) override;
343 
344   bool NeedsVTTParameter(GlobalDecl GD) override;
345 
346   /**************************** RTTI Uniqueness ******************************/
347 
348 protected:
349   /// Returns true if the ABI requires RTTI type_info objects to be unique
350   /// across a program.
351   virtual bool shouldRTTIBeUnique() const { return true; }
352 
353 public:
354   /// What sort of unique-RTTI behavior should we use?
355   enum RTTIUniquenessKind {
356     /// We are guaranteeing, or need to guarantee, that the RTTI string
357     /// is unique.
358     RUK_Unique,
359 
360     /// We are not guaranteeing uniqueness for the RTTI string, so we
361     /// can demote to hidden visibility but must use string comparisons.
362     RUK_NonUniqueHidden,
363 
364     /// We are not guaranteeing uniqueness for the RTTI string, so we
365     /// have to use string comparisons, but we also have to emit it with
366     /// non-hidden visibility.
367     RUK_NonUniqueVisible
368   };
369 
370   /// Return the required visibility status for the given type and linkage in
371   /// the current ABI.
372   RTTIUniquenessKind
373   classifyRTTIUniqueness(QualType CanTy,
374                          llvm::GlobalValue::LinkageTypes Linkage) const;
375   friend class ItaniumRTTIBuilder;
376 
377   void emitCXXStructor(GlobalDecl GD) override;
378 
379   std::pair<llvm::Value *, const CXXRecordDecl *>
380   LoadVTablePtr(CodeGenFunction &CGF, Address This,
381                 const CXXRecordDecl *RD) override;
382 
383  private:
384    bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
385      const auto &VtableLayout =
386          CGM.getItaniumVTableContext().getVTableLayout(RD);
387 
388      for (const auto &VtableComponent : VtableLayout.vtable_components()) {
389        // Skip empty slot.
390        if (!VtableComponent.isUsedFunctionPointerKind())
391          continue;
392 
393        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
394        if (!Method->getCanonicalDecl()->isInlined())
395          continue;
396 
397        StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
398        auto *Entry = CGM.GetGlobalValue(Name);
399        // This checks if virtual inline function has already been emitted.
400        // Note that it is possible that this inline function would be emitted
401        // after trying to emit vtable speculatively. Because of this we do
402        // an extra pass after emitting all deferred vtables to find and emit
403        // these vtables opportunistically.
404        if (!Entry || Entry->isDeclaration())
405          return true;
406      }
407      return false;
408   }
409 
410   bool isVTableHidden(const CXXRecordDecl *RD) const {
411     const auto &VtableLayout =
412             CGM.getItaniumVTableContext().getVTableLayout(RD);
413 
414     for (const auto &VtableComponent : VtableLayout.vtable_components()) {
415       if (VtableComponent.isRTTIKind()) {
416         const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
417         if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
418           return true;
419       } else if (VtableComponent.isUsedFunctionPointerKind()) {
420         const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
421         if (Method->getVisibility() == Visibility::HiddenVisibility &&
422             !Method->isDefined())
423           return true;
424       }
425     }
426     return false;
427   }
428 };
429 
430 class ARMCXXABI : public ItaniumCXXABI {
431 public:
432   ARMCXXABI(CodeGen::CodeGenModule &CGM) :
433     ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
434                   /*UseARMGuardVarABI=*/true) {}
435 
436   bool HasThisReturn(GlobalDecl GD) const override {
437     return (isa<CXXConstructorDecl>(GD.getDecl()) || (
438               isa<CXXDestructorDecl>(GD.getDecl()) &&
439               GD.getDtorType() != Dtor_Deleting));
440   }
441 
442   void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
443                            QualType ResTy) override;
444 
445   CharUnits getArrayCookieSizeImpl(QualType elementType) override;
446   Address InitializeArrayCookie(CodeGenFunction &CGF,
447                                 Address NewPtr,
448                                 llvm::Value *NumElements,
449                                 const CXXNewExpr *expr,
450                                 QualType ElementType) override;
451   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
452                                    CharUnits cookieSize) override;
453 };
454 
/// Apple arm64 variant of the ARM ABI: additionally restricts vtable offsets
/// in member function pointers to 32 bits and tolerates non-unique RTTI.
class AppleARM64CXXABI : public ARMCXXABI {
public:
  AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
    Use32BitVTableOffsetABI = true;
  }

  // ARM64 libraries are prepared for non-unique RTTI.
  bool shouldRTTIBeUnique() const override { return false; }
};
464 
465 class FuchsiaCXXABI final : public ItaniumCXXABI {
466 public:
467   explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
468       : ItaniumCXXABI(CGM) {}
469 
470 private:
471   bool HasThisReturn(GlobalDecl GD) const override {
472     return isa<CXXConstructorDecl>(GD.getDecl()) ||
473            (isa<CXXDestructorDecl>(GD.getDecl()) &&
474             GD.getDtorType() != Dtor_Deleting);
475   }
476 };
477 
478 class WebAssemblyCXXABI final : public ItaniumCXXABI {
479 public:
480   explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
481       : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
482                       /*UseARMGuardVarABI=*/true) {}
483   void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
484   llvm::CallInst *
485   emitTerminateForUnexpectedException(CodeGenFunction &CGF,
486                                       llvm::Value *Exn) override;
487 
488 private:
489   bool HasThisReturn(GlobalDecl GD) const override {
490     return isa<CXXConstructorDecl>(GD.getDecl()) ||
491            (isa<CXXDestructorDecl>(GD.getDecl()) &&
492             GD.getDtorType() != Dtor_Deleting);
493   }
494   bool canCallMismatchedFunctionType() const override { return false; }
495 };
496 
/// AIX (XL) variant of the Itanium ABI: registers global destructors through
/// its own mechanism and uses sinit/sterm functions for static
/// initialization/termination.
class XLCXXABI final : public ItaniumCXXABI {
public:
  explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::FunctionCallee dtor,
                          llvm::Constant *addr) override;

  bool useSinitAndSterm() const override { return true; }

private:
  // Emits the sterm finalizer that runs \p dtorStub for \p D at termination.
  void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                             llvm::Constant *addr);
};
512 }
513 
514 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
515   switch (CGM.getContext().getCXXABIKind()) {
516   // For IR-generation purposes, there's no significant difference
517   // between the ARM and iOS ABIs.
518   case TargetCXXABI::GenericARM:
519   case TargetCXXABI::iOS:
520   case TargetCXXABI::WatchOS:
521     return new ARMCXXABI(CGM);
522 
523   case TargetCXXABI::AppleARM64:
524     return new AppleARM64CXXABI(CGM);
525 
526   case TargetCXXABI::Fuchsia:
527     return new FuchsiaCXXABI(CGM);
528 
529   // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
530   // include the other 32-bit ARM oddities: constructor/destructor return values
531   // and array cookies.
532   case TargetCXXABI::GenericAArch64:
533     return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
534                              /*UseARMGuardVarABI=*/true);
535 
536   case TargetCXXABI::GenericMIPS:
537     return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
538 
539   case TargetCXXABI::WebAssembly:
540     return new WebAssemblyCXXABI(CGM);
541 
542   case TargetCXXABI::XL:
543     return new XLCXXABI(CGM);
544 
545   case TargetCXXABI::GenericItanium:
546     if (CGM.getContext().getTargetInfo().getTriple().getArch()
547         == llvm::Triple::le32) {
548       // For PNaCl, use ARM-style method pointers so that PNaCl code
549       // does not assume anything about the alignment of function
550       // pointers.
551       return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
552     }
553     return new ItaniumCXXABI(CGM);
554 
555   case TargetCXXABI::Microsoft:
556     llvm_unreachable("Microsoft ABI is not Itanium-based");
557   }
558   llvm_unreachable("bad ABI kind");
559 }
560 
561 llvm::Type *
562 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
563   if (MPT->isMemberDataPointer())
564     return CGM.PtrDiffTy;
565   return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
566 }
567 
568 /// In the Itanium and ARM ABIs, method pointers have the form:
569 ///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
570 ///
571 /// In the Itanium ABI:
572 ///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
573 ///  - the this-adjustment is (memptr.adj)
574 ///  - the virtual offset is (memptr.ptr - 1)
575 ///
576 /// In the ARM ABI:
577 ///  - method pointers are virtual if (memptr.adj & 1) is nonzero
578 ///  - the this-adjustment is (memptr.adj >> 1)
579 ///  - the virtual offset is (memptr.ptr)
580 /// ARM uses 'adj' for the virtual flag because Thumb functions
581 /// may be only single-byte aligned.
582 ///
583 /// If the member is virtual, the adjusted 'this' pointer points
584 /// to a vtable pointer from which the virtual offset is applied.
585 ///
586 /// If the member is non-virtual, memptr.ptr is the address of
587 /// the function to call.
588 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
589     CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
590     llvm::Value *&ThisPtrForCall,
591     llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
592   CGBuilderTy &Builder = CGF.Builder;
593 
594   const FunctionProtoType *FPT =
595     MPT->getPointeeType()->getAs<FunctionProtoType>();
596   auto *RD =
597       cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
598 
599   llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
600       CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
601 
602   llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
603 
604   llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
605   llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
606   llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
607 
608   // Extract memptr.adj, which is in the second field.
609   llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
610 
611   // Compute the true adjustment.
612   llvm::Value *Adj = RawAdj;
613   if (UseARMMethodPtrABI)
614     Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
615 
616   // Apply the adjustment and cast back to the original struct type
617   // for consistency.
618   llvm::Value *This = ThisAddr.getPointer();
619   llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
620   Ptr = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Ptr, Adj);
621   This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
622   ThisPtrForCall = This;
623 
624   // Load the function pointer.
625   llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
626 
627   // If the LSB in the function pointer is 1, the function pointer points to
628   // a virtual function.
629   llvm::Value *IsVirtual;
630   if (UseARMMethodPtrABI)
631     IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
632   else
633     IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
634   IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
635   Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
636 
637   // In the virtual path, the adjustment left 'This' pointing to the
638   // vtable of the correct base subobject.  The "function pointer" is an
639   // offset within the vtable (+1 for the virtual flag on non-ARM).
640   CGF.EmitBlock(FnVirtual);
641 
642   // Cast the adjusted this to a pointer to vtable pointer and load.
643   llvm::Type *VTableTy = Builder.getInt8PtrTy();
644   CharUnits VTablePtrAlign =
645     CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
646                                       CGF.getPointerAlign());
647   llvm::Value *VTable = CGF.GetVTablePtr(
648       Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);
649 
650   // Apply the offset.
651   // On ARM64, to reserve extra space in virtual member function pointers,
652   // we only pay attention to the low 32 bits of the offset.
653   llvm::Value *VTableOffset = FnAsInt;
654   if (!UseARMMethodPtrABI)
655     VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
656   if (Use32BitVTableOffsetABI) {
657     VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
658     VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
659   }
660 
661   // Check the address of the function pointer if CFI on member function
662   // pointers is enabled.
663   llvm::Constant *CheckSourceLocation;
664   llvm::Constant *CheckTypeDesc;
665   bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
666                             CGM.HasHiddenLTOVisibility(RD);
667   bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
668                            CGM.HasHiddenLTOVisibility(RD);
669   bool ShouldEmitWPDInfo =
670       CGM.getCodeGenOpts().WholeProgramVTables &&
671       // Don't insert type tests if we are forcing public std visibility.
672       !CGM.HasLTOVisibilityPublicStd(RD);
673   llvm::Value *VirtualFn = nullptr;
674 
675   {
676     CodeGenFunction::SanitizerScope SanScope(&CGF);
677     llvm::Value *TypeId = nullptr;
678     llvm::Value *CheckResult = nullptr;
679 
680     if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
681       // If doing CFI, VFE or WPD, we will need the metadata node to check
682       // against.
683       llvm::Metadata *MD =
684           CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
685       TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
686     }
687 
688     if (ShouldEmitVFEInfo) {
689       llvm::Value *VFPAddr =
690           Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
691 
692       // If doing VFE, load from the vtable with a type.checked.load intrinsic
693       // call. Note that we use the GEP to calculate the address to load from
694       // and pass 0 as the offset to the intrinsic. This is because every
695       // vtable slot of the correct type is marked with matching metadata, and
696       // we know that the load must be from one of these slots.
697       llvm::Value *CheckedLoad = Builder.CreateCall(
698           CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
699           {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
700       CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
701       VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
702       VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
703                                         "memptr.virtualfn");
704     } else {
705       // When not doing VFE, emit a normal load, as it allows more
706       // optimisations than type.checked.load.
707       if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
708         llvm::Value *VFPAddr =
709             Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
710         CheckResult = Builder.CreateCall(
711             CGM.getIntrinsic(llvm::Intrinsic::type_test),
712             {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
713       }
714 
715       if (CGM.getItaniumVTableContext().isRelativeLayout()) {
716         VirtualFn = CGF.Builder.CreateCall(
717             CGM.getIntrinsic(llvm::Intrinsic::load_relative,
718                              {VTableOffset->getType()}),
719             {VTable, VTableOffset});
720         VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
721       } else {
722         llvm::Value *VFPAddr =
723             CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
724         VFPAddr = CGF.Builder.CreateBitCast(
725             VFPAddr, FTy->getPointerTo()->getPointerTo());
726         VirtualFn = CGF.Builder.CreateAlignedLoad(
727             FTy->getPointerTo(), VFPAddr, CGF.getPointerAlign(),
728             "memptr.virtualfn");
729       }
730     }
731     assert(VirtualFn && "Virtual fuction pointer not created!");
732     assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
733             CheckResult) &&
734            "Check result required but not created!");
735 
736     if (ShouldEmitCFICheck) {
737       // If doing CFI, emit the check.
738       CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
739       CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
740       llvm::Constant *StaticData[] = {
741           llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
742           CheckSourceLocation,
743           CheckTypeDesc,
744       };
745 
746       if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
747         CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
748       } else {
749         llvm::Value *AllVtables = llvm::MetadataAsValue::get(
750             CGM.getLLVMContext(),
751             llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
752         llvm::Value *ValidVtable = Builder.CreateCall(
753             CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
754         CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
755                       SanitizerHandler::CFICheckFail, StaticData,
756                       {VTable, ValidVtable});
757       }
758 
759       FnVirtual = Builder.GetInsertBlock();
760     }
761   } // End of sanitizer scope
762 
763   CGF.EmitBranch(FnEnd);
764 
765   // In the non-virtual path, the function pointer is actually a
766   // function pointer.
767   CGF.EmitBlock(FnNonVirtual);
768   llvm::Value *NonVirtualFn =
769     Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
770 
771   // Check the function pointer if CFI on member function pointers is enabled.
772   if (ShouldEmitCFICheck) {
773     CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
774     if (RD->hasDefinition()) {
775       CodeGenFunction::SanitizerScope SanScope(&CGF);
776 
777       llvm::Constant *StaticData[] = {
778           llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
779           CheckSourceLocation,
780           CheckTypeDesc,
781       };
782 
783       llvm::Value *Bit = Builder.getFalse();
784       llvm::Value *CastedNonVirtualFn =
785           Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
786       for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
787         llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
788             getContext().getMemberPointerType(
789                 MPT->getPointeeType(),
790                 getContext().getRecordType(Base).getTypePtr()));
791         llvm::Value *TypeId =
792             llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
793 
794         llvm::Value *TypeTest =
795             Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
796                                {CastedNonVirtualFn, TypeId});
797         Bit = Builder.CreateOr(Bit, TypeTest);
798       }
799 
800       CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
801                     SanitizerHandler::CFICheckFail, StaticData,
802                     {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
803 
804       FnNonVirtual = Builder.GetInsertBlock();
805     }
806   }
807 
808   // We're done.
809   CGF.EmitBlock(FnEnd);
810   llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
811   CalleePtr->addIncoming(VirtualFn, FnVirtual);
812   CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
813 
814   CGCallee Callee(FPT, CalleePtr);
815   return Callee;
816 }
817 
818 /// Compute an l-value by applying the given pointer-to-member to a
819 /// base object.
820 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
821     CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
822     const MemberPointerType *MPT) {
823   assert(MemPtr->getType() == CGM.PtrDiffTy);
824 
825   CGBuilderTy &Builder = CGF.Builder;
826 
827   // Cast to char*.
828   Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
829 
830   // Apply the offset, which we assume is non-null.
831   llvm::Value *Addr = Builder.CreateInBoundsGEP(
832       Base.getElementType(), Base.getPointer(), MemPtr, "memptr.offset");
833 
834   // Cast the address to the appropriate pointer type, adopting the
835   // address space of the base pointer.
836   llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
837                             ->getPointerTo(Base.getAddressSpace());
838   return Builder.CreateBitCast(Addr, PType);
839 }
840 
841 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer
842 /// conversion.
843 ///
844 /// Bitcast conversions are always a no-op under Itanium.
845 ///
846 /// Obligatory offset/adjustment diagram:
847 ///         <-- offset -->          <-- adjustment -->
848 ///   |--------------------------|----------------------|--------------------|
849 ///   ^Derived address point     ^Base address point    ^Member address point
850 ///
851 /// So when converting a base member pointer to a derived member pointer,
852 /// we add the offset to the adjustment because the address point has
853 /// decreased;  and conversely, when converting a derived MP to a base MP
854 /// we subtract the offset from the adjustment because the address point
855 /// has increased.
856 ///
857 /// The standard forbids (at compile time) conversion to and from
858 /// virtual bases, which is why we don't have to consider them here.
859 ///
860 /// The standard forbids (at run time) casting a derived MP to a base
861 /// MP when the derived MP does not point to a member of the base.
862 /// This is why -1 is a reasonable choice for null data member
863 /// pointers.
864 llvm::Value *
865 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
866                                            const CastExpr *E,
867                                            llvm::Value *src) {
868   assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
869          E->getCastKind() == CK_BaseToDerivedMemberPointer ||
870          E->getCastKind() == CK_ReinterpretMemberPointer);
871 
872   // Under Itanium, reinterprets don't require any additional processing.
873   if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
874 
875   // Use constant emission if we can.
876   if (isa<llvm::Constant>(src))
877     return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
878 
879   llvm::Constant *adj = getMemberPointerAdjustment(E);
880   if (!adj) return src;
881 
882   CGBuilderTy &Builder = CGF.Builder;
883   bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
884 
885   const MemberPointerType *destTy =
886     E->getType()->castAs<MemberPointerType>();
887 
888   // For member data pointers, this is just a matter of adding the
889   // offset if the source is non-null.
890   if (destTy->isMemberDataPointer()) {
891     llvm::Value *dst;
892     if (isDerivedToBase)
893       dst = Builder.CreateNSWSub(src, adj, "adj");
894     else
895       dst = Builder.CreateNSWAdd(src, adj, "adj");
896 
897     // Null check.
898     llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
899     llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
900     return Builder.CreateSelect(isNull, src, dst);
901   }
902 
903   // The this-adjustment is left-shifted by 1 on ARM.
904   if (UseARMMethodPtrABI) {
905     uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
906     offset <<= 1;
907     adj = llvm::ConstantInt::get(adj->getType(), offset);
908   }
909 
910   llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
911   llvm::Value *dstAdj;
912   if (isDerivedToBase)
913     dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
914   else
915     dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
916 
917   return Builder.CreateInsertValue(src, dstAdj, 1);
918 }
919 
920 llvm::Constant *
921 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
922                                            llvm::Constant *src) {
923   assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
924          E->getCastKind() == CK_BaseToDerivedMemberPointer ||
925          E->getCastKind() == CK_ReinterpretMemberPointer);
926 
927   // Under Itanium, reinterprets don't require any additional processing.
928   if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
929 
930   // If the adjustment is trivial, we don't need to do anything.
931   llvm::Constant *adj = getMemberPointerAdjustment(E);
932   if (!adj) return src;
933 
934   bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
935 
936   const MemberPointerType *destTy =
937     E->getType()->castAs<MemberPointerType>();
938 
939   // For member data pointers, this is just a matter of adding the
940   // offset if the source is non-null.
941   if (destTy->isMemberDataPointer()) {
942     // null maps to null.
943     if (src->isAllOnesValue()) return src;
944 
945     if (isDerivedToBase)
946       return llvm::ConstantExpr::getNSWSub(src, adj);
947     else
948       return llvm::ConstantExpr::getNSWAdd(src, adj);
949   }
950 
951   // The this-adjustment is left-shifted by 1 on ARM.
952   if (UseARMMethodPtrABI) {
953     uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
954     offset <<= 1;
955     adj = llvm::ConstantInt::get(adj->getType(), offset);
956   }
957 
958   llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
959   llvm::Constant *dstAdj;
960   if (isDerivedToBase)
961     dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
962   else
963     dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
964 
965   return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
966 }
967 
968 llvm::Constant *
969 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
970   // Itanium C++ ABI 2.3:
971   //   A NULL pointer is represented as -1.
972   if (MPT->isMemberDataPointer())
973     return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
974 
975   llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
976   llvm::Constant *Values[2] = { Zero, Zero };
977   return llvm::ConstantStruct::getAnon(Values);
978 }
979 
980 llvm::Constant *
981 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
982                                      CharUnits offset) {
983   // Itanium C++ ABI 2.3:
984   //   A pointer to data member is an offset from the base address of
985   //   the class object containing it, represented as a ptrdiff_t
986   return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
987 }
988 
989 llvm::Constant *
990 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
991   return BuildMemberPointer(MD, CharUnits::Zero());
992 }
993 
/// Build the constant {ptr, adj} pair representing a pointer to the member
/// function MD, with the given extra this-adjustment (in bytes) encoded in
/// the adj field.
llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");

  CodeGenTypes &Types = CGM.getTypes();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
    // Scale the vtable slot index into a byte offset.
    uint64_t VTableOffset;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Multiply by 4-byte relative offsets.
      VTableOffset = Index * 4;
    } else {
      const ASTContext &Context = getContext();
      CharUnits PointerWidth = Context.toCharUnitsFromBits(
          Context.getTargetInfo().getPointerWidth(0));
      VTableOffset = Index * PointerWidth.getQuantity();
    }

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    // Non-virtual: the ptr field holds the function address itself.
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    // Under the ARM ABI the adjustment is doubled so its low bit can carry
    // the virtual flag (which is 0 here); Itanium stores it directly.
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                       (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}
1056 
1057 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1058                                                  QualType MPType) {
1059   const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1060   const ValueDecl *MPD = MP.getMemberPointerDecl();
1061   if (!MPD)
1062     return EmitNullMemberPointer(MPT);
1063 
1064   CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
1065 
1066   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
1067     return BuildMemberPointer(MD, ThisAdjustment);
1068 
1069   CharUnits FieldOffset =
1070     getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
1071   return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
1072 }
1073 
/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
///
/// \param Inequality  when true, emit '!=' rather than '=='; the predicate
///        and the roles of And/Or below are swapped per De Morgan's laws.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  // Select the comparison predicate and combining operators; note the
  // deliberate swap of And/Or in the inequality case.
  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr.  This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null.  ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj.  If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}
1151 
1152 llvm::Value *
1153 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1154                                           llvm::Value *MemPtr,
1155                                           const MemberPointerType *MPT) {
1156   CGBuilderTy &Builder = CGF.Builder;
1157 
1158   /// For member data pointers, this is just a check against -1.
1159   if (MPT->isMemberDataPointer()) {
1160     assert(MemPtr->getType() == CGM.PtrDiffTy);
1161     llvm::Value *NegativeOne =
1162       llvm::Constant::getAllOnesValue(MemPtr->getType());
1163     return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1164   }
1165 
1166   // In Itanium, a member function pointer is not null if 'ptr' is not null.
1167   llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1168 
1169   llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1170   llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1171 
1172   // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1173   // (the virtual bit) is set.
1174   if (UseARMMethodPtrABI) {
1175     llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1176     llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1177     llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1178     llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1179                                                   "memptr.isvirtual");
1180     Result = Builder.CreateOr(Result, IsVirtual);
1181   }
1182 
1183   return Result;
1184 }
1185 
1186 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1187   const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1188   if (!RD)
1189     return false;
1190 
1191   // If C++ prohibits us from making a copy, return by address.
1192   if (!RD->canPassInRegisters()) {
1193     auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1194     FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1195     return true;
1196   }
1197   return false;
1198 }
1199 
1200 /// The Itanium ABI requires non-zero initialization only for data
1201 /// member pointers, for which '0' is a valid offset.
1202 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1203   return MPT->isMemberFunctionPointer();
1204 }
1205 
1206 /// The Itanium ABI always places an offset to the complete object
1207 /// at entry -2 in the vtable.
1208 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1209                                             const CXXDeleteExpr *DE,
1210                                             Address Ptr,
1211                                             QualType ElementType,
1212                                             const CXXDestructorDecl *Dtor) {
1213   bool UseGlobalDelete = DE->isGlobalDelete();
1214   if (UseGlobalDelete) {
1215     // Derive the complete-object pointer, which is what we need
1216     // to pass to the deallocation function.
1217 
1218     // Grab the vtable pointer as an intptr_t*.
1219     auto *ClassDecl =
1220         cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
1221     llvm::Value *VTable =
1222         CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
1223 
1224     // Track back to entry -2 and pull out the offset there.
1225     llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1226         CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
1227     llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr,                                                        CGF.getPointerAlign());
1228 
1229     // Apply the offset.
1230     llvm::Value *CompletePtr =
1231       CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1232     CompletePtr =
1233         CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);
1234 
1235     // If we're supposed to call the global delete, make sure we do so
1236     // even if the destructor throws.
1237     CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1238                                     ElementType);
1239   }
1240 
1241   // FIXME: Provide a source location here even though there's no
1242   // CXXMemberCallExpr for dtor call.
1243   CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1244   EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
1245 
1246   if (UseGlobalDelete)
1247     CGF.PopCleanupBlock();
1248 }
1249 
1250 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1251   // void __cxa_rethrow();
1252 
1253   llvm::FunctionType *FTy =
1254     llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1255 
1256   llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1257 
1258   if (isNoReturn)
1259     CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1260   else
1261     CGF.EmitRuntimeCallOrInvoke(Fn);
1262 }
1263 
1264 static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1265   // void *__cxa_allocate_exception(size_t thrown_size);
1266 
1267   llvm::FunctionType *FTy =
1268     llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1269 
1270   return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1271 }
1272 
1273 static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1274   // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1275   //                  void (*dest) (void *));
1276 
1277   llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1278   llvm::FunctionType *FTy =
1279     llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1280 
1281   return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1282 }
1283 
1284 void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1285   QualType ThrowType = E->getSubExpr()->getType();
1286   // Now allocate the exception object.
1287   llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1288   uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1289 
1290   llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
1291   llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1292       AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1293 
1294   CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
1295   CGF.EmitAnyExprToExn(
1296       E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));
1297 
1298   // Now throw the exception.
1299   llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1300                                                          /*ForEH=*/true);
1301 
1302   // The address of the destructor.  If the exception type has a
1303   // trivial destructor (or isn't a record), we just pass null.
1304   llvm::Constant *Dtor = nullptr;
1305   if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1306     CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1307     if (!Record->hasTrivialDestructor()) {
1308       CXXDestructorDecl *DtorD = Record->getDestructor();
1309       Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
1310       Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1311     }
1312   }
1313   if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1314 
1315   llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1316   CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
1317 }
1318 
1319 static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1320   // void *__dynamic_cast(const void *sub,
1321   //                      const abi::__class_type_info *src,
1322   //                      const abi::__class_type_info *dst,
1323   //                      std::ptrdiff_t src2dst_offset);
1324 
1325   llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1326   llvm::Type *PtrDiffTy =
1327     CGF.ConvertType(CGF.getContext().getPointerDiffType());
1328 
1329   llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1330 
1331   llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1332 
1333   // Mark the function as nounwind readonly.
1334   llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1335                                             llvm::Attribute::ReadOnly };
1336   llvm::AttributeList Attrs = llvm::AttributeList::get(
1337       CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1338 
1339   return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1340 }
1341 
1342 static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1343   // void __cxa_bad_cast();
1344   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1345   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1346 }
1347 
/// Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7].
///
/// Returns either the byte offset of Src within Dst, or one of the
/// special values understood by the runtime:
///   -1: no hint (a virtual base lies on some inheritance path),
///   -2: Src is not a public base of Dst,
///   -3: Src is a public base of Dst along more than one path, but
///       never a virtual base.
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst.  Record all inheritance paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (const CXXBasePath &Path : Paths) {
    if (Path.Access != AS_public)  // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (const CXXBasePathElement &PathElement : Path) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (PathElement.Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      // Offsets only matter when there is a single public path, so stop
      // accumulating once a second one has been seen.
      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
      Offset += L.getBaseClassOffset(
          PathElement.Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}
1399 
1400 static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1401   // void __cxa_bad_typeid();
1402   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1403 
1404   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1405 }
1406 
1407 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1408                                               QualType SrcRecordTy) {
1409   return IsDeref;
1410 }
1411 
1412 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1413   llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1414   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1415   Call->setDoesNotReturn();
1416   CGF.Builder.CreateUnreachable();
1417 }
1418 
/// Emit the std::type_info* for a polymorphic typeid expression by reading
/// it out of the object's vtable.
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *Value =
      CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);

  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Load the type info.
    // In the relative layout, the 32-bit entry 4 bytes before the address
    // point is a relative offset to the type-info proxy.
    Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
    Value = CGF.Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
        {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});

    // Setup to dereference again since this is a proxy we accessed.
    Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
  } else {
    // Load the type info.
    // The type_info pointer lives one slot before the vtable address point.
    Value =
        CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
  }
  return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
                                       CGF.getPointerAlign());
}
1445 
1446 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1447                                                        QualType SrcRecordTy) {
1448   return SrcIsPtr;
1449 }
1450 
/// Emit a call to the __dynamic_cast runtime function, including the
/// bad_cast path required for casts to reference types.
llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  // RTTI descriptors identifying the source and destination class types.
  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
      PtrDiffLTy,
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  // A null result from the runtime signals failure; for reference casts
  // branch to a block that raises bad_cast instead of falling through.
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}
1493 
// Emit dynamic_cast<void*>: read the offset-to-top field from the vtable
// (slot -2 relative to the address point) and add it to the object
// pointer to reach the most-derived object. No runtime call is needed.
llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy,
                                                  QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *OffsetToTop;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable.
    // Relative layout stores it as a plain 32-bit integer.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
  } else {
    llvm::Type *PtrDiffLTy =
        CGF.ConvertType(CGF.getContext().getPointerDiffType());

    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable.
    // Classic layout stores it as a ptrdiff_t-sized entry.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
  }
  // Finally, add the offset to the pointer.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, Value, OffsetToTop);
  return CGF.Builder.CreateBitCast(Value, DestLTy);
}
1532 
1533 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1534   llvm::FunctionCallee Fn = getBadCastFn(CGF);
1535   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1536   Call->setDoesNotReturn();
1537   CGF.Builder.CreateUnreachable();
1538   return true;
1539 }
1540 
// Load the offset of a virtual base from the vbase-offset slot of the
// object's vtable. The slot's (negative) position relative to the
// address point comes from the static vtable layout.
llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         Address This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                               BaseClassDecl);
  // Index in bytes, since the vtable pointer was fetched as i8*.
  llvm::Value *VBaseOffsetPtr =
    CGF.Builder.CreateConstGEP1_64(
        CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
        "vbase.offset.ptr");

  llvm::Value *VBaseOffset;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Relative layout stores vbase offsets as 32-bit integers.
    VBaseOffsetPtr =
        CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
        "vbase.offset");
  } else {
    // Classic layout stores vbase offsets as ptrdiff_t.
    VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
                                               CGM.PtrDiffTy->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
  }
  return VBaseOffset;
}
1570 
// Emit the Itanium constructor variants for 'D' (base and, when the
// class is not abstract, complete).
void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
  // Just make sure we're in sync with TargetCXXABI.
  assert(CGM.getTarget().getCXXABI().hasConstructorVariants());

  // The constructor used for constructing this as a base class;
  // ignores virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));

  // The constructor used for constructing this as a complete class;
  // constructs the virtual bases, then calls the base constructor.
  if (!D->getParent()->isAbstract()) {
    // We don't need to emit the complete ctor if the class is abstract:
    // an abstract class can never be the most-derived type.
    CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
  }
}
1586 
1587 CGCXXABI::AddedStructorArgCounts
1588 ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1589                                       SmallVectorImpl<CanQualType> &ArgTys) {
1590   ASTContext &Context = getContext();
1591 
1592   // All parameters are already in place except VTT, which goes after 'this'.
1593   // These are Clang types, so we don't need to worry about sret yet.
1594 
1595   // Check if we need to add a VTT parameter (which has type void **).
1596   if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1597                                              : GD.getDtorType() == Dtor_Base) &&
1598       cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1599     ArgTys.insert(ArgTys.begin() + 1,
1600                   Context.getPointerType(Context.VoidPtrTy));
1601     return AddedStructorArgCounts::prefix(1);
1602   }
1603   return AddedStructorArgCounts{};
1604 }
1605 
// Emit the Itanium destructor variants for 'D' (base, complete, and —
// for virtual destructors — deleting).
void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
  // The destructor used for destructing this as a base class; ignores
  // virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));

  // The destructor used for destructing this as a most-derived class;
  // call the base destructor and then destructs any virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));

  // The destructor in a virtual table is always a 'deleting'
  // destructor, which calls the complete destructor and then uses the
  // appropriate operator delete.
  if (D->isVirtual())
    CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
}
1621 
// Add the implicit VTT parameter (right after 'this') to the IR-level
// parameter list of the structor currently being emitted, when the
// variant requires one.
void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              QualType &ResTy,
                                              FunctionArgList &Params) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    // The VTT has type void **.
    QualType T = Context.getPointerType(Context.VoidPtrTy);
    auto *VTTDecl = ImplicitParamDecl::Create(
        Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
        T, ImplicitParamDecl::CXXVTT);
    Params.insert(Params.begin() + 1, VTTDecl);
    // Remember the decl so the prolog can load the incoming VTT value.
    getStructorImplicitParamDecl(CGF) = VTTDecl;
  }
}
1641 
// Emit the standard instance-method prolog: set up 'this', the VTT (if
// any), and the this-return slot where the ABI requires it.
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  // Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
  // adjustments are required, because they are all handled by thunks.
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));

  // Initialize the 'vtt' slot if needed.
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
  }

  // If this is a function that the ABI specifies returns 'this', initialize
  // the return slot to 'this' at the start of the function.
  //
  // Unlike the setting of return types, this is done within the ABI
  // implementation instead of by clients of CGCXXABI because:
  // 1) getThisValue is currently protected
  // 2) in theory, an ABI could implement 'this' returns some other way;
  //    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}
1668 
1669 CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1670     CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1671     bool ForVirtualBase, bool Delegating) {
1672   if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1673     return AddedStructorArgs{};
1674 
1675   // Insert the implicit 'vtt' argument as the second argument.
1676   llvm::Value *VTT =
1677       CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1678   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1679   return AddedStructorArgs::prefix({{VTT, VTTTy}});
1680 }
1681 
1682 llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
1683     CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
1684     bool ForVirtualBase, bool Delegating) {
1685   GlobalDecl GD(DD, Type);
1686   return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1687 }
1688 
1689 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1690                                        const CXXDestructorDecl *DD,
1691                                        CXXDtorType Type, bool ForVirtualBase,
1692                                        bool Delegating, Address This,
1693                                        QualType ThisTy) {
1694   GlobalDecl GD(DD, Type);
1695   llvm::Value *VTT =
1696       getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
1697   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1698 
1699   CGCallee Callee;
1700   if (getContext().getLangOpts().AppleKext &&
1701       Type != Dtor_Base && DD->isVirtual())
1702     Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1703   else
1704     Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
1705 
1706   CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
1707                             nullptr);
1708 }
1709 
// Emit the definition (initializer, linkage, comdat, visibility, type
// metadata) for the vtable of 'RD', if it has not been emitted already.
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  CGVT.createVTableInitializer(components, VTLayout, RTTI,
                               llvm::GlobalValue::isLocalLinkage(Linkage));
  components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // Set the right visibility.
  CGM.setGVProperties(VTable, RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD);

  // Always emit type metadata on non-available_externally definitions, and on
  // available_externally definitions if we are performing whole program
  // devirtualization. For WPD we need the type metadata on all vtable
  // definitions to ensure we associate derived classes with base classes
  // defined in headers but with a strong definition only in a shared library.
  if (!VTable->isDeclarationForLinker() ||
      CGM.getCodeGenOpts().WholeProgramVTables) {
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
    // For available_externally definitions, add the vtable to
    // @llvm.compiler.used so that it isn't deleted before whole program
    // analysis.
    if (VTable->isDeclarationForLinker()) {
      assert(CGM.getCodeGenOpts().WholeProgramVTables);
      CGM.addCompilerUsedGlobal(VTable);
    }
  }

  // Non-dso_local relative-layout vtables get a dso_local alias so that
  // relative references from within the module stay valid.
  if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
    CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
}
1769 
1770 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1771     CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1772   if (Vptr.NearestVBase == nullptr)
1773     return false;
1774   return NeedsVTTParameter(CGF.CurGD);
1775 }
1776 
1777 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1778     CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1779     const CXXRecordDecl *NearestVBase) {
1780 
1781   if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1782       NeedsVTTParameter(CGF.CurGD)) {
1783     return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1784                                                   NearestVBase);
1785   }
1786   return getVTableAddressPoint(Base, VTableClass);
1787 }
1788 
// Return a constant GEP to the address point of 'Base' within the
// vtable group of 'VTableClass'.
llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  VTableLayout::AddressPointLocation AddressPoint =
      CGM.getItaniumVTableContext()
          .getVTableLayout(VTableClass)
          .getAddressPoint(Base);
  // GEP indices: deref the global (0), select the vtable within the
  // group, then the component index of the address point.
  llvm::Value *Indices[] = {
    llvm::ConstantInt::get(CGM.Int32Ty, 0),
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
  };

  // InRangeIndex=1 tells the optimizer the access stays within the
  // selected vtable of the group.
  return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
                                              Indices, /*InBounds=*/true,
                                              /*InRangeIndex=*/1);
}
1810 
1811 // Check whether all the non-inline virtual methods for the class have the
1812 // specified attribute.
1813 template <typename T>
1814 static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
1815   bool FoundNonInlineVirtualMethodWithAttr = false;
1816   for (const auto *D : RD->noload_decls()) {
1817     if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1818       if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
1819           FD->doesThisDeclarationHaveABody())
1820         continue;
1821       if (!D->hasAttr<T>())
1822         return false;
1823       FoundNonInlineVirtualMethodWithAttr = true;
1824     }
1825   }
1826 
1827   // We didn't find any non-inline virtual methods missing the attribute.  We
1828   // will return true when we found at least one non-inline virtual with the
1829   // attribute.  (This lets our caller know that the attribute needs to be
1830   // propagated up to the vtable.)
1831   return FoundNonInlineVirtualMethodWithAttr;
1832 }
1833 
// Load the vtable address point for 'Base' from the VTT passed to the
// current structor; this may resolve to a construction vtable.
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");

  // Get the secondary vpointer index.
  uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

  // Load the VTT.
  llvm::Value *VTT = CGF.LoadCXXVTT();
  // Step to the requested entry (skip the GEP entirely for index 0).
  if (VirtualPointerIndex)
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.VoidPtrTy, VTT, VirtualPointerIndex);

  // And load the address point from the VTT.
  return CGF.Builder.CreateAlignedLoad(CGF.VoidPtrTy, VTT,
                                       CGF.getPointerAlign());
}
1854 
// Constant-expression contexts use the same statically-known address
// point as ordinary vptr initialization.
llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
    BaseSubobject Base, const CXXRecordDecl *VTableClass) {
  return getVTableAddressPoint(Base, VTableClass);
}
1859 
// Get or create the (possibly still uninitialized) global variable for
// the vtable group of 'RD', caching the result in the VTables map.
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  // Use pointer alignment for the vtable. Otherwise we would align them based
  // on the size of the initializer which doesn't make sense as only single
  // values are read.
  // (Relative layout uses 32-bit entries, hence the 32-bit alignment.)
  unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
                        ? 32
                        : CGM.getTarget().getPointerAlign(0);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
      getContext().toCharUnitsFromBits(PAlign).getQuantity());
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // In MS C++ if you have a class with virtual functions in which you are using
  // selective member import/export, then all virtual functions must be exported
  // unless they are inline, otherwise a link error will result. To match this
  // behavior, for such classes, we dllimport the vtable if it is defined
  // externally and all the non-inline virtual methods are marked dllimport, and
  // we dllexport the vtable if it is defined in this TU and all the non-inline
  // virtual methods are marked dllexport.
  if (CGM.getTarget().hasPS4DLLImportExport()) {
    if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
      if (CGM.getVTables().isVTableExternal(RD)) {
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
      } else {
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
      }
    }
  }
  CGM.setGVProperties(VTable, RD);

  return VTable;
}
1913 
// Load the function pointer for a virtual call to 'GD' from the
// object's vtable, honoring CFI checked loads and the relative layout.
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  llvm::Type *TyPtr = Ty->getPointerTo();
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(
      This, TyPtr->getPointerTo(), MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc;
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    // CFI: use llvm.type.checked.load, which combines the vtable check
    // with the load itself. The offset is in bytes.
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable, TyPtr,
        VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Relative layout: entries are 32-bit offsets resolved with the
      // llvm.load.relative intrinsic (byte offset 4 * index).
      VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
      llvm::Value *Load = CGF.Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
      VFuncLoad = CGF.Builder.CreateBitCast(Load, TyPtr);
    } else {
      // Classic layout: index into the table of function pointers.
      VTable =
          CGF.Builder.CreateBitCast(VTable, TyPtr->getPointerTo());
      llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
          TyPtr, VTable, VTableIndex, "vfn");
      VFuncLoad =
          CGF.Builder.CreateAlignedLoad(TyPtr, VTableSlotPtr,
                                        CGF.getPointerAlign());
    }

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we will have 2
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            llvm::LLVMContext::MD_invariant_load,
            llvm::MDNode::get(CGM.getLLVMContext(),
                              llvm::ArrayRef<llvm::Metadata *>()));
      }
    }
    VFunc = VFuncLoad;
  }

  CGCallee Callee(GD, VFunc);
  return Callee;
}
1971 
// Emit a virtual destructor call originating either from a delete
// expression or from an explicit destructor member call (the two arms
// of the DeleteOrMemberCallExpr PointerUnion).
llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
    Address This, DeleteOrMemberCallExpr E) {
  // Exactly one of CE/D is non-null.
  auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
  auto *D = E.dyn_cast<const CXXDeleteExpr *>();
  assert((CE != nullptr) ^ (D != nullptr));
  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  GlobalDecl GD(Dtor, DtorType);
  const CGFunctionInfo *FInfo =
      &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
  llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
  CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);

  QualType ThisTy;
  if (CE) {
    ThisTy = CE->getObjectType();
  } else {
    ThisTy = D->getDestroyedType();
  }

  // No VTT is needed: complete/deleting variants never take one.
  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
                            QualType(), nullptr);
  return nullptr;
}
1998 
1999 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2000   CodeGenVTables &VTables = CGM.getVTables();
2001   llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2002   VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2003 }
2004 
2005 bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2006     const CXXRecordDecl *RD) const {
2007   // We don't emit available_externally vtables if we are in -fapple-kext mode
2008   // because kext mode does not permit devirtualization.
2009   if (CGM.getLangOpts().AppleKext)
2010     return false;
2011 
2012   // If the vtable is hidden then it is not safe to emit an available_externally
2013   // copy of vtable.
2014   if (isVTableHidden(RD))
2015     return false;
2016 
2017   if (CGM.getCodeGenOpts().ForceEmitVTables)
2018     return true;
2019 
2020   // If we don't have any not emitted inline virtual function then we are safe
2021   // to emit an available_externally copy of vtable.
2022   // FIXME we can still emit a copy of the vtable if we
2023   // can emit definition of the inline functions.
2024   if (hasAnyUnusedVirtualInlineFunction(RD))
2025     return false;
2026 
2027   // For a class with virtual bases, we must also be able to speculatively
2028   // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2029   // the vtable" and "can emit the VTT". For a base subobject, this means we
2030   // need to be able to emit non-virtual base vtables.
2031   if (RD->getNumVBases()) {
2032     for (const auto &B : RD->bases()) {
2033       auto *BRD = B.getType()->getAsCXXRecordDecl();
2034       assert(BRD && "no class for base specifier");
2035       if (B.isVirtual() || !BRD->isDynamicClass())
2036         continue;
2037       if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2038         return false;
2039     }
2040   }
2041 
2042   return true;
2043 }
2044 
2045 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2046   if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2047     return false;
2048 
2049   // For a complete-object vtable (or more specifically, for the VTT), we need
2050   // to be able to speculatively emit the vtables of all dynamic virtual bases.
2051   for (const auto &B : RD->vbases()) {
2052     auto *BRD = B.getType()->getAsCXXRecordDecl();
2053     assert(BRD && "no class for base specifier");
2054     if (!BRD->isDynamicClass())
2055       continue;
2056     if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2057       return false;
2058   }
2059 
2060   return true;
2061 }
// Apply a thunk's pointer adjustment: a constant non-virtual delta plus
// an optional virtual delta loaded from the vtable.  For 'this'
// adjustments the non-virtual part is applied first; for return
// adjustments it is applied last.
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          Address InitialPtr,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.getPointer();

  // Work in i8* so all offsets below are byte offsets.
  Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    V = CGF.Builder.CreateConstInBoundsByteGEP(V,
                              CharUnits::fromQuantity(NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  if (VirtualAdjustment) {
    llvm::Value *VirtualOffset;

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);

    // 'VirtualAdjustment' is the byte offset of the adjustment slot
    // relative to the vtable's address point.
    llvm::Value *Offset;
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.Int8Ty, VTablePtr, VirtualAdjustment);
    if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Load the adjustment offset from the vtable as a 32-bit int.
      OffsetPtr =
          CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
      Offset =
          CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
                                        CharUnits::fromQuantity(4));
    } else {
      llvm::Type *PtrDiffTy =
          CGF.ConvertType(CGF.getContext().getPointerDiffType());

      OffsetPtr =
          CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());

      // Load the adjustment offset from the vtable.
      Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
                                             CGF.getPointerAlign());
    }
    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(
        V.getElementType(), V.getPointer(), Offset);
  } else {
    ResultPtr = V.getPointer();
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
                                                       NonVirtualAdjustment);
  }

  // Cast back to the original type.
  return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
}
2122 
// Adjust 'this' on entry to a thunk: non-virtual delta first, then the
// virtual delta read from the vcall-offset slot.
llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
                                                  Address This,
                                                  const ThisAdjustment &TA) {
  return performTypeAdjustment(CGF, This, TA.NonVirtual,
                               TA.Virtual.Itanium.VCallOffsetOffset,
                               /*IsReturnAdjustment=*/false);
}
2130 
// Adjust a covariant return value in a thunk: virtual delta (from the
// vbase-offset slot) first, then the non-virtual delta.
llvm::Value *
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const ReturnAdjustment &RA) {
  return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
                               RA.Virtual.Itanium.VBaseOffsetOffset,
                               /*IsReturnAdjustment=*/true);
}
2138 
2139 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2140                                     RValue RV, QualType ResultType) {
2141   if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2142     return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2143 
2144   // Destructor thunks in the ARM ABI have indeterminate results.
2145   llvm::Type *T = CGF.ReturnValue.getElementType();
2146   RValue Undef = RValue::get(llvm::UndefValue::get(T));
2147   return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2148 }
2149 
2150 /************************** Array allocation cookies **************************/
2151 
2152 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2153   // The array cookie is a size_t; pad that up to the element alignment.
2154   // The cookie is actually right-justified in that space.
2155   return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2156                   CGM.getContext().getPreferredTypeAlignInChars(elementType));
2157 }
2158 
/// Write the Itanium array cookie (the element count, right-justified in a
/// cookie sized by getArrayCookieSizeImpl) at the start of the allocation
/// and return a pointer to the array data that follows it.
///
/// \param NewPtr      pointer to the raw allocation returned by operator new
/// \param NumElements the runtime element count to record in the cookie
/// \param expr        the new-expression (used for the ASan operator-new check)
/// \param ElementType the array element type (drives cookie size/alignment)
/// \returns the address of the first array element, past the cookie
Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                             Address NewPtr,
                                             llvm::Value *NumElements,
                                             const CXXNewExpr *expr,
                                             QualType ElementType) {
  assert(requiresArrayCookie(expr));

  unsigned AS = NewPtr.getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeSize = CGF.getSizeSize();

  // The size of the cookie.
  CharUnits CookieSize =
      std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie.  Because the count is right-justified,
  // any padding the element alignment requires comes *before* the count.
  Address CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);

  // Write the number of elements into the appropriate slot.
  Address NumElementsPtr =
      CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Handle the array cookie specially in ASan.  Only the default address
  // space is instrumented, and poisoning custom (non-replaceable) operator
  // new cookies is opt-in via -fsanitize-address-poison-custom-array-cookie.
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
    // The store to the CookiePtr does not need to be instrumented.
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
    llvm::FunctionCallee F =
        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
  }

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
}
2204 
/// Read the element count back out of an Itanium array cookie, given the
/// pointer to the start of the allocation and the cookie size (as computed
/// by getArrayCookieSizeImpl for the element type).
llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                Address allocPtr,
                                                CharUnits cookieSize) {
  // The element size is right-justified in the cookie.
  Address numElementsPtr = allocPtr;
  CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
  if (!numElementsOffset.isZero())
    numElementsPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);

  unsigned AS = allocPtr.getAddressSpace();
  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
  // Only the default address space is ASan-instrumented; everything else
  // reads the cookie with a plain load.
  if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
    return CGF.Builder.CreateLoad(numElementsPtr);
  // In asan mode emit a function call instead of a regular load and let the
  // run-time deal with it: if the shadow is properly poisoned return the
  // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
  // We can't simply ignore this load using nosanitize metadata because
  // the metadata may be lost.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
  llvm::FunctionCallee F =
      CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
  return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
}
2230 
2231 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2232   // ARM says that the cookie is always:
2233   //   struct array_cookie {
2234   //     std::size_t element_size; // element_size != 0
2235   //     std::size_t element_count;
2236   //   };
2237   // But the base ABI doesn't give anything an alignment greater than
2238   // 8, so we can dismiss this as typical ABI-author blindness to
2239   // actual language complexity and round up to the element alignment.
2240   return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2241                   CGM.getContext().getTypeAlignInChars(elementType));
2242 }
2243 
2244 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2245                                          Address newPtr,
2246                                          llvm::Value *numElements,
2247                                          const CXXNewExpr *expr,
2248                                          QualType elementType) {
2249   assert(requiresArrayCookie(expr));
2250 
2251   // The cookie is always at the start of the buffer.
2252   Address cookie = newPtr;
2253 
2254   // The first element is the element size.
2255   cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
2256   llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2257                  getContext().getTypeSizeInChars(elementType).getQuantity());
2258   CGF.Builder.CreateStore(elementSize, cookie);
2259 
2260   // The second element is the element count.
2261   cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2262   CGF.Builder.CreateStore(numElements, cookie);
2263 
2264   // Finally, compute a pointer to the actual data buffer by skipping
2265   // over the cookie completely.
2266   CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2267   return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2268 }
2269 
2270 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2271                                             Address allocPtr,
2272                                             CharUnits cookieSize) {
2273   // The number of elements is at offset sizeof(size_t) relative to
2274   // the allocated pointer.
2275   Address numElementsPtr
2276     = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2277 
2278   numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2279   return CGF.Builder.CreateLoad(numElementsPtr);
2280 }
2281 
2282 /*********************** Static local initialization **************************/
2283 
2284 static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2285                                               llvm::PointerType *GuardPtrTy) {
2286   // int __cxa_guard_acquire(__guard *guard_object);
2287   llvm::FunctionType *FTy =
2288     llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2289                             GuardPtrTy, /*isVarArg=*/false);
2290   return CGM.CreateRuntimeFunction(
2291       FTy, "__cxa_guard_acquire",
2292       llvm::AttributeList::get(CGM.getLLVMContext(),
2293                                llvm::AttributeList::FunctionIndex,
2294                                llvm::Attribute::NoUnwind));
2295 }
2296 
2297 static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2298                                               llvm::PointerType *GuardPtrTy) {
2299   // void __cxa_guard_release(__guard *guard_object);
2300   llvm::FunctionType *FTy =
2301     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2302   return CGM.CreateRuntimeFunction(
2303       FTy, "__cxa_guard_release",
2304       llvm::AttributeList::get(CGM.getLLVMContext(),
2305                                llvm::AttributeList::FunctionIndex,
2306                                llvm::Attribute::NoUnwind));
2307 }
2308 
2309 static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2310                                             llvm::PointerType *GuardPtrTy) {
2311   // void __cxa_guard_abort(__guard *guard_object);
2312   llvm::FunctionType *FTy =
2313     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2314   return CGM.CreateRuntimeFunction(
2315       FTy, "__cxa_guard_abort",
2316       llvm::AttributeList::get(CGM.getLLVMContext(),
2317                                llvm::AttributeList::FunctionIndex,
2318                                llvm::Attribute::NoUnwind));
2319 }
2320 
2321 namespace {
2322   struct CallGuardAbort final : EHScopeStack::Cleanup {
2323     llvm::GlobalVariable *Guard;
2324     CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2325 
2326     void Emit(CodeGenFunction &CGF, Flags flags) override {
2327       CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2328                                   Guard);
2329     }
2330   };
2331 }
2332 
/// Emit code to perform a guarded (thread-safe when required) dynamic
/// initialization of the variable \p D into \p var, creating or reusing the
/// associated guard variable.  The ARM code here follows the Itanium code
/// closely enough that we just special-case it at particular places.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      D.isInline() &&
      !isTemplateInstantiation(D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&
                    !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
    } else {
      guardTy = CGF.Int64Ty;
      guardAlignment = CharUnits::fromQuantity(
                             CGM.getDataLayout().getABITypeAlignment(guardTy));
    }
  }
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
      CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(&D, out);
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage and visibility from the guarded variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setDSOLocal(var->isDSOLocal());
    guard->setVisibility(var->getVisibility());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());
    guard->setAlignment(guardAlignment.getAsAlign());

    // The ABI says: "It is suggested that it be emitted in the same COMDAT
    // group as the associated data object." In practice, this doesn't work for
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C &&
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
      guard->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
    }

    // Cache the guard so a re-emission of this function body reuses it.
    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }

  // Load the first byte of the guard variable.
  llvm::LoadInst *LI =
      Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));

  // Itanium ABI:
  //   An implementation supporting thread-safety on multiprocessor
  //   systems must also guarantee that references to the initialized
  //   object do not occur before the load of the initialization flag.
  //
  // In LLVM, we do this by marking the load Acquire.
  if (threadsafe)
    LI->setAtomic(llvm::AtomicOrdering::Acquire);

  // For ARM, we should only check the first bit, rather than the entire byte:
  //
  // ARM C++ ABI 3.2.3.1:
  //   To support the potential use of initialization guard variables
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
  //   synchronizing instructions we define a static initialization
  //   guard variable to be a 4-byte aligned, 4-byte word with the
  //   following inline access protocol.
  //     #define INITIALIZED 1
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
  //       if (__cxa_guard_acquire(&obj_guard))
  //         ...
  //     }
  //
  // and similarly for ARM64:
  //
  // ARM64 C++ ABI 3.2.2:
  //   This ABI instead only specifies the value bit 0 of the static guard
  //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
  //   variable is not initialized and 1 when it is.
  llvm::Value *V =
      (UseARMGuardVarABI && !useInt8GuardVariable)
          ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
          : LI;
  llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");

  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
                               CodeGenFunction::GuardKind::VariableGuard, &D);

  CGF.EmitBlock(InitCheckBlock);

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.  A non-zero result means we won the race
    // and must perform the initialization.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release.  This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
                                guardAddr.getPointer());
  } else {
    // Store 1 into the first byte of the guard variable after initialization is
    // complete.
    Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
                        Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
  }

  CGF.EmitBlock(EndBlock);
}
2519 
/// Register a global destructor using __cxa_atexit (or the thread-local
/// variants _tlv_atexit / __cxa_thread_atexit when \p TLS is set), binding
/// the registration to this DSO via __dso_handle.
///
/// \param dtor the destructor function; it is cast to void(*)(void*)
/// \param addr the object address passed back to the destructor; may be
///             null for dtors registered from constructor functions
/// \param TLS  whether to use the thread-local atexit entry point
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::FunctionCallee dtor,
                                        llvm::Constant *addr, bool TLS) {
  assert(!CGF.getTarget().getTriple().isOSAIX() &&
         "unexpected call to emitGlobalDtorWithCXAAtExit");
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
         "__cxa_atexit is disabled");
  const char *Name = "__cxa_atexit";
  if (TLS) {
    // Darwin spells the thread-local variant differently.
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ?  "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC.  Go ahead and cast it to the
  // right prototype.
  llvm::Type *dtorTy =
    llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();

  // Preserve address space of addr.
  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
  auto AddrInt8PtrTy =
      AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
      CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
  auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
  llvm::FunctionType *atexitTy =
    llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function.
  llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
    fn->setDoesNotThrow();

  if (!addr)
    // addr is null when we are trying to register a dtor annotated with
    // __attribute__((destructor)) in a constructor function. Using null here is
    // okay because this argument is just passed back to the destructor
    // function.
    addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);

  llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
                             cast<llvm::Constant>(dtor.getCallee()), dtorTy),
                         llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
                         handle};
  CGF.EmitNounwindRuntimeCall(atexit, args);
}
2574 
2575 static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
2576                                                    StringRef FnName) {
2577   // Create a function that registers/unregisters destructors that have the same
2578   // priority.
2579   llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
2580   llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
2581       FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());
2582 
2583   return GlobalInitOrCleanupFn;
2584 }
2585 
/// Emit one __GLOBAL_cleanup_<priority> function per destructor priority.
/// Each function calls unatexit() on every destructor registered at that
/// priority (in reverse registration order) and, when unatexit reports the
/// dtor was still registered (returns 0), runs the destructor directly.
void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalCleanupFnName =
        std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);

    llvm::Function *GlobalCleanupFn =
        createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
                      SourceLocation(), SourceLocation());
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Get the destructor function type, void(*)(void).
    llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
    llvm::Type *dtorTy = dtorFuncTy->getPointerTo();

    // Destructor functions are run/unregistered in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    auto itv = Dtors.rbegin();
    while (itv != Dtors.rend()) {
      llvm::Function *Dtor = *itv;

      // We're assuming that the destructor function is something we can
      // reasonably call with the correct CC.  Go ahead and cast it to the
      // right prototype.
      llvm::Constant *dtor = llvm::ConstantExpr::getBitCast(Dtor, dtorTy);
      llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtor);
      llvm::Value *NeedsDestruct =
          CGF.Builder.CreateIsNull(V, "needs_destruct");

      llvm::BasicBlock *DestructCallBlock =
          CGF.createBasicBlock("destruct.call");
      // The fall-through block either processes the next dtor or ends the
      // function, so name it accordingly.
      llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
          (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
      // Check if unatexit returns a value of 0. If it does, jump to
      // DestructCallBlock, otherwise jump to EndBlock directly.
      CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

      CGF.EmitBlock(DestructCallBlock);

      // Emit the call to casted Dtor.
      llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, dtor);
      // Make sure the call and the callee agree on calling convention.
      CI->setCallingConv(Dtor->getCallingConv());

      CGF.EmitBlock(EndBlock);

      itv++;
    }

    CGF.FinishFunction();
    AddGlobalDtor(GlobalCleanupFn, Priority);
  }
}
2644 
/// Emit one __GLOBAL_init_<priority> constructor function per destructor
/// priority.  Each function registers that priority's destructors with
/// __cxa_atexit (or plain atexit as a fallback) so they run in the correct
/// order at program exit.
void CodeGenModule::registerGlobalDtorsWithAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalInitFnName =
        std::string("__GLOBAL_init_") + llvm::to_string(Priority);
    llvm::Function *GlobalInitFn =
        createGlobalInitOrCleanupFn(*this, GlobalInitFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
                      SourceLocation(), SourceLocation());
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Since constructor functions are run in non-descending order of their
    // priorities, destructors are registered in non-descending order of their
    // priorities, and since destructor functions are run in the reverse order
    // of their registration, destructor functions are run in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    for (auto *Dtor : Dtors) {
      // Register the destructor function calling __cxa_atexit if it is
      // available. Otherwise fall back on calling atexit.
      if (getCodeGenOpts().CXAAtExit) {
        emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
      } else {
        // Get the destructor function type, void(*)(void).
        llvm::Type *dtorTy =
            llvm::FunctionType::get(CGF.VoidTy, false)->getPointerTo();

        // We're assuming that the destructor function is something we can
        // reasonably call with the correct CC.  Go ahead and cast it to the
        // right prototype.
        CGF.registerGlobalDtorWithAtExit(
            llvm::ConstantExpr::getBitCast(Dtor, dtorTy));
      }
    }

    CGF.FinishFunction();
    AddGlobalCtor(GlobalInitFn, Priority, nullptr);
  }

  // Targets using sinit/sterm-style init (e.g. AIX) must also emit the
  // matching cleanup functions that unregister and run these dtors.
  if (getCXXABI().useSinitAndSterm())
    unregisterGlobalDtorsWithUnAtExit();
}
2690 
2691 /// Register a global destructor as best as we know how.
2692 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2693                                        llvm::FunctionCallee dtor,
2694                                        llvm::Constant *addr) {
2695   if (D.isNoDestroy(CGM.getContext()))
2696     return;
2697 
2698   // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2699   // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2700   // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2701   // We can always use __cxa_thread_atexit.
2702   if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2703     return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2704 
2705   // In Apple kexts, we want to add a global destructor entry.
2706   // FIXME: shouldn't this be guarded by some variable?
2707   if (CGM.getLangOpts().AppleKext) {
2708     // Generate a global destructor entry.
2709     return CGM.AddCXXDtorEntry(dtor, addr);
2710   }
2711 
2712   CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2713 }
2714 
2715 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2716                                        CodeGen::CodeGenModule &CGM) {
2717   assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2718   // Darwin prefers to have references to thread local variables to go through
2719   // the thread wrapper instead of directly referencing the backing variable.
2720   return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2721          CGM.getTarget().getTriple().isOSDarwin();
2722 }
2723 
2724 /// Get the appropriate linkage for the wrapper function. This is essentially
2725 /// the weak form of the variable's linkage; every translation unit which needs
2726 /// the wrapper emits a copy, and we want the linker to merge them.
2727 static llvm::GlobalValue::LinkageTypes
2728 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2729   llvm::GlobalValue::LinkageTypes VarLinkage =
2730       CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
2731 
2732   // For internal linkage variables, we don't need an external or weak wrapper.
2733   if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2734     return VarLinkage;
2735 
2736   // If the thread wrapper is replaceable, give it appropriate linkage.
2737   if (isThreadWrapperReplaceable(VD, CGM))
2738     if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2739         !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2740       return VarLinkage;
2741   return llvm::GlobalValue::WeakODRLinkage;
2742 }
2743 
/// Get or create the thread wrapper function for the thread_local variable
/// \p VD.  The wrapper returns a pointer to the (possibly reference-stripped)
/// variable type; newly created wrappers are recorded in ThreadWrappers so
/// EmitThreadLocalInitFuncs can emit their bodies later.
llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::Value *Val) {
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
  }

  // FIXME: If VD is a definition, we should regenerate the function attributes
  // before returning.
  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
    return cast<llvm::Function>(V);

  // The wrapper returns a pointer to the variable, so for a reference-typed
  // variable it returns a pointer to the referenced type.
  QualType RetQT = VD->getType();
  if (RetQT->isReferenceType())
    RetQT = RetQT.getNonReferenceType();

  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      getContext().getPointerType(RetQT), FunctionArgList());

  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Wrapper =
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
                             WrapperName.str(), &CGM.getModule());

  if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
    Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);

  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage())
    if (!isThreadWrapperReplaceable(VD, CGM) ||
        llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
        llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
        VD->getVisibility() == HiddenVisibility)
      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // Replaceable wrappers on Darwin use the CXX_FAST_TLS convention and
  // never unwind.
  if (isThreadWrapperReplaceable(VD, CGM)) {
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
  }

  // Remember the wrapper so its body gets emitted with the TLS init funcs.
  ThreadWrappers.push_back({VD, Wrapper});
  return Wrapper;
}
2792 
/// Emit definitions for the thread wrapper functions of all referenced
/// thread_local variables and, if any ordered dynamic initialization is
/// required, a guarded per-module __tls_init function that runs it.
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    // Template instantiations have unordered initialization; everything else
    // is initialized in declaration order via __tls_init.
    if (isTemplateInstantiation(
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
    else
      OrderedInits.push_back(CXXThreadLocalInits[I]);
  }

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
                                                     SourceLocation(),
                                                     /*TLS=*/true);
    // The guard byte ensures __tls_init runs its body at most once per
    // thread; it is itself thread-local.
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getAsAlign());

    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
        InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
    }
  }

  // Create declarations for thread wrappers for all thread-local variables
  // with non-discardable definitions in this translation unit.
  for (const VarDecl *VD : CXXThreadLocals) {
    if (VD->hasDefinition() &&
        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
      getOrCreateThreadLocalWrapper(VD, GV);
    }
  }

  // Emit all referenced thread wrappers.
  for (auto VDAndWrapper : ThreadWrappers) {
    const VarDecl *VD = VDAndWrapper.first;
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
    llvm::Function *Wrapper = VDAndWrapper.second;

    // Some targets require that all access to thread local variables go through
    // the thread wrapper.  This means that we cannot attempt to create a thread
    // wrapper or a thread helper.
    if (!VD->hasDefinition()) {
      if (isThreadWrapperReplaceable(VD, CGM)) {
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
        continue;
      }

      // If this isn't a TU in which this variable is defined, the thread
      // wrapper is discardable.
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
    }

    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
    }

    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    bool HasConstantInitialization = false;
    if (!usesThreadWrapperFunction(VD)) {
      // Constant-initialized and trivially destructible: no per-thread
      // dynamic initialization is needed at all.
      HasConstantInitialization = true;
    } else if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      // Unordered (template-instantiation) variables use their own per-var
      // init function rather than the shared __tls_init.
      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
      if (InitFuncToUse)
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
                                         InitFuncToUse);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      Init = llvm::Function::Create(InitFnTy,
                                    llvm::GlobalVariable::ExternalWeakLinkage,
                                    InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(
          GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
    }

    if (Init) {
      Init->setVisibility(Var->getVisibility());
      // Don't mark an extern_weak function DSO local on windows.
      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
        Init->setDSOLocal(Var->isDSOLocal());
    }

    llvm::LLVMContext &Context = CGM.getModule().getContext();

    // The linker on AIX is not happy with missing weak symbols.  However,
    // other TUs will not know whether the initialization routine exists
    // so create an empty, init function to satisfy the linker.
    // This is needed whenever a thread wrapper function is not used, and
    // also when the symbol is weak.
    if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
        isEmittedWithConstantInitializer(VD, true) &&
        !mayNeedDestruction(VD)) {
      // Init should be null.  If it were non-null, then the logic above would
      // either be defining the function to be an alias or declaring the
      // function with the expectation that the definition of the variable
      // is elsewhere.
      assert(Init == nullptr && "Expected Init to be null.");

      llvm::Function *Func = llvm::Function::Create(
          InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
                                    cast<llvm::Function>(Func),
                                    /*IsThunk=*/false);
      // Create a function body that just returns
      llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
      CGBuilderTy Builder(CGM, Entry);
      Builder.CreateRetVoid();
    }

    // Emit the wrapper body: run dynamic initialization (if any), then
    // return the address of (or, for references, the value stored in) Var.
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (HasConstantInitialization) {
      // No dynamic initialization to invoke.
    } else if (InitIsInitFunc) {
      if (Init) {
        llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
        if (isThreadWrapperReplaceable(VD, CGM)) {
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
          llvm::Function *Fn =
              cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
        }
      }
    } else if (CGM.getTriple().isOSAIX()) {
      // On AIX, except if constinit and also neither of class type or of
      // (possibly multi-dimensional) array of class type, thread_local vars
      // will have init routines regardless of whether they are
      // const-initialized.  Since the routine is guaranteed to exist, we can
      // unconditionally call it without testing for its existence.  This
      // avoids potentially unresolved weak symbols which the AIX linker
      // isn't happy with.
      Builder.CreateCall(InitFnTy, Init);
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(InitFnTy, Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Var;
    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
      Val = Builder.CreateAlignedLoad(Var->getValueType(), Var, Align);
    }
    if (Val->getType() != Wrapper->getReturnType())
      Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
          Val, Wrapper->getReturnType(), "");
    Builder.CreateRet(Val);
  }
}
2996 
2997 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
2998                                                    const VarDecl *VD,
2999                                                    QualType LValType) {
3000   llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
3001   llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3002 
3003   llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
3004   CallVal->setCallingConv(Wrapper->getCallingConv());
3005 
3006   LValue LV;
3007   if (VD->getType()->isReferenceType())
3008     LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
3009   else
3010     LV = CGF.MakeAddrLValue(CallVal, LValType,
3011                             CGF.getContext().getDeclAlign(VD));
3012   // FIXME: need setObjCGCLValueClass?
3013   return LV;
3014 }
3015 
3016 /// Return whether the given global decl needs a VTT parameter, which it does
3017 /// if it's a base constructor or destructor with virtual bases.
3018 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3019   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
3020 
3021   // We don't have any virtual bases, just return early.
3022   if (!MD->getParent()->getNumVBases())
3023     return false;
3024 
3025   // Check if we have a base constructor.
3026   if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
3027     return true;
3028 
3029   // Check if we have a base destructor.
3030   if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
3031     return true;
3032 
3033   return false;
3034 }
3035 
namespace {
/// ItaniumRTTIBuilder - Builds Itanium C++ ABI RTTI (type_info) descriptors
/// and their mangled-name strings for a given type.
class ItaniumRTTIBuilder {
  CodeGenModule &CGM;  // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI;  // Per-module state.

  /// Fields - The fields of the RTTI descriptor currently being built.
  SmallVector<llvm::Constant *, 16> Fields;

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type.
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

public:
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags.
  enum {
    /// PTI_Const - Type has const qualifier.
    PTI_Const = 0x1,

    /// PTI_Volatile - Type has volatile qualifier.
    PTI_Volatile = 0x2,

    /// PTI_Restrict - Type has restrict qualifier.
    PTI_Restrict = 0x4,

    /// PTI_Incomplete - Type is incomplete.
    PTI_Incomplete = 0x8,

    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
    /// (in pointer to member).
    PTI_ContainingClassIncomplete = 0x10,

    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
    //PTI_TransactionSafe = 0x20,

    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
    PTI_Noexcept = 0x40,
  };

  // VMI type info flags.
  enum {
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
    VMI_NonDiamondRepeat = 0x1,

    /// VMI_DiamondShaped - Class is diamond shaped.
    VMI_DiamondShaped = 0x2
  };

  // Base class type info flags.
  enum {
    /// BCTI_Virtual - Base class is virtual.
    BCTI_Virtual = 0x1,

    /// BCTI_Public - Base class is public.
    BCTI_Public = 0x2
  };

  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
  /// link to an existing RTTI descriptor if one already exists.
  llvm::Constant *BuildTypeInfo(QualType Ty);

  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
  llvm::Constant *BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
};
}
3136 
3137 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3138     QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3139   SmallString<256> Name;
3140   llvm::raw_svector_ostream Out(Name);
3141   CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
3142 
3143   // We know that the mangled name of the type starts at index 4 of the
3144   // mangled name of the typename, so we can just index into it in order to
3145   // get the mangled name of the type.
3146   llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
3147                                                             Name.substr(4));
3148   auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
3149 
3150   llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3151       Name, Init->getType(), Linkage, Align.getQuantity());
3152 
3153   GV->setInitializer(Init);
3154 
3155   return GV;
3156 }
3157 
3158 llvm::Constant *
3159 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3160   // Mangle the RTTI name.
3161   SmallString<256> Name;
3162   llvm::raw_svector_ostream Out(Name);
3163   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3164 
3165   // Look for an existing global.
3166   llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3167 
3168   if (!GV) {
3169     // Create a new global variable.
3170     // Note for the future: If we would ever like to do deferred emission of
3171     // RTTI, check if emitting vtables opportunistically need any adjustment.
3172 
3173     GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
3174                                   /*isConstant=*/true,
3175                                   llvm::GlobalValue::ExternalLinkage, nullptr,
3176                                   Name);
3177     const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3178     CGM.setGVProperties(GV, RD);
3179     // Import the typeinfo symbol when all non-inline virtual methods are
3180     // imported.
3181     if (CGM.getTarget().hasPS4DLLImportExport()) {
3182       if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) {
3183         GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3184         CGM.setDSOLocal(GV);
3185       }
3186     }
3187   }
3188 
3189   return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3190 }
3191 
/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
/// info for that type is defined in the standard library.
static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
  // Itanium C++ ABI 2.9.2:
  //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
  //   the run-time support library. Specifically, the run-time support
  //   library should contain type_info objects for the types X, X* and
  //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
  //   unsigned char, signed char, short, unsigned short, int, unsigned int,
  //   long, unsigned long, long long, unsigned long long, float, double,
  //   long double, char16_t, char32_t, and the IEEE 754r decimal and
  //   half-precision floating point types.
  //
  // GCC also emits RTTI for __int128.
  // FIXME: We do not emit RTTI information for decimal types here.

  // Types added here must also be added to EmitFundamentalRTTIDescriptors.
  switch (Ty->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::NullPtr:
    case BuiltinType::Bool:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char_U:
    case BuiltinType::Char_S:
    case BuiltinType::UChar:
    case BuiltinType::SChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::Half:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
    case BuiltinType::Float16:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      return true;

    // Target-extension and fixed-point builtins have no runtime-provided
    // type_info; their descriptors must be emitted by the compiler.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
#define SVE_TYPE(Name, Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
    case BuiltinType::BFloat16:
      return false;

    // Placeholder types only exist during semantic analysis; they should
    // never survive to RTTI emission.
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("asking for RRTI for a placeholder type!");

    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      llvm_unreachable("FIXME: Objective-C types are unsupported!");
  }

  llvm_unreachable("Invalid BuiltinType Kind!");
}
3302 
3303 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3304   QualType PointeeTy = PointerTy->getPointeeType();
3305   const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3306   if (!BuiltinTy)
3307     return false;
3308 
3309   // Check the qualifiers.
3310   Qualifiers Quals = PointeeTy.getQualifiers();
3311   Quals.removeConst();
3312 
3313   if (!Quals.empty())
3314     return false;
3315 
3316   return TypeInfoIsInStandardLibrary(BuiltinTy);
3317 }
3318 
3319 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
3320 /// information for the given type exists in the standard library.
3321 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3322   // Type info for builtin types is defined in the standard library.
3323   if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3324     return TypeInfoIsInStandardLibrary(BuiltinTy);
3325 
3326   // Type info for some pointer types to builtin types is defined in the
3327   // standard library.
3328   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3329     return TypeInfoIsInStandardLibrary(PointerTy);
3330 
3331   return false;
3332 }
3333 
3334 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3335 /// the given type exists somewhere else, and that we should not emit the type
3336 /// information in this translation unit.  Assumes that it is not a
3337 /// standard-library type.
3338 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3339                                             QualType Ty) {
3340   ASTContext &Context = CGM.getContext();
3341 
3342   // If RTTI is disabled, assume it might be disabled in the
3343   // translation unit that defines any potential key function, too.
3344   if (!Context.getLangOpts().RTTI) return false;
3345 
3346   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3347     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3348     if (!RD->hasDefinition())
3349       return false;
3350 
3351     if (!RD->isDynamicClass())
3352       return false;
3353 
3354     // FIXME: this may need to be reconsidered if the key function
3355     // changes.
3356     // N.B. We must always emit the RTTI data ourselves if there exists a key
3357     // function.
3358     bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3359 
3360     // Don't import the RTTI but emit it locally.
3361     if (CGM.getTriple().isWindowsGNUEnvironment())
3362       return false;
3363 
3364     if (CGM.getVTables().isVTableExternal(RD)) {
3365       if (CGM.getTarget().hasPS4DLLImportExport())
3366         return true;
3367 
3368       return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3369                  ? false
3370                  : true;
3371     }
3372     if (IsDLLImport)
3373       return true;
3374   }
3375 
3376   return false;
3377 }
3378 
3379 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
3380 static bool IsIncompleteClassType(const RecordType *RecordTy) {
3381   return !RecordTy->getDecl()->isCompleteDefinition();
3382 }
3383 
3384 /// ContainsIncompleteClassType - Returns whether the given type contains an
3385 /// incomplete class type. This is true if
3386 ///
3387 ///   * The given type is an incomplete class type.
3388 ///   * The given type is a pointer type whose pointee type contains an
3389 ///     incomplete class type.
3390 ///   * The given type is a member pointer type whose class is an incomplete
3391 ///     class type.
3392 ///   * The given type is a member pointer type whoise pointee type contains an
3393 ///     incomplete class type.
3394 /// is an indirect or direct pointer to an incomplete class type.
3395 static bool ContainsIncompleteClassType(QualType Ty) {
3396   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3397     if (IsIncompleteClassType(RecordTy))
3398       return true;
3399   }
3400 
3401   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3402     return ContainsIncompleteClassType(PointerTy->getPointeeType());
3403 
3404   if (const MemberPointerType *MemberPointerTy =
3405       dyn_cast<MemberPointerType>(Ty)) {
3406     // Check if the class type is incomplete.
3407     const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3408     if (IsIncompleteClassType(ClassType))
3409       return true;
3410 
3411     return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3412   }
3413 
3414   return false;
3415 }
3416 
3417 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3418 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3419 // iff the base is)", according to Itanium C++ ABI, 2.95p6b.
3420 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3421   // Check the number of bases.
3422   if (RD->getNumBases() != 1)
3423     return false;
3424 
3425   // Get the base.
3426   CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3427 
3428   // Check that the base is not virtual.
3429   if (Base->isVirtual())
3430     return false;
3431 
3432   // Check that the base is public.
3433   if (Base->getAccessSpecifier() != AS_public)
3434     return false;
3435 
3436   // Check that the class is dynamic iff the base is.
3437   auto *BaseDecl =
3438       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3439   if (!BaseDecl->isEmpty() &&
3440       BaseDecl->isDynamicClass() != RD->isDynamicClass())
3441     return false;
3442 
3443   return true;
3444 }
3445 
/// Append the vtable pointer for the type_info object of Ty to Fields: a
/// pointer to the address point of the vtable of the matching
/// abi::__*_type_info runtime class.
void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
  // abi::__class_type_info.
  static const char * const ClassTypeInfo =
    "_ZTVN10__cxxabiv117__class_type_infoE";
  // abi::__si_class_type_info.
  static const char * const SIClassTypeInfo =
    "_ZTVN10__cxxabiv120__si_class_type_infoE";
  // abi::__vmi_class_type_info.
  static const char * const VMIClassTypeInfo =
    "_ZTVN10__cxxabiv121__vmi_class_type_infoE";

  const char *VTableName = nullptr;

  // Select the runtime type_info class whose vtable this descriptor points at.
  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    llvm_unreachable("Pipe types shouldn't get here");

  case Type::Builtin:
  case Type::BitInt:
  // GCC treats vector and complex types as fundamental types.
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::Atomic:
  // FIXME: GCC treats block pointers as fundamental types?!
  case Type::BlockPointer:
    // abi::__fundamental_type_info.
    VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // abi::__array_type_info.
    VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // abi::__function_type_info.
    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
    break;

  case Type::Enum:
    // abi::__enum_type_info.
    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());

    // Class type_info kind depends on the inheritance shape: none/unknown,
    // single public non-virtual base, or the general multiple/virtual case.
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      VTableName = ClassTypeInfo;
    } else if (CanUseSingleInheritance(RD)) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = VMIClassTypeInfo;
    }

    break;
  }

  case Type::ObjCObject:
    // Ignore protocol qualifiers.
    Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();

    // Handle id and Class.
    if (isa<BuiltinType>(Ty)) {
      VTableName = ClassTypeInfo;
      break;
    }

    assert(isa<ObjCInterfaceType>(Ty));
    LLVM_FALLTHROUGH;

  case Type::ObjCInterface:
    if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = ClassTypeInfo;
    }
    break;

  case Type::ObjCObjectPointer:
  case Type::Pointer:
    // abi::__pointer_type_info.
    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
    break;

  case Type::MemberPointer:
    // abi::__pointer_to_member_type_info.
    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
    break;
  }

  llvm::Constant *VTable = nullptr;

  // Check if the alias exists. If it doesn't, then get or create the global.
  if (CGM.getItaniumVTableContext().isRelativeLayout())
    VTable = CGM.getModule().getNamedAlias(VTableName);
  if (!VTable)
    VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);

  CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));

  llvm::Type *PtrDiffTy =
      CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());

  // The vtable address point is 2.
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // The vtable address point is 8 bytes after its start:
    // 4 for the offset to top + 4 for the relative offset to rtti.
    llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
    VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
    VTable =
        llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
  } else {
    // Classic layout: skip the offset-to-top and RTTI slots (two pointers).
    llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
    VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
                                                          Two);
  }
  VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);

  Fields.push_back(VTable);
}
3589 
/// Return the linkage that the type info and type info name constants
/// should have for the given type.
static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
                                                             QualType Ty) {
  // Itanium C++ ABI 2.9.5p7:
  //   In addition, it and all of the intermediate abi::__pointer_type_info
  //   structs in the chain down to the abi::__class_type_info for the
  //   incomplete class type must be prevented from resolving to the
  //   corresponding type_info structs for the complete class type, possibly
  //   by making them local static objects. Finally, a dummy class RTTI is
  //   generated for the incomplete type that will not resolve to the final
  //   complete class RTTI (because the latter need not exist), possibly by
  //   making it a local static object.
  if (ContainsIncompleteClassType(Ty))
    return llvm::GlobalValue::InternalLinkage;

  switch (Ty->getLinkage()) {
  // Types not visible outside this translation unit get internal linkage.
  case NoLinkage:
  case InternalLinkage:
  case UniqueExternalLinkage:
    return llvm::GlobalValue::InternalLinkage;

  case VisibleNoLinkage:
  case ModuleInternalLinkage:
  case ModuleLinkage:
  case ExternalLinkage:
    // RTTI is not enabled, which means that this type info struct is going
    // to be used for exception handling. Give it linkonce_odr linkage.
    if (!CGM.getLangOpts().RTTI)
      return llvm::GlobalValue::LinkOnceODRLinkage;

    if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
      // [[gnu::weak]] classes get weak_odr type info.
      if (RD->hasAttr<WeakAttr>())
        return llvm::GlobalValue::WeakODRLinkage;
      // On Windows-Itanium, a dllimported class whose RTTI descriptor is
      // provided externally keeps external linkage so the import is used.
      if (CGM.getTriple().isWindowsItaniumEnvironment())
        if (RD->hasAttr<DLLImportAttr>() &&
            ShouldUseExternalRTTIDescriptor(CGM, Ty))
          return llvm::GlobalValue::ExternalLinkage;
      // MinGW always uses LinkOnceODRLinkage for type info.
      if (RD->isDynamicClass() &&
          !CGM.getContext()
               .getTargetInfo()
               .getTriple()
               .isWindowsGNUEnvironment())
        // Dynamic classes elsewhere: match the linkage of the vtable.
        return CGM.getVTableLinkage(RD);
    }

    return llvm::GlobalValue::LinkOnceODRLinkage;
  }

  llvm_unreachable("Invalid linkage!");
}
3643 
/// Build (or reuse) the RTTI descriptor for \p Ty, computing the linkage,
/// visibility, and DLL storage class it should have before delegating to the
/// four-argument overload.  Returns the descriptor cast to i8*.
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
  // We want to operate on the canonical type.
  Ty = Ty.getCanonicalType();

  // Check if we've already emitted an RTTI descriptor for this type.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);

  llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
  if (OldGV && !OldGV->isDeclaration()) {
    assert(!OldGV->hasAvailableExternallyLinkage() &&
           "available_externally typeinfos not yet implemented");

    // A definition already exists in the module; reuse it.
    return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
  }

  // Check if there is already an external RTTI descriptor for this type.
  if (IsStandardLibraryRTTIDescriptor(Ty) ||
      ShouldUseExternalRTTIDescriptor(CGM, Ty))
    return GetAddrOfExternalRTTIDescriptor(Ty);

  // Emit the standard library with external linkage.
  llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);

  // Give the type_info object and name the formal visibility of the
  // type itself.
  llvm::GlobalValue::VisibilityTypes llvmVisibility;
  if (llvm::GlobalValue::isLocalLinkage(Linkage))
    // If the linkage is local, only default visibility makes sense.
    llvmVisibility = llvm::GlobalValue::DefaultVisibility;
  else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
           ItaniumCXXABI::RUK_NonUniqueHidden)
    // Non-unique RTTI that need not be published is demoted to hidden.
    llvmVisibility = llvm::GlobalValue::HiddenVisibility;
  else
    llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());

  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
      llvm::GlobalValue::DefaultStorageClass;
  if (auto RD = Ty->getAsCXXRecordDecl()) {
    // dllexport the descriptor for exported classes on Windows-Itanium, or
    // when the class's visibility maps to dllexport (and nothing else forces
    // the symbol local/hidden).
    if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
         RD->hasAttr<DLLExportAttr>()) ||
        (CGM.shouldMapVisibilityToDLLExport(RD) &&
         !llvm::GlobalValue::isLocalLinkage(Linkage) &&
         llvmVisibility == llvm::GlobalValue::DefaultVisibility))
      DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
  }
  return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
}
3693 
/// Build the type_info object for \p Ty with the given linkage, visibility,
/// and DLL storage class: the vtable pointer, the name pointer, and any
/// class-specific trailing fields, assembled into a new global named by the
/// mangled RTTI name.  Returns the global cast to i8*.
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
  // Add the vtable pointer.
  BuildVTablePointer(cast<Type>(Ty));

  // And the name.
  llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
  llvm::Constant *TypeNameField;

  // If we're supposed to demote the visibility, be sure to set a flag
  // to use a string comparison for type_info comparisons.
  ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
      CXXABI.classifyRTTIUniqueness(Ty, Linkage);
  if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
    // The flag is the sign bit, which on ARM64 is defined to be clear
    // for global pointers.  This is very ARM64-specific.
    TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
    llvm::Constant *flag =
        llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
    TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
    TypeNameField =
        llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
  } else {
    TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
  }
  Fields.push_back(TypeNameField);

  // Append the type-class-specific trailing fields, if any.
  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  // GCC treats vector types as fundamental types.
  case Type::Builtin:
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::BlockPointer:
    // Itanium C++ ABI 2.9.5p4:
    // abi::__fundamental_type_info adds no data members to std::type_info.
    break;

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    break;

  case Type::BitInt:
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__array_type_info adds no data members to std::type_info.
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__function_type_info adds no data members to std::type_info.
    break;

  case Type::Enum:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__enum_type_info adds no data members to std::type_info.
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      // We don't need to emit any fields.
      break;
    }

    // Classes with bases need __si_class_type_info or __vmi_class_type_info.
    if (CanUseSingleInheritance(RD))
      BuildSIClassTypeInfo(RD);
    else
      BuildVMIClassTypeInfo(RD);

    break;
  }

  case Type::ObjCObject:
  case Type::ObjCInterface:
    BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
    break;

  case Type::ObjCObjectPointer:
    BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
    break;

  case Type::Pointer:
    BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
    break;

  case Type::MemberPointer:
    BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
    break;

  case Type::Atomic:
    // No fields, at least for the moment.
    break;
  }

  llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
  llvm::Module &M = CGM.getModule();
  // An existing global under this name is presumably an earlier forward
  // declaration; we create the definition and replace it below.
  llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
  llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(M, Init->getType(),
                               /*isConstant=*/true, Linkage, Init, Name);

  // Export the typeinfo in the same circumstances as the vtable is exported.
  auto GVDLLStorageClass = DLLStorageClass;
  if (CGM.getTarget().hasPS4DLLImportExport()) {
    if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
      if (RD->hasAttr<DLLExportAttr>() ||
          CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) {
        GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
      }
    }
  }

  // If there's already an old global variable, replace it with the new one.
  if (OldGV) {
    GV->takeName(OldGV);
    llvm::Constant *NewPtr =
      llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtr);
    OldGV->eraseFromParent();
  }

  // Weak symbols need a comdat so duplicate definitions can be deduplicated.
  if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(M.getOrInsertComdat(GV->getName()));

  CharUnits Align =
      CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
  GV->setAlignment(Align.getAsAlign());

  // The Itanium ABI specifies that type_info objects must be globally
  // unique, with one exception: if the type is an incomplete class
  // type or a (possibly indirect) pointer to one.  That exception
  // affects the general case of comparing type_info objects produced
  // by the typeid operator, which is why the comparison operators on
  // std::type_info generally use the type_info name pointers instead
  // of the object addresses.  However, the language's built-in uses
  // of RTTI generally require class types to be complete, even when
  // manipulating pointers to those class types.  This allows the
  // implementation of dynamic_cast to rely on address equality tests,
  // which is much faster.

  // All of this is to say that it's important that both the type_info
  // object and the type_info name be uniqued when weakly emitted.

  TypeName->setVisibility(Visibility);
  CGM.setDSOLocal(TypeName);

  GV->setVisibility(Visibility);
  CGM.setDSOLocal(GV);

  TypeName->setDLLStorageClass(DLLStorageClass);
  GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport()
                             ? GVDLLStorageClass
                             : DLLStorageClass);

  TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
  GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);

  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}
3884 
3885 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3886 /// for the given Objective-C object type.
3887 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3888   // Drop qualifiers.
3889   const Type *T = OT->getBaseType().getTypePtr();
3890   assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3891 
3892   // The builtin types are abi::__class_type_infos and don't require
3893   // extra fields.
3894   if (isa<BuiltinType>(T)) return;
3895 
3896   ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3897   ObjCInterfaceDecl *Super = Class->getSuperClass();
3898 
3899   // Root classes are also __class_type_info.
3900   if (!Super) return;
3901 
3902   QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3903 
3904   // Everything else is single inheritance.
3905   llvm::Constant *BaseTypeInfo =
3906       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3907   Fields.push_back(BaseTypeInfo);
3908 }
3909 
3910 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3911 /// inheritance, according to the Itanium C++ ABI, 2.95p6b.
3912 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3913   // Itanium C++ ABI 2.9.5p6b:
3914   // It adds to abi::__class_type_info a single member pointing to the
3915   // type_info structure for the base type,
3916   llvm::Constant *BaseTypeInfo =
3917     ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3918   Fields.push_back(BaseTypeInfo);
3919 }
3920 
namespace {
  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
  /// a class hierarchy.  Used by ComputeVMIClassTypeInfoFlags to detect
  /// repeated (and diamond-shaped) inheritance.
  struct SeenBases {
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
  };
} // namespace
3929 
3930 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3931 /// abi::__vmi_class_type_info.
3932 ///
3933 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3934                                              SeenBases &Bases) {
3935 
3936   unsigned Flags = 0;
3937 
3938   auto *BaseDecl =
3939       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3940 
3941   if (Base->isVirtual()) {
3942     // Mark the virtual base as seen.
3943     if (!Bases.VirtualBases.insert(BaseDecl).second) {
3944       // If this virtual base has been seen before, then the class is diamond
3945       // shaped.
3946       Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3947     } else {
3948       if (Bases.NonVirtualBases.count(BaseDecl))
3949         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3950     }
3951   } else {
3952     // Mark the non-virtual base as seen.
3953     if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3954       // If this non-virtual base has been seen before, then the class has non-
3955       // diamond shaped repeated inheritance.
3956       Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3957     } else {
3958       if (Bases.VirtualBases.count(BaseDecl))
3959         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3960     }
3961   }
3962 
3963   // Walk all bases.
3964   for (const auto &I : BaseDecl->bases())
3965     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3966 
3967   return Flags;
3968 }
3969 
3970 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3971   unsigned Flags = 0;
3972   SeenBases Bases;
3973 
3974   // Walk all bases.
3975   for (const auto &I : RD->bases())
3976     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3977 
3978   return Flags;
3979 }
3980 
3981 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3982 /// classes with bases that do not satisfy the abi::__si_class_type_info
3983 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
3984 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3985   llvm::Type *UnsignedIntLTy =
3986     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3987 
3988   // Itanium C++ ABI 2.9.5p6c:
3989   //   __flags is a word with flags describing details about the class
3990   //   structure, which may be referenced by using the __flags_masks
3991   //   enumeration. These flags refer to both direct and indirect bases.
3992   unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3993   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3994 
3995   // Itanium C++ ABI 2.9.5p6c:
3996   //   __base_count is a word with the number of direct proper base class
3997   //   descriptions that follow.
3998   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
3999 
4000   if (!RD->getNumBases())
4001     return;
4002 
4003   // Now add the base class descriptions.
4004 
4005   // Itanium C++ ABI 2.9.5p6c:
4006   //   __base_info[] is an array of base class descriptions -- one for every
4007   //   direct proper base. Each description is of the type:
4008   //
4009   //   struct abi::__base_class_type_info {
4010   //   public:
4011   //     const __class_type_info *__base_type;
4012   //     long __offset_flags;
4013   //
4014   //     enum __offset_flags_masks {
4015   //       __virtual_mask = 0x1,
4016   //       __public_mask = 0x2,
4017   //       __offset_shift = 8
4018   //     };
4019   //   };
4020 
4021   // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4022   // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4023   // LLP64 platforms.
4024   // FIXME: Consider updating libc++abi to match, and extend this logic to all
4025   // LLP64 platforms.
4026   QualType OffsetFlagsTy = CGM.getContext().LongTy;
4027   const TargetInfo &TI = CGM.getContext().getTargetInfo();
4028   if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
4029     OffsetFlagsTy = CGM.getContext().LongLongTy;
4030   llvm::Type *OffsetFlagsLTy =
4031       CGM.getTypes().ConvertType(OffsetFlagsTy);
4032 
4033   for (const auto &Base : RD->bases()) {
4034     // The __base_type member points to the RTTI for the base type.
4035     Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
4036 
4037     auto *BaseDecl =
4038         cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
4039 
4040     int64_t OffsetFlags = 0;
4041 
4042     // All but the lower 8 bits of __offset_flags are a signed offset.
4043     // For a non-virtual base, this is the offset in the object of the base
4044     // subobject. For a virtual base, this is the offset in the virtual table of
4045     // the virtual base offset for the virtual base referenced (negative).
4046     CharUnits Offset;
4047     if (Base.isVirtual())
4048       Offset =
4049         CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
4050     else {
4051       const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
4052       Offset = Layout.getBaseClassOffset(BaseDecl);
4053     };
4054 
4055     OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4056 
4057     // The low-order byte of __offset_flags contains flags, as given by the
4058     // masks from the enumeration __offset_flags_masks.
4059     if (Base.isVirtual())
4060       OffsetFlags |= BCTI_Virtual;
4061     if (Base.getAccessSpecifier() == AS_public)
4062       OffsetFlags |= BCTI_Public;
4063 
4064     Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
4065   }
4066 }
4067 
4068 /// Compute the flags for a __pbase_type_info, and remove the corresponding
4069 /// pieces from \p Type.
4070 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
4071   unsigned Flags = 0;
4072 
4073   if (Type.isConstQualified())
4074     Flags |= ItaniumRTTIBuilder::PTI_Const;
4075   if (Type.isVolatileQualified())
4076     Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4077   if (Type.isRestrictQualified())
4078     Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4079   Type = Type.getUnqualifiedType();
4080 
4081   // Itanium C++ ABI 2.9.5p7:
4082   //   When the abi::__pbase_type_info is for a direct or indirect pointer to an
4083   //   incomplete class type, the incomplete target type flag is set.
4084   if (ContainsIncompleteClassType(Type))
4085     Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4086 
4087   if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4088     if (Proto->isNothrow()) {
4089       Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4090       Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
4091     }
4092   }
4093 
4094   return Flags;
4095 }
4096 
4097 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4098 /// used for pointer types.
4099 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4100   // Itanium C++ ABI 2.9.5p7:
4101   //   __flags is a flag word describing the cv-qualification and other
4102   //   attributes of the type pointed to
4103   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4104 
4105   llvm::Type *UnsignedIntLTy =
4106     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4107   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4108 
4109   // Itanium C++ ABI 2.9.5p7:
4110   //  __pointee is a pointer to the std::type_info derivation for the
4111   //  unqualified type being pointed to.
4112   llvm::Constant *PointeeTypeInfo =
4113       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4114   Fields.push_back(PointeeTypeInfo);
4115 }
4116 
4117 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4118 /// struct, used for member pointer types.
4119 void
4120 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4121   QualType PointeeTy = Ty->getPointeeType();
4122 
4123   // Itanium C++ ABI 2.9.5p7:
4124   //   __flags is a flag word describing the cv-qualification and other
4125   //   attributes of the type pointed to.
4126   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4127 
4128   const RecordType *ClassType = cast<RecordType>(Ty->getClass());
4129   if (IsIncompleteClassType(ClassType))
4130     Flags |= PTI_ContainingClassIncomplete;
4131 
4132   llvm::Type *UnsignedIntLTy =
4133     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4134   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4135 
4136   // Itanium C++ ABI 2.9.5p7:
4137   //   __pointee is a pointer to the std::type_info derivation for the
4138   //   unqualified type being pointed to.
4139   llvm::Constant *PointeeTypeInfo =
4140       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4141   Fields.push_back(PointeeTypeInfo);
4142 
4143   // Itanium C++ ABI 2.9.5p9:
4144   //   __context is a pointer to an abi::__class_type_info corresponding to the
4145   //   class type containing the member pointed to
4146   //   (e.g., the "A" in "int A::*").
4147   Fields.push_back(
4148       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
4149 }
4150 
4151 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4152   return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4153 }
4154 
4155 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4156   // Types added here must also be added to TypeInfoIsInStandardLibrary.
4157   QualType FundamentalTypes[] = {
4158       getContext().VoidTy,             getContext().NullPtrTy,
4159       getContext().BoolTy,             getContext().WCharTy,
4160       getContext().CharTy,             getContext().UnsignedCharTy,
4161       getContext().SignedCharTy,       getContext().ShortTy,
4162       getContext().UnsignedShortTy,    getContext().IntTy,
4163       getContext().UnsignedIntTy,      getContext().LongTy,
4164       getContext().UnsignedLongTy,     getContext().LongLongTy,
4165       getContext().UnsignedLongLongTy, getContext().Int128Ty,
4166       getContext().UnsignedInt128Ty,   getContext().HalfTy,
4167       getContext().FloatTy,            getContext().DoubleTy,
4168       getContext().LongDoubleTy,       getContext().Float128Ty,
4169       getContext().Char8Ty,            getContext().Char16Ty,
4170       getContext().Char32Ty
4171   };
4172   llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4173       RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD)
4174           ? llvm::GlobalValue::DLLExportStorageClass
4175           : llvm::GlobalValue::DefaultStorageClass;
4176   llvm::GlobalValue::VisibilityTypes Visibility =
4177       CodeGenModule::GetLLVMVisibility(RD->getVisibility());
4178   for (const QualType &FundamentalType : FundamentalTypes) {
4179     QualType PointerType = getContext().getPointerType(FundamentalType);
4180     QualType PointerTypeConst = getContext().getPointerType(
4181         FundamentalType.withConst());
4182     for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4183       ItaniumRTTIBuilder(*this).BuildTypeInfo(
4184           Type, llvm::GlobalValue::ExternalLinkage,
4185           Visibility, DLLStorageClass);
4186   }
4187 }
4188 
4189 /// What sort of uniqueness rules should we use for the RTTI for the
4190 /// given type?
4191 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4192     QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4193   if (shouldRTTIBeUnique())
4194     return RUK_Unique;
4195 
4196   // It's only necessary for linkonce_odr or weak_odr linkage.
4197   if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4198       Linkage != llvm::GlobalValue::WeakODRLinkage)
4199     return RUK_Unique;
4200 
4201   // It's only necessary with default visibility.
4202   if (CanTy->getVisibility() != DefaultVisibility)
4203     return RUK_Unique;
4204 
4205   // If we're not required to publish this symbol, hide it.
4206   if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4207     return RUK_NonUniqueHidden;
4208 
4209   // If we're required to publish this symbol, as we might be under an
4210   // explicit instantiation, leave it with default visibility but
4211   // enable string-comparisons.
4212   assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4213   return RUK_NonUniqueVisible;
4214 }
4215 
// Find out how to codegen the complete destructor and constructor
namespace {
// How a complete structor relates to its base variant: emit its own body
// (Emit), replace its uses with the base variant (RAUW), emit it as a symbol
// alias of the base variant (Alias), or share one comdat group (COMDAT).
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
} // namespace
4220 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4221                                        const CXXMethodDecl *MD) {
4222   if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4223     return StructorCodegen::Emit;
4224 
4225   // The complete and base structors are not equivalent if there are any virtual
4226   // bases, so emit separate functions.
4227   if (MD->getParent()->getNumVBases())
4228     return StructorCodegen::Emit;
4229 
4230   GlobalDecl AliasDecl;
4231   if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4232     AliasDecl = GlobalDecl(DD, Dtor_Complete);
4233   } else {
4234     const auto *CD = cast<CXXConstructorDecl>(MD);
4235     AliasDecl = GlobalDecl(CD, Ctor_Complete);
4236   }
4237   llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4238 
4239   if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4240     return StructorCodegen::RAUW;
4241 
4242   // FIXME: Should we allow available_externally aliases?
4243   if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4244     return StructorCodegen::RAUW;
4245 
4246   if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4247     // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4248     if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4249         CGM.getTarget().getTriple().isOSBinFormatWasm())
4250       return StructorCodegen::COMDAT;
4251     return StructorCodegen::Emit;
4252   }
4253 
4254   return StructorCodegen::Alias;
4255 }
4256 
/// Emit \p AliasDecl as a global alias of \p TargetDecl (used to fold the
/// complete structor into the base variant).  If a declaration with the
/// alias's mangled name already exists, its uses are redirected to the new
/// alias and the stale declaration is erased.
static void emitConstructorDestructorAlias(CodeGenModule &CGM,
                                           GlobalDecl AliasDecl,
                                           GlobalDecl TargetDecl) {
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  // If a definition under this name already exists, there is nothing to do.
  StringRef MangledName = CGM.getMangledName(AliasDecl);
  llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
  if (Entry && !Entry->isDeclaration())
    return;

  auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));

  // Create the alias with no name.
  auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);

  // Constructors and destructors are always unnamed_addr.
  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Switch any previous uses to the alias.
  if (Entry) {
    assert(Entry->getType() == Aliasee->getType() &&
           "declaration exists with different type");
    // Take over the existing declaration's name, then retire it.
    Alias->takeName(Entry);
    Entry->replaceAllUsesWith(Alias);
    Entry->eraseFromParent();
  } else {
    Alias->setName(MangledName);
  }

  // Finally, set up the alias with its proper name and attributes.
  CGM.SetCommonAttributes(AliasDecl, Alias);
}
4289 
/// Emit the constructor or destructor variant named by \p GD, folding the
/// complete variant into the base variant (via alias, RAUW, or comdat)
/// when getCodegenToUse says that is possible.
void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
  // Exactly one of CD / DD is non-null: a structor is a ctor or a dtor.
  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);

  StructorCodegen CGType = getCodegenToUse(CGM, MD);

  // For the complete variant, try to avoid emitting a separate body by
  // redirecting to the base variant.
  if (CD ? GD.getCtorType() == Ctor_Complete
         : GD.getDtorType() == Dtor_Complete) {
    GlobalDecl BaseDecl;
    if (CD)
      BaseDecl = GD.getWithCtorType(Ctor_Base);
    else
      BaseDecl = GD.getWithDtorType(Dtor_Base);

    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
      emitConstructorDestructorAlias(CGM, GD, BaseDecl);
      return;
    }

    if (CGType == StructorCodegen::RAUW) {
      // Record that uses of the complete variant's symbol should be
      // replaced by the base variant's address.
      StringRef MangledName = CGM.getMangledName(GD);
      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
      CGM.addReplacement(MangledName, Aliasee);
      return;
    }
  }

  // The base destructor is equivalent to the base destructor of its
  // base class if there is exactly one non-virtual base class with a
  // non-trivial destructor, there are no fields with a non-trivial
  // destructor, and the body of the destructor is trivial.
  if (DD && GD.getDtorType() == Dtor_Base &&
      CGType != StructorCodegen::COMDAT &&
      !CGM.TryEmitBaseDestructorAsAlias(DD))
    return;

  // FIXME: The deleting destructor is equivalent to the selected operator
  // delete if:
  //  * either the delete is a destroying operator delete or the destructor
  //    would be trivial if it weren't virtual,
  //  * the conversion from the 'this' parameter to the first parameter of the
  //    destructor is equivalent to a bitcast,
  //  * the destructor does not have an implicit "this" return, and
  //  * the operator delete has the same calling convention and IR function type
  //    as the destructor.
  // In such cases we should try to emit the deleting dtor as an alias to the
  // selected 'operator delete'.

  llvm::Function *Fn = CGM.codegenCXXStructor(GD);

  if (CGType == StructorCodegen::COMDAT) {
    // Group the variants under the C5/D5 comdat name so the linker keeps
    // them together.
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    if (DD)
      getMangleContext().mangleCXXDtorComdat(DD, Out);
    else
      getMangleContext().mangleCXXCtorComdat(CD, Out);
    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
    Fn->setComdat(C);
  } else {
    CGM.maybeSetTrivialComdat(*MD, *Fn);
  }
}
4354 
4355 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4356   // void *__cxa_begin_catch(void*);
4357   llvm::FunctionType *FTy = llvm::FunctionType::get(
4358       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4359 
4360   return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4361 }
4362 
4363 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4364   // void __cxa_end_catch();
4365   llvm::FunctionType *FTy =
4366       llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4367 
4368   return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4369 }
4370 
4371 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4372   // void *__cxa_get_exception_ptr(void*);
4373   llvm::FunctionType *FTy = llvm::FunctionType::get(
4374       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4375 
4376   return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4377 }
4378 
4379 namespace {
4380   /// A cleanup to call __cxa_end_catch.  In many cases, the caught
4381   /// exception type lets us state definitively that the thrown exception
4382   /// type does not have a destructor.  In particular:
4383   ///   - Catch-alls tell us nothing, so we have to conservatively
4384   ///     assume that the thrown exception might have a destructor.
4385   ///   - Catches by reference behave according to their base types.
4386   ///   - Catches of non-record types will only trigger for exceptions
4387   ///     of non-record types, which never have destructors.
4388   ///   - Catches of record types can trigger for arbitrary subclasses
4389   ///     of the caught type, so we have to assume the actual thrown
4390   ///     exception type might have a throwing destructor, even if the
4391   ///     caught type's destructor is trivial or nothrow.
4392   struct CallEndCatch final : EHScopeStack::Cleanup {
4393     CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4394     bool MightThrow;
4395 
4396     void Emit(CodeGenFunction &CGF, Flags flags) override {
4397       if (!MightThrow) {
4398         CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
4399         return;
4400       }
4401 
4402       CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4403     }
4404   };
4405 }
4406 
4407 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
4408 /// __cxa_end_catch.
4409 ///
4410 /// \param EndMightThrow - true if __cxa_end_catch might throw
4411 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4412                                    llvm::Value *Exn,
4413                                    bool EndMightThrow) {
4414   llvm::CallInst *call =
4415     CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4416 
4417   CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
4418 
4419   return call;
4420 }
4421 
/// A "special initializer" callback for initializing a catch
/// parameter during catch initialization.
///
/// \param CatchParam the variable declared in the exception-declaration
/// \param ParamAddr  the storage already allocated for that variable
/// \param Loc        source location used for the loads/stores emitted here
static void InitCatchParam(CodeGenFunction &CGF,
                           const VarDecl &CatchParam,
                           Address ParamAddr,
                           SourceLocation Loc) {
  // Load the exception from where the landing pad saved it.
  llvm::Value *Exn = CGF.getExceptionFromSlot();

  CanQualType CatchType =
    CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);

  // If we're catching by reference, we can just cast the object
  // pointer to the appropriate pointer.
  if (isa<ReferenceType>(CatchType)) {
    QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
    // Only record types can have a (potentially throwing) destructor run
    // by __cxa_end_catch; see the CallEndCatch commentary.
    bool EndCatchMightThrow = CaughtType->isRecordType();

    // __cxa_begin_catch returns the adjusted object pointer.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);

    // We have no way to tell the personality function that we're
    // catching by reference, so if we're catching a pointer,
    // __cxa_begin_catch will actually return that pointer by value.
    if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
      QualType PointeeType = PT->getPointeeType();

      // When catching by reference, generally we should just ignore
      // this by-value pointer and use the exception object instead.
      if (!PointeeType->isRecordType()) {

        // Exn points to the struct _Unwind_Exception header, which
        // we have to skip past in order to reach the exception data.
        unsigned HeaderSize =
          CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
        AdjustedExn =
            CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);

      // However, if we're catching a pointer-to-record type that won't
      // work, because the personality function might have adjusted
      // the pointer.  There's actually no way for us to fully satisfy
      // the language/ABI contract here:  we can't use Exn because it
      // might have the wrong adjustment, but we can't use the by-value
      // pointer because it's off by a level of abstraction.
      //
      // The current solution is to dump the adjusted pointer into an
      // alloca, which breaks language semantics (because changing the
      // pointer doesn't change the exception) but at least works.
      // The better solution would be to filter out non-exact matches
      // and rethrow them, but this is tricky because the rethrow
      // really needs to be catchable by other sites at this landing
      // pad.  The best solution is to fix the personality function.
      } else {
        // Pull the pointer for the reference type off.
        llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType);

        // Create the temporary and write the adjusted pointer into it.
        Address ExnPtrTmp =
          CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
        llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
        CGF.Builder.CreateStore(Casted, ExnPtrTmp);

        // Bind the reference to the temporary.
        AdjustedExn = ExnPtrTmp.getPointer();
      }
    }

    // Bind the reference: store the (possibly adjusted) object pointer
    // into the catch variable's storage.
    llvm::Value *ExnCast =
      CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
    CGF.Builder.CreateStore(ExnCast, ParamAddr);
    return;
  }

  // Scalars and complexes.
  TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
  if (TEK != TEK_Aggregate) {
    // Non-record types never have destructors, so __cxa_end_catch cannot
    // throw here.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);

    // If the catch type is a pointer type, __cxa_begin_catch returns
    // the pointer by value.
    if (CatchType->hasPointerRepresentation()) {
      llvm::Value *CastExn =
        CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");

      // Honor any ARC ownership qualifier on the catch variable.
      switch (CatchType.getQualifiers().getObjCLifetime()) {
      case Qualifiers::OCL_Strong:
        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
        LLVM_FALLTHROUGH;

      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        CGF.Builder.CreateStore(CastExn, ParamAddr);
        return;

      case Qualifiers::OCL_Weak:
        CGF.EmitARCInitWeak(ParamAddr, CastExn);
        return;
      }
      llvm_unreachable("bad ownership qualifier!");
    }

    // Otherwise, it returns a pointer into the exception object.

    llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
    llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);

    // Copy the exception's value into the catch variable.
    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
    LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
    switch (TEK) {
    case TEK_Complex:
      CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
                             /*init*/ true);
      return;
    case TEK_Scalar: {
      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
      CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
      return;
    }
    case TEK_Aggregate:
      llvm_unreachable("evaluation kind filtered out!");
    }
    llvm_unreachable("bad evaluation kind");
  }

  // Aggregates: catching a record type by value.
  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
  auto catchRD = CatchType->getAsCXXRecordDecl();
  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);

  llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok

  // Check for a copy expression.  If we don't have a copy expression,
  // that means a trivial copy is okay.
  const Expr *copyExpr = CatchParam.getInit();
  if (!copyExpr) {
    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
    Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                        LLVMCatchTy, caughtExnAlignment);
    LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
    LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
    CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
    return;
  }

  // We have to call __cxa_get_exception_ptr to get the adjusted
  // pointer before copying.
  llvm::CallInst *rawAdjustedExn =
    CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);

  // Cast that to the appropriate type.
  Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                      LLVMCatchTy, caughtExnAlignment);

  // The copy expression is defined in terms of an OpaqueValueExpr.
  // Find it and map it to the adjusted expression.
  CodeGenFunction::OpaqueValueMapping
    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
           CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));

  // Call the copy ctor in a terminate scope.
  CGF.EHStack.pushTerminate();

  // Perform the copy construction.
  CGF.EmitAggExpr(copyExpr,
                  AggValueSlot::forAddr(ParamAddr, Qualifiers(),
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        AggValueSlot::DoesNotOverlap));

  // Leave the terminate scope.
  CGF.EHStack.popTerminate();

  // Undo the opaque value mapping.
  opaque.pop();

  // Finally we can call __cxa_begin_catch.
  CallBeginCatch(CGF, Exn, true);
}
4602 
4603 /// Begins a catch statement by initializing the catch variable and
4604 /// calling __cxa_begin_catch.
4605 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4606                                    const CXXCatchStmt *S) {
4607   // We have to be very careful with the ordering of cleanups here:
4608   //   C++ [except.throw]p4:
4609   //     The destruction [of the exception temporary] occurs
4610   //     immediately after the destruction of the object declared in
4611   //     the exception-declaration in the handler.
4612   //
4613   // So the precise ordering is:
4614   //   1.  Construct catch variable.
4615   //   2.  __cxa_begin_catch
4616   //   3.  Enter __cxa_end_catch cleanup
4617   //   4.  Enter dtor cleanup
4618   //
4619   // We do this by using a slightly abnormal initialization process.
4620   // Delegation sequence:
4621   //   - ExitCXXTryStmt opens a RunCleanupsScope
4622   //     - EmitAutoVarAlloca creates the variable and debug info
4623   //       - InitCatchParam initializes the variable from the exception
4624   //       - CallBeginCatch calls __cxa_begin_catch
4625   //       - CallBeginCatch enters the __cxa_end_catch cleanup
4626   //     - EmitAutoVarCleanups enters the variable destructor cleanup
4627   //   - EmitCXXTryStmt emits the code for the catch body
4628   //   - EmitCXXTryStmt close the RunCleanupsScope
4629 
4630   VarDecl *CatchParam = S->getExceptionDecl();
4631   if (!CatchParam) {
4632     llvm::Value *Exn = CGF.getExceptionFromSlot();
4633     CallBeginCatch(CGF, Exn, true);
4634     return;
4635   }
4636 
4637   // Emit the local.
4638   CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4639   InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4640   CGF.EmitAutoVarCleanups(var);
4641 }
4642 
4643 /// Get or define the following function:
4644 ///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
4645 /// This code is used only in C++.
4646 static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
4647   llvm::FunctionType *fnTy =
4648     llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4649   llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
4650       fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4651   llvm::Function *fn =
4652       cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
4653   if (fn->empty()) {
4654     fn->setDoesNotThrow();
4655     fn->setDoesNotReturn();
4656 
4657     // What we really want is to massively penalize inlining without
4658     // forbidding it completely.  The difference between that and
4659     // 'noinline' is negligible.
4660     fn->addFnAttr(llvm::Attribute::NoInline);
4661 
4662     // Allow this function to be shared across translation units, but
4663     // we don't want it to turn into an exported symbol.
4664     fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4665     fn->setVisibility(llvm::Function::HiddenVisibility);
4666     if (CGM.supportsCOMDAT())
4667       fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4668 
4669     // Set up the function.
4670     llvm::BasicBlock *entry =
4671         llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4672     CGBuilderTy builder(CGM, entry);
4673 
4674     // Pull the exception pointer out of the parameter list.
4675     llvm::Value *exn = &*fn->arg_begin();
4676 
4677     // Call __cxa_begin_catch(exn).
4678     llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4679     catchCall->setDoesNotThrow();
4680     catchCall->setCallingConv(CGM.getRuntimeCC());
4681 
4682     // Call std::terminate().
4683     llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4684     termCall->setDoesNotThrow();
4685     termCall->setDoesNotReturn();
4686     termCall->setCallingConv(CGM.getRuntimeCC());
4687 
4688     // std::terminate cannot return.
4689     builder.CreateUnreachable();
4690   }
4691   return fnRef;
4692 }
4693 
4694 llvm::CallInst *
4695 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4696                                                    llvm::Value *Exn) {
4697   // In C++, we want to call __cxa_begin_catch() before terminating.
4698   if (Exn) {
4699     assert(CGF.CGM.getLangOpts().CPlusPlus);
4700     return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4701   }
4702   return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4703 }
4704 
4705 std::pair<llvm::Value *, const CXXRecordDecl *>
4706 ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4707                              const CXXRecordDecl *RD) {
4708   return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4709 }
4710 
4711 void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4712                                        const CXXCatchStmt *C) {
4713   if (CGF.getTarget().hasFeature("exception-handling"))
4714     CGF.EHStack.pushCleanup<CatchRetScope>(
4715         NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4716   ItaniumCXXABI::emitBeginCatch(CGF, C);
4717 }
4718 
4719 llvm::CallInst *
4720 WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4721                                                        llvm::Value *Exn) {
4722   // Itanium ABI calls __clang_call_terminate(), which __cxa_begin_catch() on
4723   // the violating exception to mark it handled, but it is currently hard to do
4724   // with wasm EH instruction structure with catch/catch_all, we just call
4725   // std::terminate and ignore the violating exception as in CGCXXABI.
4726   // TODO Consider code transformation that makes calling __clang_call_terminate
4727   // possible.
4728   return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
4729 }
4730 
4731 /// Register a global destructor as best as we know how.
4732 void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
4733                                   llvm::FunctionCallee Dtor,
4734                                   llvm::Constant *Addr) {
4735   if (D.getTLSKind() != VarDecl::TLS_None) {
4736     // atexit routine expects "int(*)(int,...)"
4737     llvm::FunctionType *FTy =
4738         llvm::FunctionType::get(CGM.IntTy, CGM.IntTy, true);
4739     llvm::PointerType *FpTy = FTy->getPointerTo();
4740 
4741     // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
4742     llvm::FunctionType *AtExitTy =
4743         llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, FpTy}, true);
4744 
4745     // Fetch the actual function.
4746     llvm::FunctionCallee AtExit =
4747         CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");
4748 
4749     // Create __dtor function for the var decl.
4750     llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);
4751 
4752     // Register above __dtor with atexit().
4753     // First param is flags and must be 0, second param is function ptr
4754     llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
4755     CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});
4756 
4757     // Cannot unregister TLS __dtor so done
4758     return;
4759   }
4760 
4761   // Create __dtor function for the var decl.
4762   llvm::Function *DtorStub = CGF.createAtExitStub(D, Dtor, Addr);
4763 
4764   // Register above __dtor with atexit().
4765   CGF.registerGlobalDtorWithAtExit(DtorStub);
4766 
4767   // Emit __finalize function to unregister __dtor and (as appropriate) call
4768   // __dtor.
4769   emitCXXStermFinalizer(D, DtorStub, Addr);
4770 }
4771 
// Emit the sterm finalizer for variable D: a nullary function that calls
// unatexit() on dtorStub and, if the stub was still registered, invokes it.
// The finalizer is then queued for execution at program termination, either
// prioritized, via llvm.global_dtors, or in the default finalizer list.
void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                                     llvm::Constant *addr) {
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
  // Mangle the finalizer's name from the variable.
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getMangleContext().mangleDynamicStermFinalizer(&D, Out);
  }

  // Create the finalization action associated with a variable.
  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, FnName.str(), FI, D.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
                    FunctionArgList(), D.getLocation(),
                    D.getInit()->getExprLoc());

  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // the unatexit returns a value of 0, meaning that the cleanup is still
  // pending (and we should call the __dtor function).
  llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);

  llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");

  llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");

  // Check if unatexit returns a value of 0. If it does, jump to
  // DestructCallBlock, otherwise jump to EndBlock directly.
  CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

  CGF.EmitBlock(DestructCallBlock);

  // Emit the call to dtorStub.
  llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);

  // Make sure the call and the callee agree on calling convention.
  CI->setCallingConv(dtorStub->getCallingConv());

  CGF.EmitBlock(EndBlock);

  CGF.FinishFunction();

  // Decide how the finalizer gets scheduled at program termination.
  if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
    // Explicit init_priority attribute: honor the requested priority.
    CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
                                             IPA->getPriority());
  } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
    // According to C++ [basic.start.init]p2, class template static data
    // members (i.e., implicitly or explicitly instantiated specializations)
    // have unordered initialization. As a consequence, we can put them into
    // their own llvm.global_dtors entry.
    CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
  } else {
    CGM.AddCXXStermFinalizerEntry(StermFinalizer);
  }
}
4833