1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This provides C++ code generation targeting the Itanium C++ ABI.  The class
10 // in this file generates structures that follow the Itanium C++ ABI, which is
11 // documented at:
12 //  https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13 //  https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14 //
15 // It also supports the closely-related ARM ABI, documented at:
16 // https://developer.arm.com/documentation/ihi0041/g/
17 //
18 //===----------------------------------------------------------------------===//
19 
20 #include "CGCXXABI.h"
21 #include "CGCleanup.h"
22 #include "CGRecordLayout.h"
23 #include "CGVTables.h"
24 #include "CodeGenFunction.h"
25 #include "CodeGenModule.h"
26 #include "TargetInfo.h"
27 #include "clang/AST/Attr.h"
28 #include "clang/AST/Mangle.h"
29 #include "clang/AST/StmtCXX.h"
30 #include "clang/AST/Type.h"
31 #include "clang/CodeGen/ConstantInitBuilder.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/GlobalValue.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/Value.h"
37 #include "llvm/Support/ScopedPrinter.h"
38 
39 using namespace clang;
40 using namespace CodeGen;
41 
42 namespace {
43 class ItaniumCXXABI : public CodeGen::CGCXXABI {
44   /// VTables - All the vtables which have been defined.
45   llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
46 
47   /// All the thread wrapper functions that have been used.
48   llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
49       ThreadWrappers;
50 
51 protected:
52   bool UseARMMethodPtrABI;
53   bool UseARMGuardVarABI;
54   bool Use32BitVTableOffsetABI;
55 
56   ItaniumMangleContext &getMangleContext() {
57     return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
58   }
59 
60 public:
61   ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
62                 bool UseARMMethodPtrABI = false,
63                 bool UseARMGuardVarABI = false) :
64     CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
65     UseARMGuardVarABI(UseARMGuardVarABI),
66     Use32BitVTableOffsetABI(false) { }
67 
68   bool classifyReturnType(CGFunctionInfo &FI) const override;
69 
70   RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
71     // If C++ prohibits us from making a copy, pass by address.
72     if (!RD->canPassInRegisters())
73       return RAA_Indirect;
74     return RAA_Default;
75   }
76 
77   bool isThisCompleteObject(GlobalDecl GD) const override {
78     // The Itanium ABI has separate complete-object vs.  base-object
79     // variants of both constructors and destructors.
80     if (isa<CXXDestructorDecl>(GD.getDecl())) {
81       switch (GD.getDtorType()) {
82       case Dtor_Complete:
83       case Dtor_Deleting:
84         return true;
85 
86       case Dtor_Base:
87         return false;
88 
89       case Dtor_Comdat:
90         llvm_unreachable("emitting dtor comdat as function?");
91       }
92       llvm_unreachable("bad dtor kind");
93     }
94     if (isa<CXXConstructorDecl>(GD.getDecl())) {
95       switch (GD.getCtorType()) {
96       case Ctor_Complete:
97         return true;
98 
99       case Ctor_Base:
100         return false;
101 
102       case Ctor_CopyingClosure:
103       case Ctor_DefaultClosure:
104         llvm_unreachable("closure ctors in Itanium ABI?");
105 
106       case Ctor_Comdat:
107         llvm_unreachable("emitting ctor comdat as function?");
108       }
109       llvm_unreachable("bad dtor kind");
110     }
111 
112     // No other kinds.
113     return false;
114   }
115 
116   bool isZeroInitializable(const MemberPointerType *MPT) override;
117 
118   llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
119 
120   CGCallee
121     EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
122                                     const Expr *E,
123                                     Address This,
124                                     llvm::Value *&ThisPtrForCall,
125                                     llvm::Value *MemFnPtr,
126                                     const MemberPointerType *MPT) override;
127 
128   llvm::Value *
129     EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
130                                  Address Base,
131                                  llvm::Value *MemPtr,
132                                  const MemberPointerType *MPT) override;
133 
134   llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
135                                            const CastExpr *E,
136                                            llvm::Value *Src) override;
137   llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
138                                               llvm::Constant *Src) override;
139 
140   llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
141 
142   llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
143   llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
144                                         CharUnits offset) override;
145   llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
146   llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
147                                      CharUnits ThisAdjustment);
148 
149   llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
150                                            llvm::Value *L, llvm::Value *R,
151                                            const MemberPointerType *MPT,
152                                            bool Inequality) override;
153 
154   llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
155                                          llvm::Value *Addr,
156                                          const MemberPointerType *MPT) override;
157 
158   void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
159                                Address Ptr, QualType ElementType,
160                                const CXXDestructorDecl *Dtor) override;
161 
162   void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
163   void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
164 
165   void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
166 
167   llvm::CallInst *
168   emitTerminateForUnexpectedException(CodeGenFunction &CGF,
169                                       llvm::Value *Exn) override;
170 
171   void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
172   llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
173   CatchTypeInfo
174   getAddrOfCXXCatchHandlerType(QualType Ty,
175                                QualType CatchHandlerType) override {
176     return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
177   }
178 
179   bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
180   void EmitBadTypeidCall(CodeGenFunction &CGF) override;
181   llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
182                           Address ThisPtr,
183                           llvm::Type *StdTypeInfoPtrTy) override;
184 
185   bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
186                                           QualType SrcRecordTy) override;
187 
188   llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
189                                    QualType SrcRecordTy, QualType DestTy,
190                                    QualType DestRecordTy,
191                                    llvm::BasicBlock *CastEnd) override;
192 
193   llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
194                                      QualType SrcRecordTy,
195                                      QualType DestTy) override;
196 
197   bool EmitBadCastCall(CodeGenFunction &CGF) override;
198 
199   llvm::Value *
200     GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
201                               const CXXRecordDecl *ClassDecl,
202                               const CXXRecordDecl *BaseClassDecl) override;
203 
204   void EmitCXXConstructors(const CXXConstructorDecl *D) override;
205 
206   AddedStructorArgCounts
207   buildStructorSignature(GlobalDecl GD,
208                          SmallVectorImpl<CanQualType> &ArgTys) override;
209 
210   bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
211                               CXXDtorType DT) const override {
212     // Itanium does not emit any destructor variant as an inline thunk.
213     // Delegating may occur as an optimization, but all variants are either
214     // emitted with external linkage or as linkonce if they are inline and used.
215     return false;
216   }
217 
218   void EmitCXXDestructors(const CXXDestructorDecl *D) override;
219 
220   void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
221                                  FunctionArgList &Params) override;
222 
223   void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
224 
225   AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
226                                                const CXXConstructorDecl *D,
227                                                CXXCtorType Type,
228                                                bool ForVirtualBase,
229                                                bool Delegating) override;
230 
231   llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
232                                              const CXXDestructorDecl *DD,
233                                              CXXDtorType Type,
234                                              bool ForVirtualBase,
235                                              bool Delegating) override;
236 
237   void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
238                           CXXDtorType Type, bool ForVirtualBase,
239                           bool Delegating, Address This,
240                           QualType ThisTy) override;
241 
242   void emitVTableDefinitions(CodeGenVTables &CGVT,
243                              const CXXRecordDecl *RD) override;
244 
245   bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
246                                            CodeGenFunction::VPtr Vptr) override;
247 
248   bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
249     return true;
250   }
251 
252   llvm::Constant *
253   getVTableAddressPoint(BaseSubobject Base,
254                         const CXXRecordDecl *VTableClass) override;
255 
256   llvm::Value *getVTableAddressPointInStructor(
257       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
258       BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
259 
260   llvm::Value *getVTableAddressPointInStructorWithVTT(
261       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
262       BaseSubobject Base, const CXXRecordDecl *NearestVBase);
263 
264   llvm::Constant *
265   getVTableAddressPointForConstExpr(BaseSubobject Base,
266                                     const CXXRecordDecl *VTableClass) override;
267 
268   llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
269                                         CharUnits VPtrOffset) override;
270 
271   CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
272                                      Address This, llvm::Type *Ty,
273                                      SourceLocation Loc) override;
274 
275   llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
276                                          const CXXDestructorDecl *Dtor,
277                                          CXXDtorType DtorType, Address This,
278                                          DeleteOrMemberCallExpr E) override;
279 
280   void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
281 
282   bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
283   bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
284 
285   void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
286                        bool ReturnAdjustment) override {
287     // Allow inlining of thunks by emitting them with available_externally
288     // linkage together with vtables when needed.
289     if (ForVTable && !Thunk->hasLocalLinkage())
290       Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
291     CGM.setGVProperties(Thunk, GD);
292   }
293 
294   bool exportThunk() override { return true; }
295 
296   llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
297                                      const ThisAdjustment &TA) override;
298 
299   llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
300                                        const ReturnAdjustment &RA) override;
301 
302   size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
303                               FunctionArgList &Args) const override {
304     assert(!Args.empty() && "expected the arglist to not be empty!");
305     return Args.size() - 1;
306   }
307 
308   StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
309   StringRef GetDeletedVirtualCallName() override
310     { return "__cxa_deleted_virtual"; }
311 
312   CharUnits getArrayCookieSizeImpl(QualType elementType) override;
313   Address InitializeArrayCookie(CodeGenFunction &CGF,
314                                 Address NewPtr,
315                                 llvm::Value *NumElements,
316                                 const CXXNewExpr *expr,
317                                 QualType ElementType) override;
318   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
319                                    Address allocPtr,
320                                    CharUnits cookieSize) override;
321 
322   void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
323                        llvm::GlobalVariable *DeclPtr,
324                        bool PerformInit) override;
325   void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
326                           llvm::FunctionCallee dtor,
327                           llvm::Constant *addr) override;
328 
329   llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
330                                                 llvm::Value *Val);
331   void EmitThreadLocalInitFuncs(
332       CodeGenModule &CGM,
333       ArrayRef<const VarDecl *> CXXThreadLocals,
334       ArrayRef<llvm::Function *> CXXThreadLocalInits,
335       ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
336 
337   /// Determine whether we will definitely emit this variable with a constant
338   /// initializer, either because the language semantics demand it or because
339   /// we know that the initializer is a constant.
340   bool isEmittedWithConstantInitializer(const VarDecl *VD) const {
341     VD = VD->getMostRecentDecl();
342     if (VD->hasAttr<ConstInitAttr>())
343       return true;
344 
345     // All later checks examine the initializer specified on the variable. If
346     // the variable is weak, such examination would not be correct.
347     if (VD->isWeak() || VD->hasAttr<SelectAnyAttr>())
348       return false;
349 
350     const VarDecl *InitDecl = VD->getInitializingDeclaration();
351     if (!InitDecl)
352       return false;
353 
354     // If there's no initializer to run, this is constant initialization.
355     if (!InitDecl->hasInit())
356       return true;
357 
358     // If we have the only definition, we don't need a thread wrapper if we
359     // will emit the value as a constant.
360     if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD)))
361       return !VD->needsDestruction(getContext()) && InitDecl->evaluateValue();
362 
363     // Otherwise, we need a thread wrapper unless we know that every
364     // translation unit will emit the value as a constant. We rely on the
365     // variable being constant-initialized in every translation unit if it's
366     // constant-initialized in any translation unit, which isn't actually
367     // guaranteed by the standard but is necessary for sanity.
368     return InitDecl->hasConstantInitialization();
369   }
370 
371   bool usesThreadWrapperFunction(const VarDecl *VD) const override {
372     return !isEmittedWithConstantInitializer(VD) ||
373            VD->needsDestruction(getContext());
374   }
375   LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
376                                       QualType LValType) override;
377 
378   bool NeedsVTTParameter(GlobalDecl GD) override;
379 
380   /**************************** RTTI Uniqueness ******************************/
381 
382 protected:
383   /// Returns true if the ABI requires RTTI type_info objects to be unique
384   /// across a program.
385   virtual bool shouldRTTIBeUnique() const { return true; }
386 
387 public:
388   /// What sort of unique-RTTI behavior should we use?
389   enum RTTIUniquenessKind {
390     /// We are guaranteeing, or need to guarantee, that the RTTI string
391     /// is unique.
392     RUK_Unique,
393 
394     /// We are not guaranteeing uniqueness for the RTTI string, so we
395     /// can demote to hidden visibility but must use string comparisons.
396     RUK_NonUniqueHidden,
397 
398     /// We are not guaranteeing uniqueness for the RTTI string, so we
399     /// have to use string comparisons, but we also have to emit it with
400     /// non-hidden visibility.
401     RUK_NonUniqueVisible
402   };
403 
404   /// Return the required visibility status for the given type and linkage in
405   /// the current ABI.
406   RTTIUniquenessKind
407   classifyRTTIUniqueness(QualType CanTy,
408                          llvm::GlobalValue::LinkageTypes Linkage) const;
409   friend class ItaniumRTTIBuilder;
410 
411   void emitCXXStructor(GlobalDecl GD) override;
412 
413   std::pair<llvm::Value *, const CXXRecordDecl *>
414   LoadVTablePtr(CodeGenFunction &CGF, Address This,
415                 const CXXRecordDecl *RD) override;
416 
417  private:
418    bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
419      const auto &VtableLayout =
420          CGM.getItaniumVTableContext().getVTableLayout(RD);
421 
422      for (const auto &VtableComponent : VtableLayout.vtable_components()) {
423        // Skip empty slot.
424        if (!VtableComponent.isUsedFunctionPointerKind())
425          continue;
426 
427        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
428        if (!Method->getCanonicalDecl()->isInlined())
429          continue;
430 
431        StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
432        auto *Entry = CGM.GetGlobalValue(Name);
433        // This checks if virtual inline function has already been emitted.
434        // Note that it is possible that this inline function would be emitted
435        // after trying to emit vtable speculatively. Because of this we do
436        // an extra pass after emitting all deferred vtables to find and emit
437        // these vtables opportunistically.
438        if (!Entry || Entry->isDeclaration())
439          return true;
440      }
441      return false;
442   }
443 
444   bool isVTableHidden(const CXXRecordDecl *RD) const {
445     const auto &VtableLayout =
446             CGM.getItaniumVTableContext().getVTableLayout(RD);
447 
448     for (const auto &VtableComponent : VtableLayout.vtable_components()) {
449       if (VtableComponent.isRTTIKind()) {
450         const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
451         if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
452           return true;
453       } else if (VtableComponent.isUsedFunctionPointerKind()) {
454         const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
455         if (Method->getVisibility() == Visibility::HiddenVisibility &&
456             !Method->isDefined())
457           return true;
458       }
459     }
460     return false;
461   }
462 };
463 
464 class ARMCXXABI : public ItaniumCXXABI {
465 public:
466   ARMCXXABI(CodeGen::CodeGenModule &CGM) :
467     ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
468                   /*UseARMGuardVarABI=*/true) {}
469 
470   bool HasThisReturn(GlobalDecl GD) const override {
471     return (isa<CXXConstructorDecl>(GD.getDecl()) || (
472               isa<CXXDestructorDecl>(GD.getDecl()) &&
473               GD.getDtorType() != Dtor_Deleting));
474   }
475 
476   void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
477                            QualType ResTy) override;
478 
479   CharUnits getArrayCookieSizeImpl(QualType elementType) override;
480   Address InitializeArrayCookie(CodeGenFunction &CGF,
481                                 Address NewPtr,
482                                 llvm::Value *NumElements,
483                                 const CXXNewExpr *expr,
484                                 QualType ElementType) override;
485   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
486                                    CharUnits cookieSize) override;
487 };
488 
489 class AppleARM64CXXABI : public ARMCXXABI {
490 public:
491   AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
492     Use32BitVTableOffsetABI = true;
493   }
494 
495   // ARM64 libraries are prepared for non-unique RTTI.
496   bool shouldRTTIBeUnique() const override { return false; }
497 };
498 
499 class FuchsiaCXXABI final : public ItaniumCXXABI {
500 public:
501   explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
502       : ItaniumCXXABI(CGM) {}
503 
504 private:
505   bool HasThisReturn(GlobalDecl GD) const override {
506     return isa<CXXConstructorDecl>(GD.getDecl()) ||
507            (isa<CXXDestructorDecl>(GD.getDecl()) &&
508             GD.getDtorType() != Dtor_Deleting);
509   }
510 };
511 
512 class WebAssemblyCXXABI final : public ItaniumCXXABI {
513 public:
514   explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
515       : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
516                       /*UseARMGuardVarABI=*/true) {}
517   void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
518   llvm::CallInst *
519   emitTerminateForUnexpectedException(CodeGenFunction &CGF,
520                                       llvm::Value *Exn) override;
521 
522 private:
523   bool HasThisReturn(GlobalDecl GD) const override {
524     return isa<CXXConstructorDecl>(GD.getDecl()) ||
525            (isa<CXXDestructorDecl>(GD.getDecl()) &&
526             GD.getDtorType() != Dtor_Deleting);
527   }
528   bool canCallMismatchedFunctionType() const override { return false; }
529 };
530 
531 class XLCXXABI final : public ItaniumCXXABI {
532 public:
533   explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
534       : ItaniumCXXABI(CGM) {}
535 
536   void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
537                           llvm::FunctionCallee dtor,
538                           llvm::Constant *addr) override;
539 
540   bool useSinitAndSterm() const override { return true; }
541 
542 private:
543   void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
544                              llvm::Constant *addr);
545 };
546 }
547 
548 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
549   switch (CGM.getTarget().getCXXABI().getKind()) {
550   // For IR-generation purposes, there's no significant difference
551   // between the ARM and iOS ABIs.
552   case TargetCXXABI::GenericARM:
553   case TargetCXXABI::iOS:
554   case TargetCXXABI::WatchOS:
555     return new ARMCXXABI(CGM);
556 
557   case TargetCXXABI::AppleARM64:
558     return new AppleARM64CXXABI(CGM);
559 
560   case TargetCXXABI::Fuchsia:
561     return new FuchsiaCXXABI(CGM);
562 
563   // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
564   // include the other 32-bit ARM oddities: constructor/destructor return values
565   // and array cookies.
566   case TargetCXXABI::GenericAArch64:
567     return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
568                              /*UseARMGuardVarABI=*/true);
569 
570   case TargetCXXABI::GenericMIPS:
571     return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
572 
573   case TargetCXXABI::WebAssembly:
574     return new WebAssemblyCXXABI(CGM);
575 
576   case TargetCXXABI::XL:
577     return new XLCXXABI(CGM);
578 
579   case TargetCXXABI::GenericItanium:
580     if (CGM.getContext().getTargetInfo().getTriple().getArch()
581         == llvm::Triple::le32) {
582       // For PNaCl, use ARM-style method pointers so that PNaCl code
583       // does not assume anything about the alignment of function
584       // pointers.
585       return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
586     }
587     return new ItaniumCXXABI(CGM);
588 
589   case TargetCXXABI::Microsoft:
590     llvm_unreachable("Microsoft ABI is not Itanium-based");
591   }
592   llvm_unreachable("bad ABI kind");
593 }
594 
595 llvm::Type *
596 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
597   if (MPT->isMemberDataPointer())
598     return CGM.PtrDiffTy;
599   return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
600 }
601 
602 /// In the Itanium and ARM ABIs, method pointers have the form:
603 ///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
604 ///
605 /// In the Itanium ABI:
606 ///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
607 ///  - the this-adjustment is (memptr.adj)
608 ///  - the virtual offset is (memptr.ptr - 1)
609 ///
610 /// In the ARM ABI:
611 ///  - method pointers are virtual if (memptr.adj & 1) is nonzero
612 ///  - the this-adjustment is (memptr.adj >> 1)
613 ///  - the virtual offset is (memptr.ptr)
614 /// ARM uses 'adj' for the virtual flag because Thumb functions
615 /// may be only single-byte aligned.
616 ///
617 /// If the member is virtual, the adjusted 'this' pointer points
618 /// to a vtable pointer from which the virtual offset is applied.
619 ///
620 /// If the member is non-virtual, memptr.ptr is the address of
621 /// the function to call.
622 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
623     CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
624     llvm::Value *&ThisPtrForCall,
625     llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
626   CGBuilderTy &Builder = CGF.Builder;
627 
628   const FunctionProtoType *FPT =
629     MPT->getPointeeType()->getAs<FunctionProtoType>();
630   auto *RD =
631       cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
632 
633   llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
634       CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
635 
636   llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
637 
638   llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
639   llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
640   llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
641 
642   // Extract memptr.adj, which is in the second field.
643   llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
644 
645   // Compute the true adjustment.
646   llvm::Value *Adj = RawAdj;
647   if (UseARMMethodPtrABI)
648     Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
649 
650   // Apply the adjustment and cast back to the original struct type
651   // for consistency.
652   llvm::Value *This = ThisAddr.getPointer();
653   llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
654   Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
655   This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
656   ThisPtrForCall = This;
657 
658   // Load the function pointer.
659   llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
660 
661   // If the LSB in the function pointer is 1, the function pointer points to
662   // a virtual function.
663   llvm::Value *IsVirtual;
664   if (UseARMMethodPtrABI)
665     IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
666   else
667     IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
668   IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
669   Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
670 
671   // In the virtual path, the adjustment left 'This' pointing to the
672   // vtable of the correct base subobject.  The "function pointer" is an
673   // offset within the vtable (+1 for the virtual flag on non-ARM).
674   CGF.EmitBlock(FnVirtual);
675 
676   // Cast the adjusted this to a pointer to vtable pointer and load.
677   llvm::Type *VTableTy = Builder.getInt8PtrTy();
678   CharUnits VTablePtrAlign =
679     CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
680                                       CGF.getPointerAlign());
681   llvm::Value *VTable =
682     CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
683 
684   // Apply the offset.
685   // On ARM64, to reserve extra space in virtual member function pointers,
686   // we only pay attention to the low 32 bits of the offset.
687   llvm::Value *VTableOffset = FnAsInt;
688   if (!UseARMMethodPtrABI)
689     VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
690   if (Use32BitVTableOffsetABI) {
691     VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
692     VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
693   }
694 
695   // Check the address of the function pointer if CFI on member function
696   // pointers is enabled.
697   llvm::Constant *CheckSourceLocation;
698   llvm::Constant *CheckTypeDesc;
699   bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
700                             CGM.HasHiddenLTOVisibility(RD);
701   bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
702                            CGM.HasHiddenLTOVisibility(RD);
703   bool ShouldEmitWPDInfo =
704       CGM.getCodeGenOpts().WholeProgramVTables &&
705       // Don't insert type tests if we are forcing public std visibility.
706       !CGM.HasLTOVisibilityPublicStd(RD);
707   llvm::Value *VirtualFn = nullptr;
708 
709   {
710     CodeGenFunction::SanitizerScope SanScope(&CGF);
711     llvm::Value *TypeId = nullptr;
712     llvm::Value *CheckResult = nullptr;
713 
714     if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
715       // If doing CFI, VFE or WPD, we will need the metadata node to check
716       // against.
717       llvm::Metadata *MD =
718           CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
719       TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
720     }
721 
722     if (ShouldEmitVFEInfo) {
723       llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
724 
725       // If doing VFE, load from the vtable with a type.checked.load intrinsic
726       // call. Note that we use the GEP to calculate the address to load from
727       // and pass 0 as the offset to the intrinsic. This is because every
728       // vtable slot of the correct type is marked with matching metadata, and
729       // we know that the load must be from one of these slots.
730       llvm::Value *CheckedLoad = Builder.CreateCall(
731           CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
732           {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
733       CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
734       VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
735       VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
736                                         "memptr.virtualfn");
737     } else {
738       // When not doing VFE, emit a normal load, as it allows more
739       // optimisations than type.checked.load.
740       if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
741         llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
742         CheckResult = Builder.CreateCall(
743             CGM.getIntrinsic(llvm::Intrinsic::type_test),
744             {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
745       }
746 
747       if (CGM.getItaniumVTableContext().isRelativeLayout()) {
748         VirtualFn = CGF.Builder.CreateCall(
749             CGM.getIntrinsic(llvm::Intrinsic::load_relative,
750                              {VTableOffset->getType()}),
751             {VTable, VTableOffset});
752         VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
753       } else {
754         llvm::Value *VFPAddr = CGF.Builder.CreateGEP(VTable, VTableOffset);
755         VFPAddr = CGF.Builder.CreateBitCast(
756             VFPAddr, FTy->getPointerTo()->getPointerTo());
757         VirtualFn = CGF.Builder.CreateAlignedLoad(
758             VFPAddr, CGF.getPointerAlign(), "memptr.virtualfn");
759       }
760     }
761     assert(VirtualFn && "Virtual fuction pointer not created!");
762     assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
763             CheckResult) &&
764            "Check result required but not created!");
765 
766     if (ShouldEmitCFICheck) {
767       // If doing CFI, emit the check.
768       CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
769       CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
770       llvm::Constant *StaticData[] = {
771           llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
772           CheckSourceLocation,
773           CheckTypeDesc,
774       };
775 
776       if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
777         CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
778       } else {
779         llvm::Value *AllVtables = llvm::MetadataAsValue::get(
780             CGM.getLLVMContext(),
781             llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
782         llvm::Value *ValidVtable = Builder.CreateCall(
783             CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
784         CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
785                       SanitizerHandler::CFICheckFail, StaticData,
786                       {VTable, ValidVtable});
787       }
788 
789       FnVirtual = Builder.GetInsertBlock();
790     }
791   } // End of sanitizer scope
792 
793   CGF.EmitBranch(FnEnd);
794 
795   // In the non-virtual path, the function pointer is actually a
796   // function pointer.
797   CGF.EmitBlock(FnNonVirtual);
798   llvm::Value *NonVirtualFn =
799     Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
800 
801   // Check the function pointer if CFI on member function pointers is enabled.
802   if (ShouldEmitCFICheck) {
803     CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
804     if (RD->hasDefinition()) {
805       CodeGenFunction::SanitizerScope SanScope(&CGF);
806 
807       llvm::Constant *StaticData[] = {
808           llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
809           CheckSourceLocation,
810           CheckTypeDesc,
811       };
812 
813       llvm::Value *Bit = Builder.getFalse();
814       llvm::Value *CastedNonVirtualFn =
815           Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
816       for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
817         llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
818             getContext().getMemberPointerType(
819                 MPT->getPointeeType(),
820                 getContext().getRecordType(Base).getTypePtr()));
821         llvm::Value *TypeId =
822             llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
823 
824         llvm::Value *TypeTest =
825             Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
826                                {CastedNonVirtualFn, TypeId});
827         Bit = Builder.CreateOr(Bit, TypeTest);
828       }
829 
830       CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
831                     SanitizerHandler::CFICheckFail, StaticData,
832                     {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
833 
834       FnNonVirtual = Builder.GetInsertBlock();
835     }
836   }
837 
838   // We're done.
839   CGF.EmitBlock(FnEnd);
840   llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
841   CalleePtr->addIncoming(VirtualFn, FnVirtual);
842   CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
843 
844   CGCallee Callee(FPT, CalleePtr);
845   return Callee;
846 }
847 
/// Compute an l-value by applying the given pointer-to-member to a
/// base object.
///
/// Under Itanium, a data member pointer is simply the byte offset of the
/// member within its class, represented as a ptrdiff_t, so applying it is
/// a byte-wise GEP off the base object.
///
/// \param Base    address of the containing object.
/// \param MemPtr  the member pointer value; must already be of PtrDiffTy.
/// \param MPT     the member pointer's type (used for the pointee type).
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
    CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
    const MemberPointerType *MPT) {
  assert(MemPtr->getType() == CGM.PtrDiffTy);

  CGBuilderTy &Builder = CGF.Builder;

  // Cast to char*.
  Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);

  // Apply the offset, which we assume is non-null.
  // NOTE(review): null data member pointers are -1 under Itanium; callers
  // are expected to have null-checked already — confirm at call sites.
  llvm::Value *Addr =
    Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");

  // Cast the address to the appropriate pointer type, adopting the
  // address space of the base pointer.
  llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
                            ->getPointerTo(Base.getAddressSpace());
  return Builder.CreateBitCast(Addr, PType);
}
870 
/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
///
/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
///         <-- offset -->          <-- adjustment -->
///   |--------------------------|----------------------|--------------------|
///   ^Derived address point     ^Base address point    ^Member address point
///
/// So when converting a base member pointer to a derived member pointer,
/// we add the offset to the adjustment because the address point has
/// decreased;  and conversely, when converting a derived MP to a base MP
/// we subtract the offset from the adjustment because the address point
/// has increased.
///
/// The standard forbids (at compile time) conversion to and from
/// virtual bases, which is why we don't have to consider them here.
///
/// The standard forbids (at run time) casting a derived MP to a base
/// MP when the derived MP does not point to a member of the base.
/// This is why -1 is a reasonable choice for null data member
/// pointers.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // Use constant emission if we can.
  if (isa<llvm::Constant>(src))
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));

  // A null adjustment means the cast is a no-op on the representation.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  CGBuilderTy &Builder = CGF.Builder;
  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(src, adj, "adj");
    else
      dst = Builder.CreateNSWAdd(src, adj, "adj");

    // Null check.  The null data member pointer is all-ones (-1), so it
    // must be preserved rather than adjusted.
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
    return Builder.CreateSelect(isNull, src, dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM, because the low bit
  // of 'adj' carries the virtual/non-virtual discriminator there.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  // Member function pointers are a {ptr, adj} aggregate; only the 'adj'
  // field (index 1) changes under a hierarchy conversion.
  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
  else
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");

  return Builder.CreateInsertValue(src, dstAdj, 1);
}
949 
/// Constant-folding counterpart of the IRGen EmitMemberPointerConversion
/// above: performs the same bitcast / derived-to-base / base-to-derived
/// member pointer conversion entirely on constants.
llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
                                           llvm::Constant *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // If the adjustment is trivial, we don't need to do anything.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    // null maps to null.  (Null data member pointers are all-ones.)
    if (src->isAllOnesValue()) return src;

    if (isDerivedToBase)
      return llvm::ConstantExpr::getNSWSub(src, adj);
    else
      return llvm::ConstantExpr::getNSWAdd(src, adj);
  }

  // The this-adjustment is left-shifted by 1 on ARM, since the ARM ABI
  // keeps the virtual discriminator in the low bit of 'adj'.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  // Only the 'adj' field (index 1) of the {ptr, adj} pair is affected.
  llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
  llvm::Constant *dstAdj;
  if (isDerivedToBase)
    dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
  else
    dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);

  return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
}
997 
998 llvm::Constant *
999 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
1000   // Itanium C++ ABI 2.3:
1001   //   A NULL pointer is represented as -1.
1002   if (MPT->isMemberDataPointer())
1003     return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
1004 
1005   llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
1006   llvm::Constant *Values[2] = { Zero, Zero };
1007   return llvm::ConstantStruct::getAnon(Values);
1008 }
1009 
/// Build a constant data member pointer for a member at the given offset.
llvm::Constant *
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
                                     CharUnits offset) {
  // Itanium C++ ABI 2.3:
  //   A pointer to data member is an offset from the base address of
  //   the class object containing it, represented as a ptrdiff_t
  return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
}
1018 
/// Build a constant member function pointer for MD with no this-adjustment
/// (the common, non-derived-path case); delegates to BuildMemberPointer.
llvm::Constant *
ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
  return BuildMemberPointer(MD, CharUnits::Zero());
}
1023 
/// Build the {ptr, adj} constant encoding a pointer to member function.
///
/// \param MD             the (non-static) method being pointed to.
/// \param ThisAdjustment the this-pointer adjustment in bytes to apply
///                       before the call (non-zero for members reached
///                       through a base-class path).
llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");

  CodeGenTypes &Types = CGM.getTypes();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
    uint64_t VTableOffset;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Multiply by 4-byte relative offsets.
      VTableOffset = Index * 4;
    } else {
      // Classic layout: each vtable slot is one pointer wide.
      const ASTContext &Context = getContext();
      CharUnits PointerWidth = Context.toCharUnitsFromBits(
          Context.getTargetInfo().getPointerWidth(0));
      VTableOffset = Index * PointerWidth.getQuantity();
    }

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    // Non-virtual: ptr is the function address itself.  On ARM the
    // adjustment is doubled so its low bit stays clear (non-virtual).
    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                       (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}
1086 
/// Lower an evaluated member-pointer constant (APValue) to its LLVM
/// representation: null, a member function pointer, or a data member
/// offset, including any inheritance-path this-adjustment.
llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
                                                 QualType MPType) {
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  // No declaration means the APValue is the null member pointer.
  if (!MPD)
    return EmitNullMemberPointer(MPT);

  CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
    return BuildMemberPointer(MD, ThisAdjustment);

  // Data member: the representation is the field offset plus the path
  // adjustment, as a ptrdiff_t.
  CharUnits FieldOffset =
    getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
  return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
}
1103 
/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
///
/// \param Inequality  when true, emit '!=' instead of '=='; the same
///                    structure is used with the operators De-Morganed.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  // Select the predicate and connectives; for inequality they are the
  // duals of the equality ones (De Morgan).
  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr.  This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null.  ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj.  If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}
1181 
/// Emit the boolean conversion of a member pointer ("is this non-null?").
llvm::Value *
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *MemPtr,
                                          const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  /// For member data pointers, this is just a check against -1.
  if (MPT->isMemberDataPointer()) {
    assert(MemPtr->getType() == CGM.PtrDiffTy);
    llvm::Value *NegativeOne =
      llvm::Constant::getAllOnesValue(MemPtr->getType());
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
  }

  // In Itanium, a member function pointer is not null if 'ptr' is not null.
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");

  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");

  // On ARM, a member function pointer is also non-null if the low bit of 'adj'
  // (the virtual bit) is set.  (For a virtual function, 'ptr' holds a vtable
  // offset that may legitimately be zero.)
  if (UseARMMethodPtrABI) {
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
                                                  "memptr.isvirtual");
    Result = Builder.CreateOr(Result, IsVirtual);
  }

  return Result;
}
1215 
/// Classify a C++ class return type for the Itanium ABI: classes that
/// C++ does not allow to be copied trivially must be returned indirectly
/// through a hidden pointer parameter.  Returns true if this function
/// set the return info itself.
bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
  const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
  // Non-class returns are left to the target's normal classification.
  if (!RD)
    return false;

  // If C++ prohibits us from making a copy, return by address.
  if (!RD->canPassInRegisters()) {
    auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
    FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
    return true;
  }
  return false;
}
1229 
1230 /// The Itanium ABI requires non-zero initialization only for data
1231 /// member pointers, for which '0' is a valid offset.
1232 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1233   return MPT->isMemberFunctionPointer();
1234 }
1235 
/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
///
/// Emits a virtual 'delete' of the object at Ptr: for a global
/// 'operator delete', first derive the complete-object pointer via the
/// vtable's offset-to-top slot and push a cleanup so deallocation runs
/// even if the destructor throws; then invoke the appropriate virtual
/// destructor (deleting or complete).
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl =
        cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
    llvm::Value *VTable =
        CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        VTable, -2, "complete-offset.ptr");
    llvm::Value *Offset =
      CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());

    // Apply the offset.
    llvm::Value *CompletePtr =
      CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
    CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws.
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  // With a global delete, only run the complete destructor here — the
  // deallocation is handled by the cleanup pushed above.  Otherwise use
  // the deleting destructor, which also frees the storage.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}
1279 
1280 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1281   // void __cxa_rethrow();
1282 
1283   llvm::FunctionType *FTy =
1284     llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1285 
1286   llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1287 
1288   if (isNoReturn)
1289     CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1290   else
1291     CGF.EmitRuntimeCallOrInvoke(Fn);
1292 }
1293 
1294 static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1295   // void *__cxa_allocate_exception(size_t thrown_size);
1296 
1297   llvm::FunctionType *FTy =
1298     llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1299 
1300   return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1301 }
1302 
1303 static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1304   // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1305   //                  void (*dest) (void *));
1306 
1307   llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1308   llvm::FunctionType *FTy =
1309     llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1310 
1311   return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1312 }
1313 
/// Emit a 'throw' expression per the Itanium EH ABI: allocate the
/// exception object with __cxa_allocate_exception, copy-construct the
/// thrown value into it, then call __cxa_throw with the value's RTTI and
/// (if non-trivial) its complete-object destructor.
void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object.
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();

  llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");

  // Construct the thrown value into the runtime-allocated buffer, at the
  // alignment the runtime guarantees for exception objects.
  CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
  CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
                                                         /*ForEH=*/true);

  // The address of the destructor.  If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!Record->hasTrivialDestructor()) {
      CXXDestructorDecl *DtorD = Record->getDestructor();
      Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
      Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
    }
  }
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);

  // __cxa_throw never returns; emit it as a no-return call or invoke.
  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
}
1347 
/// Get or declare the __dynamic_cast runtime entry point, marked
/// nounwind+readonly so LLVM can CSE/hoist calls to it.
static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);

  // Mark the function as nounwind readonly.
  llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
                                            llvm::Attribute::ReadOnly };
  llvm::AttributeList Attrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
}
1370 
1371 static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1372   // void __cxa_bad_cast();
1373   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1374   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1375 }
1376 
/// Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7]
///
/// The hint passed to __dynamic_cast is either the fixed offset of Src
/// within Dst (when Src is a unique public non-virtual base), or one of
/// three negative sentinels:
///   -1: no hint (some path goes through a virtual base),
///   -2: Src is not a public base of Dst,
///   -3: Src is a public base along multiple paths, none virtual.
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst.  Record all inheritance paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (const CXXBasePath &Path : Paths) {
    if (Path.Access != AS_public)  // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (const CXXBasePathElement &PathElement : Path) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (PathElement.Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
      Offset += L.getBaseClassOffset(
          PathElement.Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}
1428 
1429 static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1430   // void __cxa_bad_typeid();
1431   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1432 
1433   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1434 }
1435 
1436 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1437                                               QualType SrcRecordTy) {
1438   return IsDeref;
1439 }
1440 
1441 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1442   llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1443   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1444   Call->setDoesNotReturn();
1445   CGF.Builder.CreateUnreachable();
1446 }
1447 
/// Emit the value of a polymorphic 'typeid' expression: load the
/// std::type_info pointer stored just before the address point in the
/// object's vtable.
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *Value =
      CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);

  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Load the type info.
    // Relative layout stores a 32-bit self-relative offset at -4 bytes
    // from the address point; llvm.load.relative resolves it to a pointer.
    Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
    Value = CGF.Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
        {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});

    // Setup to dereference again since this is a proxy we accessed.
    Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
  } else {
    // Load the type info.
    // Classic layout: the type_info pointer lives in vtable slot -1.
    Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  }
  return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
}
1472 
1473 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1474                                                        QualType SrcRecordTy) {
1475   return SrcIsPtr;
1476 }
1477 
// Emit a call to the __dynamic_cast runtime function, passing the object
// pointer, the source and destination RTTI descriptors, and a static offset
// hint (computeOffsetHint). For casts to reference type, a null result
// branches to a block that throws std::bad_cast.
llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  // RTTI descriptors for the (unqualified) source and destination classes.
  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint: a constant offset of Src within Dst when it is
  // unique, or a negative code the runtime interprets (see computeOffsetHint).
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
      PtrDiffLTy,
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast.
  // The pointer form simply yields null, handled by the caller at CastEnd.
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}
1520 
// Emit dynamic_cast<void*>: locate the most-derived object by adding the
// vtable's offset-to-top entry to the source pointer. No runtime call needed.
llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy,
                                                  QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *OffsetToTop;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable: a 32-bit entry two slots
    // before the address point in the relative layout.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_32(/*Type=*/nullptr, VTable, -2U);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
  } else {
    llvm::Type *PtrDiffLTy =
        CGF.ConvertType(CGF.getContext().getPointerDiffType());

    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable: a ptrdiff_t entry two slots
    // before the address point in the classic layout.
    OffsetToTop = CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
  }
  // Finally, add the offset to the pointer.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
  return CGF.Builder.CreateBitCast(Value, DestLTy);
}
1558 
1559 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1560   llvm::FunctionCallee Fn = getBadCastFn(CGF);
1561   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1562   Call->setDoesNotReturn();
1563   CGF.Builder.CreateUnreachable();
1564   return true;
1565 }
1566 
// Load the dynamic offset of a virtual base: the vtable stores the vbase
// offset at a statically known (byte) position relative to the address
// point; fetch that slot and load its value.
llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         Address This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                               BaseClassDecl);
  // The vtable pointer was fetched as i8*, so this GEP indexes in bytes.
  llvm::Value *VBaseOffsetPtr =
    CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
                                   "vbase.offset.ptr");

  llvm::Value *VBaseOffset;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Relative layout stores the offset as a 32-bit integer.
    VBaseOffsetPtr =
        CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        VBaseOffsetPtr, CharUnits::fromQuantity(4), "vbase.offset");
  } else {
    // Classic layout stores the offset as a ptrdiff_t.
    VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
                                               CGM.PtrDiffTy->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
  }
  return VBaseOffset;
}
1594 
1595 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1596   // Just make sure we're in sync with TargetCXXABI.
1597   assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1598 
1599   // The constructor used for constructing this as a base class;
1600   // ignores virtual bases.
1601   CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1602 
1603   // The constructor used for constructing this as a complete class;
1604   // constructs the virtual bases, then calls the base constructor.
1605   if (!D->getParent()->isAbstract()) {
1606     // We don't need to emit the complete ctor if the class is abstract.
1607     CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1608   }
1609 }
1610 
1611 CGCXXABI::AddedStructorArgCounts
1612 ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1613                                       SmallVectorImpl<CanQualType> &ArgTys) {
1614   ASTContext &Context = getContext();
1615 
1616   // All parameters are already in place except VTT, which goes after 'this'.
1617   // These are Clang types, so we don't need to worry about sret yet.
1618 
1619   // Check if we need to add a VTT parameter (which has type void **).
1620   if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1621                                              : GD.getDtorType() == Dtor_Base) &&
1622       cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1623     ArgTys.insert(ArgTys.begin() + 1,
1624                   Context.getPointerType(Context.VoidPtrTy));
1625     return AddedStructorArgCounts::prefix(1);
1626   }
1627   return AddedStructorArgCounts{};
1628 }
1629 
1630 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1631   // The destructor used for destructing this as a base class; ignores
1632   // virtual bases.
1633   CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1634 
1635   // The destructor used for destructing this as a most-derived class;
1636   // call the base destructor and then destructs any virtual bases.
1637   CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1638 
1639   // The destructor in a virtual table is always a 'deleting'
1640   // destructor, which calls the complete destructor and then uses the
1641   // appropriate operator delete.
1642   if (D->isVirtual())
1643     CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1644 }
1645 
1646 void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1647                                               QualType &ResTy,
1648                                               FunctionArgList &Params) {
1649   const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1650   assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1651 
1652   // Check if we need a VTT parameter as well.
1653   if (NeedsVTTParameter(CGF.CurGD)) {
1654     ASTContext &Context = getContext();
1655 
1656     // FIXME: avoid the fake decl
1657     QualType T = Context.getPointerType(Context.VoidPtrTy);
1658     auto *VTTDecl = ImplicitParamDecl::Create(
1659         Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1660         T, ImplicitParamDecl::CXXVTT);
1661     Params.insert(Params.begin() + 1, VTTDecl);
1662     getStructorImplicitParamDecl(CGF) = VTTDecl;
1663   }
1664 }
1665 
// Emit the start-of-function setup for an instance method: bind 'this',
// load the VTT parameter when one was added, and pre-store 'this' into the
// return slot for this-returning functions.
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  // Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
  // adjustments are required, because they are all handled by thunks.
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));

  // Initialize the 'vtt' slot if needed (the decl is recorded by
  // addImplicitStructorParams when the structor takes a VTT).
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
  }

  // If this is a function that the ABI specifies returns 'this', initialize
  // the return slot to 'this' at the start of the function.
  //
  // Unlike the setting of return types, this is done within the ABI
  // implementation instead of by clients of CGCXXABI because:
  // 1) getThisValue is currently protected
  // 2) in theory, an ABI could implement 'this' returns some other way;
  //    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}
1692 
1693 CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1694     CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1695     bool ForVirtualBase, bool Delegating) {
1696   if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1697     return AddedStructorArgs{};
1698 
1699   // Insert the implicit 'vtt' argument as the second argument.
1700   llvm::Value *VTT =
1701       CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1702   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1703   return AddedStructorArgs::prefix({{VTT, VTTTy}});
1704 }
1705 
1706 llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
1707     CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
1708     bool ForVirtualBase, bool Delegating) {
1709   GlobalDecl GD(DD, Type);
1710   return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1711 }
1712 
1713 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1714                                        const CXXDestructorDecl *DD,
1715                                        CXXDtorType Type, bool ForVirtualBase,
1716                                        bool Delegating, Address This,
1717                                        QualType ThisTy) {
1718   GlobalDecl GD(DD, Type);
1719   llvm::Value *VTT =
1720       getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
1721   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1722 
1723   CGCallee Callee;
1724   if (getContext().getLangOpts().AppleKext &&
1725       Type != Dtor_Base && DD->isVirtual())
1726     Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1727   else
1728     Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
1729 
1730   CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
1731                             nullptr);
1732 }
1733 
// Emit the full definition of RD's vtable group: build the initializer, set
// linkage/comdat/visibility, emit type metadata, and handle the special
// __fundamental_type_info and relative-layout cases.
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  // Already defined — nothing to do.
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  CGVT.createVTableInitializer(components, VTLayout, RTTI,
                               llvm::GlobalValue::isLocalLinkage(Linkage));
  components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  // Weak-for-linker definitions go in a comdat so duplicates merge.
  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // Set the right visibility.
  CGM.setGVProperties(VTable, RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD);

  // Always emit type metadata on non-available_externally definitions, and on
  // available_externally definitions if we are performing whole program
  // devirtualization. For WPD we need the type metadata on all vtable
  // definitions to ensure we associate derived classes with base classes
  // defined in headers but with a strong definition only in a shared library.
  if (!VTable->isDeclarationForLinker() ||
      CGM.getCodeGenOpts().WholeProgramVTables) {
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
    // For available_externally definitions, add the vtable to
    // @llvm.compiler.used so that it isn't deleted before whole program
    // analysis.
    if (VTable->isDeclarationForLinker()) {
      assert(CGM.getCodeGenOpts().WholeProgramVTables);
      CGM.addCompilerUsedGlobal(VTable);
    }
  }

  // Relative layout: non-dso_local vtables additionally get a relative
  // vtable alias generated for them.
  if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
    CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
}
1793 
1794 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1795     CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1796   if (Vptr.NearestVBase == nullptr)
1797     return false;
1798   return NeedsVTTParameter(CGF.CurGD);
1799 }
1800 
1801 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1802     CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1803     const CXXRecordDecl *NearestVBase) {
1804 
1805   if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1806       NeedsVTTParameter(CGF.CurGD)) {
1807     return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1808                                                   NearestVBase);
1809   }
1810   return getVTableAddressPoint(Base, VTableClass);
1811 }
1812 
1813 llvm::Constant *
1814 ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1815                                      const CXXRecordDecl *VTableClass) {
1816   llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1817 
1818   // Find the appropriate vtable within the vtable group, and the address point
1819   // within that vtable.
1820   VTableLayout::AddressPointLocation AddressPoint =
1821       CGM.getItaniumVTableContext()
1822           .getVTableLayout(VTableClass)
1823           .getAddressPoint(Base);
1824   llvm::Value *Indices[] = {
1825     llvm::ConstantInt::get(CGM.Int32Ty, 0),
1826     llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
1827     llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
1828   };
1829 
1830   return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
1831                                               Indices, /*InBounds=*/true,
1832                                               /*InRangeIndex=*/1);
1833 }
1834 
1835 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1836     CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1837     const CXXRecordDecl *NearestVBase) {
1838   assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1839          NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1840 
1841   // Get the secondary vpointer index.
1842   uint64_t VirtualPointerIndex =
1843       CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1844 
1845   /// Load the VTT.
1846   llvm::Value *VTT = CGF.LoadCXXVTT();
1847   if (VirtualPointerIndex)
1848     VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
1849 
1850   // And load the address point from the VTT.
1851   return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
1852 }
1853 
1854 llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1855     BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1856   return getVTableAddressPoint(Base, VTableClass);
1857 }
1858 
// Return the global for RD's vtable group, creating it (without an
// initializer) on first request. Results are memoized in the VTables map.
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  // Fast path: already created.
  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  // Compute the mangled symbol name for the vtable.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  // Use pointer alignment for the vtable. Otherwise we would align them based
  // on the size of the initializer which doesn't make sense as only single
  // values are read.
  unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
                        ? 32
                        : CGM.getTarget().getPointerAlign(0);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
      getContext().toCharUnitsFromBits(PAlign).getQuantity());
  // Address identity of the vtable is not significant to the program.
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  CGM.setGVProperties(VTable, RD);

  return VTable;
}
1894 
// Produce the callee for a virtual call: read the object's vtable pointer
// and load the function pointer at the method's slot, using either a
// type-checked load, a relative-layout load, or a plain indexed load.
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(
      This, Ty->getPointerTo()->getPointerTo(), MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc;
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    // Emit a type-checked load of the slot at its byte offset
    // (index * pointer-size-in-bytes).
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable,
        VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Relative layout: each slot is a 32-bit relative entry, resolved by
      // llvm.load_relative at byte offset 4 * index.
      VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
      llvm::Value *Load = CGF.Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
      VFuncLoad = CGF.Builder.CreateBitCast(Load, Ty->getPointerTo());
    } else {
      // Classic layout: index to the slot and load the function pointer.
      VTable =
          CGF.Builder.CreateBitCast(VTable, Ty->getPointerTo()->getPointerTo());
      llvm::Value *VTableSlotPtr =
          CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
      VFuncLoad =
          CGF.Builder.CreateAlignedLoad(VTableSlotPtr, CGF.getPointerAlign());
    }

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we will have 2
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            llvm::LLVMContext::MD_invariant_load,
            llvm::MDNode::get(CGM.getLLVMContext(),
                              llvm::ArrayRef<llvm::Metadata *>()));
      }
    }
    VFunc = VFuncLoad;
  }

  CGCallee Callee(GD, VFunc);
  return Callee;
}
1950 
1951 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
1952     CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
1953     Address This, DeleteOrMemberCallExpr E) {
1954   auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
1955   auto *D = E.dyn_cast<const CXXDeleteExpr *>();
1956   assert((CE != nullptr) ^ (D != nullptr));
1957   assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
1958   assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
1959 
1960   GlobalDecl GD(Dtor, DtorType);
1961   const CGFunctionInfo *FInfo =
1962       &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
1963   llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
1964   CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
1965 
1966   QualType ThisTy;
1967   if (CE) {
1968     ThisTy = CE->getObjectType();
1969   } else {
1970     ThisTy = D->getDestroyedType();
1971   }
1972 
1973   CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
1974                             QualType(), nullptr);
1975   return nullptr;
1976 }
1977 
1978 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
1979   CodeGenVTables &VTables = CGM.getVTables();
1980   llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
1981   VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
1982 }
1983 
1984 bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
1985     const CXXRecordDecl *RD) const {
1986   // We don't emit available_externally vtables if we are in -fapple-kext mode
1987   // because kext mode does not permit devirtualization.
1988   if (CGM.getLangOpts().AppleKext)
1989     return false;
1990 
1991   // If the vtable is hidden then it is not safe to emit an available_externally
1992   // copy of vtable.
1993   if (isVTableHidden(RD))
1994     return false;
1995 
1996   if (CGM.getCodeGenOpts().ForceEmitVTables)
1997     return true;
1998 
1999   // If we don't have any not emitted inline virtual function then we are safe
2000   // to emit an available_externally copy of vtable.
2001   // FIXME we can still emit a copy of the vtable if we
2002   // can emit definition of the inline functions.
2003   if (hasAnyUnusedVirtualInlineFunction(RD))
2004     return false;
2005 
2006   // For a class with virtual bases, we must also be able to speculatively
2007   // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2008   // the vtable" and "can emit the VTT". For a base subobject, this means we
2009   // need to be able to emit non-virtual base vtables.
2010   if (RD->getNumVBases()) {
2011     for (const auto &B : RD->bases()) {
2012       auto *BRD = B.getType()->getAsCXXRecordDecl();
2013       assert(BRD && "no class for base specifier");
2014       if (B.isVirtual() || !BRD->isDynamicClass())
2015         continue;
2016       if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2017         return false;
2018     }
2019   }
2020 
2021   return true;
2022 }
2023 
2024 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2025   if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2026     return false;
2027 
2028   // For a complete-object vtable (or more specifically, for the VTT), we need
2029   // to be able to speculatively emit the vtables of all dynamic virtual bases.
2030   for (const auto &B : RD->vbases()) {
2031     auto *BRD = B.getType()->getAsCXXRecordDecl();
2032     assert(BRD && "no class for base specifier");
2033     if (!BRD->isDynamicClass())
2034       continue;
2035     if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2036       return false;
2037   }
2038 
2039   return true;
2040 }
// Apply a thunk's pointer adjustment to InitialPtr. The non-virtual part is
// a constant byte delta; the virtual part is an offset loaded from the
// object's vtable at byte position VirtualAdjustment relative to the address
// point. For 'this' adjustments the non-virtual delta is applied first; for
// return adjustments it is applied last.
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          Address InitialPtr,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  // No adjustment at all: return the incoming pointer unchanged.
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.getPointer();

  // Work on an i8* so the byte GEPs below are well-typed.
  Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    V = CGF.Builder.CreateConstInBoundsByteGEP(V,
                              CharUnits::fromQuantity(NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    // Load the vtable pointer from the (possibly already nv-adjusted) object.
    Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);

    llvm::Value *Offset;
    llvm::Value *OffsetPtr =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
    if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Load the adjustment offset from the vtable as a 32-bit int.
      OffsetPtr =
          CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
      Offset =
          CGF.Builder.CreateAlignedLoad(OffsetPtr, CharUnits::fromQuantity(4));
    } else {
      llvm::Type *PtrDiffTy =
          CGF.ConvertType(CGF.getContext().getPointerDiffType());

      OffsetPtr =
          CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());

      // Load the adjustment offset from the vtable.
      Offset = CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
    }
    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
  } else {
    ResultPtr = V.getPointer();
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
                                                       NonVirtualAdjustment);
  }

  // Cast back to the original type.
  return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
}
2098 
2099 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
2100                                                   Address This,
2101                                                   const ThisAdjustment &TA) {
2102   return performTypeAdjustment(CGF, This, TA.NonVirtual,
2103                                TA.Virtual.Itanium.VCallOffsetOffset,
2104                                /*IsReturnAdjustment=*/false);
2105 }
2106 
2107 llvm::Value *
2108 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
2109                                        const ReturnAdjustment &RA) {
2110   return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
2111                                RA.Virtual.Itanium.VBaseOffsetOffset,
2112                                /*IsReturnAdjustment=*/true);
2113 }
2114 
2115 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2116                                     RValue RV, QualType ResultType) {
2117   if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2118     return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2119 
2120   // Destructor thunks in the ARM ABI have indeterminate results.
2121   llvm::Type *T = CGF.ReturnValue.getElementType();
2122   RValue Undef = RValue::get(llvm::UndefValue::get(T));
2123   return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2124 }
2125 
2126 /************************** Array allocation cookies **************************/
2127 
2128 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2129   // The array cookie is a size_t; pad that up to the element alignment.
2130   // The cookie is actually right-justified in that space.
2131   return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2132                   CGM.getContext().getPreferredTypeAlignInChars(elementType));
2133 }
2134 
// Write the array-new cookie (the element count) at the front of the
// allocation and return the adjusted pointer to the first array element.
Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                             Address NewPtr,
                                             llvm::Value *NumElements,
                                             const CXXNewExpr *expr,
                                             QualType ElementType) {
  assert(requiresArrayCookie(expr));

  unsigned AS = NewPtr.getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeSize = CGF.getSizeSize();

  // The size of the cookie: a size_t padded up to the element's preferred
  // alignment (must agree with getArrayCookieSizeImpl).
  CharUnits CookieSize =
      std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie: the count is right-justified, so skip
  // any leading padding.
  Address CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);

  // Write the number of elements into the appropriate slot.
  Address NumElementsPtr =
      CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Handle the array cookie specially in ASan: call the runtime to poison
  // the cookie (only for address space 0 and replaceable — or, if requested,
  // custom — operator new).
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
    // The store to the CookiePtr does not need to be instrumented.
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
    llvm::FunctionCallee F =
        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
  }

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
}
2180 
2181 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2182                                                 Address allocPtr,
2183                                                 CharUnits cookieSize) {
2184   // The element size is right-justified in the cookie.
2185   Address numElementsPtr = allocPtr;
2186   CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2187   if (!numElementsOffset.isZero())
2188     numElementsPtr =
2189       CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
2190 
2191   unsigned AS = allocPtr.getAddressSpace();
2192   numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2193   if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
2194     return CGF.Builder.CreateLoad(numElementsPtr);
2195   // In asan mode emit a function call instead of a regular load and let the
2196   // run-time deal with it: if the shadow is properly poisoned return the
2197   // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2198   // We can't simply ignore this load using nosanitize metadata because
2199   // the metadata may be lost.
2200   llvm::FunctionType *FTy =
2201       llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
2202   llvm::FunctionCallee F =
2203       CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
2204   return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
2205 }
2206 
2207 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2208   // ARM says that the cookie is always:
2209   //   struct array_cookie {
2210   //     std::size_t element_size; // element_size != 0
2211   //     std::size_t element_count;
2212   //   };
2213   // But the base ABI doesn't give anything an alignment greater than
2214   // 8, so we can dismiss this as typical ABI-author blindness to
2215   // actual language complexity and round up to the element alignment.
2216   return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2217                   CGM.getContext().getTypeAlignInChars(elementType));
2218 }
2219 
2220 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2221                                          Address newPtr,
2222                                          llvm::Value *numElements,
2223                                          const CXXNewExpr *expr,
2224                                          QualType elementType) {
2225   assert(requiresArrayCookie(expr));
2226 
2227   // The cookie is always at the start of the buffer.
2228   Address cookie = newPtr;
2229 
2230   // The first element is the element size.
2231   cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
2232   llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2233                  getContext().getTypeSizeInChars(elementType).getQuantity());
2234   CGF.Builder.CreateStore(elementSize, cookie);
2235 
2236   // The second element is the element count.
2237   cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2238   CGF.Builder.CreateStore(numElements, cookie);
2239 
2240   // Finally, compute a pointer to the actual data buffer by skipping
2241   // over the cookie completely.
2242   CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2243   return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2244 }
2245 
2246 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2247                                             Address allocPtr,
2248                                             CharUnits cookieSize) {
2249   // The number of elements is at offset sizeof(size_t) relative to
2250   // the allocated pointer.
2251   Address numElementsPtr
2252     = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2253 
2254   numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2255   return CGF.Builder.CreateLoad(numElementsPtr);
2256 }
2257 
2258 /*********************** Static local initialization **************************/
2259 
2260 static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2261                                               llvm::PointerType *GuardPtrTy) {
2262   // int __cxa_guard_acquire(__guard *guard_object);
2263   llvm::FunctionType *FTy =
2264     llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2265                             GuardPtrTy, /*isVarArg=*/false);
2266   return CGM.CreateRuntimeFunction(
2267       FTy, "__cxa_guard_acquire",
2268       llvm::AttributeList::get(CGM.getLLVMContext(),
2269                                llvm::AttributeList::FunctionIndex,
2270                                llvm::Attribute::NoUnwind));
2271 }
2272 
2273 static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2274                                               llvm::PointerType *GuardPtrTy) {
2275   // void __cxa_guard_release(__guard *guard_object);
2276   llvm::FunctionType *FTy =
2277     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2278   return CGM.CreateRuntimeFunction(
2279       FTy, "__cxa_guard_release",
2280       llvm::AttributeList::get(CGM.getLLVMContext(),
2281                                llvm::AttributeList::FunctionIndex,
2282                                llvm::Attribute::NoUnwind));
2283 }
2284 
2285 static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2286                                             llvm::PointerType *GuardPtrTy) {
2287   // void __cxa_guard_abort(__guard *guard_object);
2288   llvm::FunctionType *FTy =
2289     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2290   return CGM.CreateRuntimeFunction(
2291       FTy, "__cxa_guard_abort",
2292       llvm::AttributeList::get(CGM.getLLVMContext(),
2293                                llvm::AttributeList::FunctionIndex,
2294                                llvm::Attribute::NoUnwind));
2295 }
2296 
2297 namespace {
2298   struct CallGuardAbort final : EHScopeStack::Cleanup {
2299     llvm::GlobalVariable *Guard;
2300     CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2301 
2302     void Emit(CodeGenFunction &CGF, Flags flags) override {
2303       CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2304                                   Guard);
2305     }
2306   };
2307 }
2308 
/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
///
/// Emit code that runs the initializer for \p var (backing the variable
/// \p D) exactly once, protected by an ABI-specified guard variable.  When
/// thread-safe statics apply, the __cxa_guard_acquire/release/abort
/// protocol is used; otherwise a plain flag byte is checked and set.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      D.isInline() &&
      !isTemplateInstantiation(D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&
                    !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
    } else {
      guardTy = CGF.Int64Ty;
      guardAlignment = CharUnits::fromQuantity(
                             CGM.getDataLayout().getABITypeAlignment(guardTy));
    }
  }
  // The __cxa_guard_* entry points take the guard's address in the target's
  // default globals address space.
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
      CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(&D, out);
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage and visibility from the guarded variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setDSOLocal(var->isDSOLocal());
    guard->setVisibility(var->getVisibility());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());
    guard->setAlignment(guardAlignment.getAsAlign());

    // The ABI says: "It is suggested that it be emitted in the same COMDAT
    // group as the associated data object." In practice, this doesn't work for
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C &&
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
      guard->setComdat(C);
      // An inline variable's guard function is run from the per-TU
      // initialization function, not via a dedicated global ctor function, so
      // we can't put it in a comdat.
      if (!NonTemplateInline)
        CGF.CurFn->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
    }

    // Cache the guard so re-emission of this function body reuses it.
    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  Address guardAddr = Address(guard, guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }

  // Load the first byte of the guard variable.
  llvm::LoadInst *LI =
      Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));

  // Itanium ABI:
  //   An implementation supporting thread-safety on multiprocessor
  //   systems must also guarantee that references to the initialized
  //   object do not occur before the load of the initialization flag.
  //
  // In LLVM, we do this by marking the load Acquire.
  if (threadsafe)
    LI->setAtomic(llvm::AtomicOrdering::Acquire);

  // For ARM, we should only check the first bit, rather than the entire byte:
  //
  // ARM C++ ABI 3.2.3.1:
  //   To support the potential use of initialization guard variables
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
  //   synchronizing instructions we define a static initialization
  //   guard variable to be a 4-byte aligned, 4-byte word with the
  //   following inline access protocol.
  //     #define INITIALIZED 1
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
  //       if (__cxa_guard_acquire(&obj_guard))
  //         ...
  //     }
  //
  // and similarly for ARM64:
  //
  // ARM64 C++ ABI 3.2.2:
  //   This ABI instead only specifies the value bit 0 of the static guard
  //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
  //   variable is not initialized and 1 when it is.
  llvm::Value *V =
      (UseARMGuardVarABI && !useInt8GuardVariable)
          ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
          : LI;
  llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");

  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
                               CodeGenFunction::GuardKind::VariableGuard, &D);

  CGF.EmitBlock(InitCheckBlock);

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.  A nonzero result means we won the race
    // and must perform the initialization.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release.  This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
                                guardAddr.getPointer());
  } else {
    // Store 1 into the first byte of the guard variable after initialization is
    // complete.
    Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
                        Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
  }

  CGF.EmitBlock(EndBlock);
}
2500 
/// Register a global destructor using __cxa_atexit.
///
/// Emits a call registering \p dtor to be run on \p addr at program exit
/// (or, when \p TLS is set, at thread exit via __cxa_thread_atexit, or
/// _tlv_atexit on Darwin).  \p addr may be null when registering a function
/// annotated with __attribute__((destructor)); see below.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::FunctionCallee dtor,
                                        llvm::Constant *addr, bool TLS) {
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
         "__cxa_atexit is disabled");
  const char *Name = "__cxa_atexit";
  if (TLS) {
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ?  "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC.  Go ahead and cast it to the
  // right prototype: void (*)(void *).
  llvm::Type *dtorTy =
    llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();

  // Preserve address space of addr.
  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
  auto AddrInt8PtrTy =
      AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
      CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
  auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
  // Keep __dso_handle private to this DSO.
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
  llvm::FunctionType *atexitTy =
    llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function; the registration call never throws.
  llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
    fn->setDoesNotThrow();

  if (!addr)
    // addr is null when we are trying to register a dtor annotated with
    // __attribute__((destructor)) in a constructor function. Using null here is
    // okay because this argument is just passed back to the destructor
    // function.
    addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);

  llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
                             cast<llvm::Constant>(dtor.getCallee()), dtorTy),
                         llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
                         handle};
  CGF.EmitNounwindRuntimeCall(atexit, args);
}
2553 
2554 static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
2555                                                    StringRef FnName) {
2556   // Create a function that registers/unregisters destructors that have the same
2557   // priority.
2558   llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
2559   llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
2560       FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());
2561 
2562   return GlobalInitOrCleanupFn;
2563 }
2564 
2565 static FunctionDecl *
2566 createGlobalInitOrCleanupFnDecl(CodeGen::CodeGenModule &CGM, StringRef FnName) {
2567   ASTContext &Ctx = CGM.getContext();
2568   QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, {});
2569   return FunctionDecl::Create(
2570       Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
2571       &Ctx.Idents.get(FnName), FunctionTy, nullptr, SC_Static, false, false);
2572 }
2573 
/// For each destructor priority, emit a __GLOBAL_cleanup_N function that
/// calls unatexit() on every destructor registered at that priority and
/// directly invokes each destructor that had not yet been run (unatexit
/// returned 0).  Used on targets whose cleanup is driven by sinit/sterm
/// rather than atexit-style teardown.
void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalCleanupFnName =
        std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);

    llvm::Function *GlobalCleanupFn =
        createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);

    FunctionDecl *GlobalCleanupFD =
        createGlobalInitOrCleanupFnDecl(*this, GlobalCleanupFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(GlobalCleanupFD), getContext().VoidTy,
                      GlobalCleanupFn, getTypes().arrangeNullaryFunction(),
                      FunctionArgList(), SourceLocation(), SourceLocation());

    // Get the destructor function type, void(*)(void).
    llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
    llvm::Type *dtorTy = dtorFuncTy->getPointerTo();

    // Destructor functions are run/unregistered in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    auto itv = Dtors.rbegin();
    while (itv != Dtors.rend()) {
      llvm::Function *Dtor = *itv;

      // We're assuming that the destructor function is something we can
      // reasonably call with the correct CC.  Go ahead and cast it to the
      // right prototype.
      llvm::Constant *dtor = llvm::ConstantExpr::getBitCast(Dtor, dtorTy);
      // unatexit returns 0 if the destructor is still registered (i.e. has
      // not run yet), in which case we must run it ourselves.
      llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtor);
      llvm::Value *NeedsDestruct =
          CGF.Builder.CreateIsNull(V, "needs_destruct");

      llvm::BasicBlock *DestructCallBlock =
          CGF.createBasicBlock("destruct.call");
      // The fall-through block doubles as the entry for the next
      // destructor's unatexit call, hence the name choice.
      llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
          (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
      // Check if unatexit returns a value of 0. If it does, jump to
      // DestructCallBlock, otherwise jump to EndBlock directly.
      CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

      CGF.EmitBlock(DestructCallBlock);

      // Emit the call to casted Dtor.
      llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, dtor);
      // Make sure the call and the callee agree on calling convention.
      CI->setCallingConv(Dtor->getCallingConv());

      CGF.EmitBlock(EndBlock);

      itv++;
    }

    CGF.FinishFunction();
    // Run the cleanup function as a destructor of the same priority.
    AddGlobalDtor(GlobalCleanupFn, Priority);
  }
}
2634 
/// For each destructor priority, emit a __GLOBAL_init_N function that
/// registers every destructor of that priority with __cxa_atexit (or plain
/// atexit as a fallback) and add it as a global constructor at that
/// priority, so registration order mirrors construction order.
void CodeGenModule::registerGlobalDtorsWithAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalInitFnName =
        std::string("__GLOBAL_init_") + llvm::to_string(Priority);
    llvm::Function *GlobalInitFn =
        createGlobalInitOrCleanupFn(*this, GlobalInitFnName);
    FunctionDecl *GlobalInitFD =
        createGlobalInitOrCleanupFnDecl(*this, GlobalInitFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(GlobalInitFD), getContext().VoidTy,
                      GlobalInitFn, getTypes().arrangeNullaryFunction(),
                      FunctionArgList(), SourceLocation(), SourceLocation());

    // Since constructor functions are run in non-descending order of their
    // priorities, destructors are registered in non-descending order of their
    // priorities, and since destructor functions are run in the reverse order
    // of their registration, destructor functions are run in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    for (auto *Dtor : Dtors) {
      // Register the destructor function calling __cxa_atexit if it is
      // available. Otherwise fall back on calling atexit.
      if (getCodeGenOpts().CXAAtExit) {
        emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
      } else {
        // Get the destructor function type, void(*)(void).
        llvm::Type *dtorTy =
            llvm::FunctionType::get(CGF.VoidTy, false)->getPointerTo();

        // We're assuming that the destructor function is something we can
        // reasonably call with the correct CC.  Go ahead and cast it to the
        // right prototype.
        CGF.registerGlobalDtorWithAtExit(
            llvm::ConstantExpr::getBitCast(Dtor, dtorTy));
      }
    }

    CGF.FinishFunction();
    AddGlobalCtor(GlobalInitFn, Priority, nullptr);
  }

  // On sinit/sterm targets the registrations above must be undone by
  // matching cleanup functions.
  if (getCXXABI().useSinitAndSterm())
    unregisterGlobalDtorsWithUnAtExit();
}
2681 
2682 /// Register a global destructor as best as we know how.
2683 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2684                                        llvm::FunctionCallee dtor,
2685                                        llvm::Constant *addr) {
2686   if (D.isNoDestroy(CGM.getContext()))
2687     return;
2688 
2689   // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2690   // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2691   // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2692   // We can always use __cxa_thread_atexit.
2693   if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2694     return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2695 
2696   // In Apple kexts, we want to add a global destructor entry.
2697   // FIXME: shouldn't this be guarded by some variable?
2698   if (CGM.getLangOpts().AppleKext) {
2699     // Generate a global destructor entry.
2700     return CGM.AddCXXDtorEntry(dtor, addr);
2701   }
2702 
2703   CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2704 }
2705 
2706 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2707                                        CodeGen::CodeGenModule &CGM) {
2708   assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2709   // Darwin prefers to have references to thread local variables to go through
2710   // the thread wrapper instead of directly referencing the backing variable.
2711   return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2712          CGM.getTarget().getTriple().isOSDarwin();
2713 }
2714 
2715 /// Get the appropriate linkage for the wrapper function. This is essentially
2716 /// the weak form of the variable's linkage; every translation unit which needs
2717 /// the wrapper emits a copy, and we want the linker to merge them.
2718 static llvm::GlobalValue::LinkageTypes
2719 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2720   llvm::GlobalValue::LinkageTypes VarLinkage =
2721       CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
2722 
2723   // For internal linkage variables, we don't need an external or weak wrapper.
2724   if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2725     return VarLinkage;
2726 
2727   // If the thread wrapper is replaceable, give it appropriate linkage.
2728   if (isThreadWrapperReplaceable(VD, CGM))
2729     if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2730         !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2731       return VarLinkage;
2732   return llvm::GlobalValue::WeakODRLinkage;
2733 }
2734 
/// Get or create the declaration of the thread_local wrapper function for
/// \p VD — the function callers use to obtain the variable's address — and
/// record it in ThreadWrappers so its body can be emitted later.
/// NOTE(review): the \p Val parameter is not used in this function's body.
llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::Value *Val) {
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
  }

  // FIXME: If VD is a definition, we should regenerate the function attributes
  // before returning.
  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
    return cast<llvm::Function>(V);

  // The wrapper returns a pointer to the variable (strip any reference).
  QualType RetQT = VD->getType();
  if (RetQT->isReferenceType())
    RetQT = RetQT.getNonReferenceType();

  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      getContext().getPointerType(RetQT), FunctionArgList());

  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Wrapper =
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
                             WrapperName.str(), &CGM.getModule());

  if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
    Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper);

  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage())
    if (!isThreadWrapperReplaceable(VD, CGM) ||
        llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
        llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
        VD->getVisibility() == HiddenVisibility)
      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // Replaceable wrappers use the Darwin CXX_FAST_TLS convention and are
  // marked nounwind.
  if (isThreadWrapperReplaceable(VD, CGM)) {
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
  }

  // Remember the wrapper so EmitThreadLocalInitFuncs can emit its body.
  ThreadWrappers.push_back({VD, Wrapper});
  return Wrapper;
}
2783 
// Emit the per-TU machinery for C++ thread_local variables: the guarded
// ordered-init function (__tls_init), per-variable unordered init functions,
// and the thread wrapper bodies that callers use to access each variable.
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    // Template instantiations are unordered: each keeps its own init
    // function, keyed by the variable's canonical declaration.
    if (isTemplateInstantiation(
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
    else
      OrderedInits.push_back(CXXThreadLocalInits[I]);
  }

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function: __tls_init runs all the
    // ordered initializers exactly once per thread, guarded by __tls_guard.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
                                                     SourceLocation(),
                                                     /*TLS=*/true);
    // One-byte thread-local guard, zero-initialized; set once the ordered
    // initializers for this thread have run.
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getAsAlign());

    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
        InitFunc, OrderedInits, ConstantAddress(Guard, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
    }
  }

  // Create declarations for thread wrappers for all thread-local variables
  // with non-discardable definitions in this translation unit.
  for (const VarDecl *VD : CXXThreadLocals) {
    if (VD->hasDefinition() &&
        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
      getOrCreateThreadLocalWrapper(VD, GV);
    }
  }

  // Emit all referenced thread wrappers.
  for (auto VDAndWrapper : ThreadWrappers) {
    const VarDecl *VD = VDAndWrapper.first;
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
    llvm::Function *Wrapper = VDAndWrapper.second;

    // Some targets require that all access to thread local variables go through
    // the thread wrapper.  This means that we cannot attempt to create a thread
    // wrapper or a thread helper.
    if (!VD->hasDefinition()) {
      if (isThreadWrapperReplaceable(VD, CGM)) {
        // Leave the wrapper as an external declaration; the defining TU (or
        // the dynamic linker) provides the replaceable definition.
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
        continue;
      }

      // If this isn't a TU in which this variable is defined, the thread
      // wrapper is discardable.
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
    }

    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);

    // Mangle the name for the thread_local initialization function
    // (the _ZTH... symbol paired with this variable).
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
    }

    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    bool HasConstantInitialization = false;
    if (!usesThreadWrapperFunction(VD)) {
      // Constant-initialized with trivial destruction: no dynamic init at all.
      HasConstantInitialization = true;
    } else if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      // Unordered (template-instantiation) variables use their dedicated init
      // function rather than the shared ordered-init __tls_init.
      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
      if (InitFuncToUse)
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
                                         InitFuncToUse);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      Init = llvm::Function::Create(InitFnTy,
                                    llvm::GlobalVariable::ExternalWeakLinkage,
                                    InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
                                    cast<llvm::Function>(Init));
    }

    if (Init) {
      Init->setVisibility(Var->getVisibility());
      // Don't mark an extern_weak function DSO local on windows.
      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
        Init->setDSOLocal(Var->isDSOLocal());
    }

    // Build the wrapper body: optionally run the init function, then return
    // the (possibly loaded-through, for references) address of the variable.
    llvm::LLVMContext &Context = CGM.getModule().getContext();
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (HasConstantInitialization) {
      // No dynamic initialization to invoke.
    } else if (InitIsInitFunc) {
      if (Init) {
        llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
        if (isThreadWrapperReplaceable(VD, CGM)) {
          // Keep the call, the alias, and its aliasee on the same fast-TLS
          // calling convention.
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
          llvm::Function *Fn =
              cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
        }
      }
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      // (Init is extern_weak here, so a null check is meaningful.)
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(InitFnTy, Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Var;
    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
      Val = Builder.CreateAlignedLoad(Val, Align);
    }
    if (Val->getType() != Wrapper->getReturnType())
      Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
          Val, Wrapper->getReturnType(), "");
    Builder.CreateRet(Val);
  }
}
2951 
2952 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
2953                                                    const VarDecl *VD,
2954                                                    QualType LValType) {
2955   llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
2956   llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
2957 
2958   llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
2959   CallVal->setCallingConv(Wrapper->getCallingConv());
2960 
2961   LValue LV;
2962   if (VD->getType()->isReferenceType())
2963     LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
2964   else
2965     LV = CGF.MakeAddrLValue(CallVal, LValType,
2966                             CGF.getContext().getDeclAlign(VD));
2967   // FIXME: need setObjCGCLValueClass?
2968   return LV;
2969 }
2970 
2971 /// Return whether the given global decl needs a VTT parameter, which it does
2972 /// if it's a base constructor or destructor with virtual bases.
2973 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
2974   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
2975 
2976   // We don't have any virtual bases, just return early.
2977   if (!MD->getParent()->getNumVBases())
2978     return false;
2979 
2980   // Check if we have a base constructor.
2981   if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
2982     return true;
2983 
2984   // Check if we have a base destructor.
2985   if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
2986     return true;
2987 
2988   return false;
2989 }
2990 
namespace {
/// ItaniumRTTIBuilder - Builds Itanium-ABI RTTI descriptors (type_info
/// objects and their mangled-name strings) for one type at a time.
class ItaniumRTTIBuilder {
  CodeGenModule &CGM;  // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI;  // Per-module state.

  /// Fields - The fields of the RTTI descriptor currently being built.
  SmallVector<llvm::Constant *, 16> Fields;

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type (declared here, defined elsewhere).
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

public:
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags.
  enum {
    /// PTI_Const - Type has const qualifier.
    PTI_Const = 0x1,

    /// PTI_Volatile - Type has volatile qualifier.
    PTI_Volatile = 0x2,

    /// PTI_Restrict - Type has restrict qualifier.
    PTI_Restrict = 0x4,

    /// PTI_Incomplete - Type is incomplete.
    PTI_Incomplete = 0x8,

    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
    /// (in pointer to member).
    PTI_ContainingClassIncomplete = 0x10,

    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
    //PTI_TransactionSafe = 0x20,

    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
    PTI_Noexcept = 0x40,
  };

  // VMI type info flags.
  enum {
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
    VMI_NonDiamondRepeat = 0x1,

    /// VMI_DiamondShaped - Class is diamond shaped.
    VMI_DiamondShaped = 0x2
  };

  // Base class type info flags.
  enum {
    /// BCTI_Virtual - Base class is virtual.
    BCTI_Virtual = 0x1,

    /// BCTI_Public - Base class is public.
    BCTI_Public = 0x2
  };

  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
  /// link to an existing RTTI descriptor if one already exists.
  llvm::Constant *BuildTypeInfo(QualType Ty);

  /// BuildTypeInfo - Build the RTTI type info struct for the given type,
  /// with explicitly chosen linkage, visibility and DLL storage class.
  llvm::Constant *BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
};
}
3091 
3092 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3093     QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3094   SmallString<256> Name;
3095   llvm::raw_svector_ostream Out(Name);
3096   CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
3097 
3098   // We know that the mangled name of the type starts at index 4 of the
3099   // mangled name of the typename, so we can just index into it in order to
3100   // get the mangled name of the type.
3101   llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
3102                                                             Name.substr(4));
3103   auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
3104 
3105   llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3106       Name, Init->getType(), Linkage, Align.getQuantity());
3107 
3108   GV->setInitializer(Init);
3109 
3110   return GV;
3111 }
3112 
3113 llvm::Constant *
3114 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3115   // Mangle the RTTI name.
3116   SmallString<256> Name;
3117   llvm::raw_svector_ostream Out(Name);
3118   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3119 
3120   // Look for an existing global.
3121   llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3122 
3123   if (!GV) {
3124     // Create a new global variable.
3125     // Note for the future: If we would ever like to do deferred emission of
3126     // RTTI, check if emitting vtables opportunistically need any adjustment.
3127 
3128     GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
3129                                   /*isConstant=*/true,
3130                                   llvm::GlobalValue::ExternalLinkage, nullptr,
3131                                   Name);
3132     const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3133     CGM.setGVProperties(GV, RD);
3134   }
3135 
3136   return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3137 }
3138 
3139 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3140 /// info for that type is defined in the standard library.
3141 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
3142   // Itanium C++ ABI 2.9.2:
3143   //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
3144   //   the run-time support library. Specifically, the run-time support
3145   //   library should contain type_info objects for the types X, X* and
3146   //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3147   //   unsigned char, signed char, short, unsigned short, int, unsigned int,
3148   //   long, unsigned long, long long, unsigned long long, float, double,
3149   //   long double, char16_t, char32_t, and the IEEE 754r decimal and
3150   //   half-precision floating point types.
3151   //
3152   // GCC also emits RTTI for __int128.
3153   // FIXME: We do not emit RTTI information for decimal types here.
3154 
3155   // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3156   switch (Ty->getKind()) {
3157     case BuiltinType::Void:
3158     case BuiltinType::NullPtr:
3159     case BuiltinType::Bool:
3160     case BuiltinType::WChar_S:
3161     case BuiltinType::WChar_U:
3162     case BuiltinType::Char_U:
3163     case BuiltinType::Char_S:
3164     case BuiltinType::UChar:
3165     case BuiltinType::SChar:
3166     case BuiltinType::Short:
3167     case BuiltinType::UShort:
3168     case BuiltinType::Int:
3169     case BuiltinType::UInt:
3170     case BuiltinType::Long:
3171     case BuiltinType::ULong:
3172     case BuiltinType::LongLong:
3173     case BuiltinType::ULongLong:
3174     case BuiltinType::Half:
3175     case BuiltinType::Float:
3176     case BuiltinType::Double:
3177     case BuiltinType::LongDouble:
3178     case BuiltinType::Float16:
3179     case BuiltinType::Float128:
3180     case BuiltinType::Char8:
3181     case BuiltinType::Char16:
3182     case BuiltinType::Char32:
3183     case BuiltinType::Int128:
3184     case BuiltinType::UInt128:
3185       return true;
3186 
3187 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3188     case BuiltinType::Id:
3189 #include "clang/Basic/OpenCLImageTypes.def"
3190 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3191     case BuiltinType::Id:
3192 #include "clang/Basic/OpenCLExtensionTypes.def"
3193     case BuiltinType::OCLSampler:
3194     case BuiltinType::OCLEvent:
3195     case BuiltinType::OCLClkEvent:
3196     case BuiltinType::OCLQueue:
3197     case BuiltinType::OCLReserveID:
3198 #define SVE_TYPE(Name, Id, SingletonId) \
3199     case BuiltinType::Id:
3200 #include "clang/Basic/AArch64SVEACLETypes.def"
3201 #define PPC_VECTOR_TYPE(Name, Id, Size) \
3202     case BuiltinType::Id:
3203 #include "clang/Basic/PPCTypes.def"
3204 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3205 #include "clang/Basic/RISCVVTypes.def"
3206     case BuiltinType::ShortAccum:
3207     case BuiltinType::Accum:
3208     case BuiltinType::LongAccum:
3209     case BuiltinType::UShortAccum:
3210     case BuiltinType::UAccum:
3211     case BuiltinType::ULongAccum:
3212     case BuiltinType::ShortFract:
3213     case BuiltinType::Fract:
3214     case BuiltinType::LongFract:
3215     case BuiltinType::UShortFract:
3216     case BuiltinType::UFract:
3217     case BuiltinType::ULongFract:
3218     case BuiltinType::SatShortAccum:
3219     case BuiltinType::SatAccum:
3220     case BuiltinType::SatLongAccum:
3221     case BuiltinType::SatUShortAccum:
3222     case BuiltinType::SatUAccum:
3223     case BuiltinType::SatULongAccum:
3224     case BuiltinType::SatShortFract:
3225     case BuiltinType::SatFract:
3226     case BuiltinType::SatLongFract:
3227     case BuiltinType::SatUShortFract:
3228     case BuiltinType::SatUFract:
3229     case BuiltinType::SatULongFract:
3230     case BuiltinType::BFloat16:
3231       return false;
3232 
3233     case BuiltinType::Dependent:
3234 #define BUILTIN_TYPE(Id, SingletonId)
3235 #define PLACEHOLDER_TYPE(Id, SingletonId) \
3236     case BuiltinType::Id:
3237 #include "clang/AST/BuiltinTypes.def"
3238       llvm_unreachable("asking for RRTI for a placeholder type!");
3239 
3240     case BuiltinType::ObjCId:
3241     case BuiltinType::ObjCClass:
3242     case BuiltinType::ObjCSel:
3243       llvm_unreachable("FIXME: Objective-C types are unsupported!");
3244   }
3245 
3246   llvm_unreachable("Invalid BuiltinType Kind!");
3247 }
3248 
3249 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3250   QualType PointeeTy = PointerTy->getPointeeType();
3251   const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3252   if (!BuiltinTy)
3253     return false;
3254 
3255   // Check the qualifiers.
3256   Qualifiers Quals = PointeeTy.getQualifiers();
3257   Quals.removeConst();
3258 
3259   if (!Quals.empty())
3260     return false;
3261 
3262   return TypeInfoIsInStandardLibrary(BuiltinTy);
3263 }
3264 
3265 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
3266 /// information for the given type exists in the standard library.
3267 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3268   // Type info for builtin types is defined in the standard library.
3269   if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3270     return TypeInfoIsInStandardLibrary(BuiltinTy);
3271 
3272   // Type info for some pointer types to builtin types is defined in the
3273   // standard library.
3274   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3275     return TypeInfoIsInStandardLibrary(PointerTy);
3276 
3277   return false;
3278 }
3279 
3280 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3281 /// the given type exists somewhere else, and that we should not emit the type
3282 /// information in this translation unit.  Assumes that it is not a
3283 /// standard-library type.
3284 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3285                                             QualType Ty) {
3286   ASTContext &Context = CGM.getContext();
3287 
3288   // If RTTI is disabled, assume it might be disabled in the
3289   // translation unit that defines any potential key function, too.
3290   if (!Context.getLangOpts().RTTI) return false;
3291 
3292   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3293     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3294     if (!RD->hasDefinition())
3295       return false;
3296 
3297     if (!RD->isDynamicClass())
3298       return false;
3299 
3300     // FIXME: this may need to be reconsidered if the key function
3301     // changes.
3302     // N.B. We must always emit the RTTI data ourselves if there exists a key
3303     // function.
3304     bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3305 
3306     // Don't import the RTTI but emit it locally.
3307     if (CGM.getTriple().isWindowsGNUEnvironment())
3308       return false;
3309 
3310     if (CGM.getVTables().isVTableExternal(RD))
3311       return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3312                  ? false
3313                  : true;
3314 
3315     if (IsDLLImport)
3316       return true;
3317   }
3318 
3319   return false;
3320 }
3321 
3322 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
3323 static bool IsIncompleteClassType(const RecordType *RecordTy) {
3324   return !RecordTy->getDecl()->isCompleteDefinition();
3325 }
3326 
3327 /// ContainsIncompleteClassType - Returns whether the given type contains an
3328 /// incomplete class type. This is true if
3329 ///
3330 ///   * The given type is an incomplete class type.
3331 ///   * The given type is a pointer type whose pointee type contains an
3332 ///     incomplete class type.
3333 ///   * The given type is a member pointer type whose class is an incomplete
3334 ///     class type.
3335 ///   * The given type is a member pointer type whoise pointee type contains an
3336 ///     incomplete class type.
3337 /// is an indirect or direct pointer to an incomplete class type.
3338 static bool ContainsIncompleteClassType(QualType Ty) {
3339   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3340     if (IsIncompleteClassType(RecordTy))
3341       return true;
3342   }
3343 
3344   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3345     return ContainsIncompleteClassType(PointerTy->getPointeeType());
3346 
3347   if (const MemberPointerType *MemberPointerTy =
3348       dyn_cast<MemberPointerType>(Ty)) {
3349     // Check if the class type is incomplete.
3350     const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3351     if (IsIncompleteClassType(ClassType))
3352       return true;
3353 
3354     return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3355   }
3356 
3357   return false;
3358 }
3359 
3360 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3361 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3362 // iff the base is)", according to Itanium C++ ABI, 2.95p6b.
3363 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3364   // Check the number of bases.
3365   if (RD->getNumBases() != 1)
3366     return false;
3367 
3368   // Get the base.
3369   CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3370 
3371   // Check that the base is not virtual.
3372   if (Base->isVirtual())
3373     return false;
3374 
3375   // Check that the base is public.
3376   if (Base->getAccessSpecifier() != AS_public)
3377     return false;
3378 
3379   // Check that the class is dynamic iff the base is.
3380   auto *BaseDecl =
3381       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3382   if (!BaseDecl->isEmpty() &&
3383       BaseDecl->isDynamicClass() != RD->isDynamicClass())
3384     return false;
3385 
3386   return true;
3387 }
3388 
// Push the first field of the type_info object: a pointer into the vtable of
// the matching abi::__*_type_info runtime class, adjusted to its address
// point (Itanium ABI 2.9.5p3).
void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
  // abi::__class_type_info.
  static const char * const ClassTypeInfo =
    "_ZTVN10__cxxabiv117__class_type_infoE";
  // abi::__si_class_type_info.
  static const char * const SIClassTypeInfo =
    "_ZTVN10__cxxabiv120__si_class_type_infoE";
  // abi::__vmi_class_type_info.
  static const char * const VMIClassTypeInfo =
    "_ZTVN10__cxxabiv121__vmi_class_type_infoE";

  // Select the runtime type_info class whose vtable this descriptor uses.
  const char *VTableName = nullptr;

  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    llvm_unreachable("Pipe types shouldn't get here");

  case Type::Builtin:
  case Type::ExtInt:
  // GCC treats vector and complex types as fundamental types.
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::Atomic:
  // FIXME: GCC treats block pointers as fundamental types?!
  case Type::BlockPointer:
    // abi::__fundamental_type_info.
    VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // abi::__array_type_info.
    VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // abi::__function_type_info.
    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
    break;

  case Type::Enum:
    // abi::__enum_type_info.
    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());

    // Pick __class / __si_class / __vmi_class based on the inheritance shape.
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      VTableName = ClassTypeInfo;
    } else if (CanUseSingleInheritance(RD)) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = VMIClassTypeInfo;
    }

    break;
  }

  case Type::ObjCObject:
    // Ignore protocol qualifiers.
    // NOTE: Ty is deliberately rebound to the base type here so the code
    // below (and the ObjCInterface case we may fall into) sees it.
    Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();

    // Handle id and Class.
    if (isa<BuiltinType>(Ty)) {
      VTableName = ClassTypeInfo;
      break;
    }

    assert(isa<ObjCInterfaceType>(Ty));
    LLVM_FALLTHROUGH;

  case Type::ObjCInterface:
    // Interfaces with a superclass look like single inheritance.
    if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = ClassTypeInfo;
    }
    break;

  case Type::ObjCObjectPointer:
  case Type::Pointer:
    // abi::__pointer_type_info.
    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
    break;

  case Type::MemberPointer:
    // abi::__pointer_to_member_type_info.
    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
    break;
  }

  llvm::Constant *VTable = nullptr;

  // Check if the alias exists. If it doesn't, then get or create the global.
  if (CGM.getItaniumVTableContext().isRelativeLayout())
    VTable = CGM.getModule().getNamedAlias(VTableName);
  if (!VTable)
    VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);

  CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));

  llvm::Type *PtrDiffTy =
      CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());

  // The vtable address point is 2.
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // The vtable address point is 8 bytes after its start:
    // 4 for the offset to top + 4 for the relative offset to rtti.
    llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
    VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
    VTable =
        llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
  } else {
    // Classic layout: skip the two pointer-sized slots (offset-to-top, RTTI).
    llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
    VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
                                                          Two);
  }
  VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);

  Fields.push_back(VTable);
}
3532 
/// Return the linkage that the type info and type info name constants
/// should have for the given type.
static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
                                                             QualType Ty) {
  // Itanium C++ ABI 2.9.5p7:
  //   In addition, it and all of the intermediate abi::__pointer_type_info
  //   structs in the chain down to the abi::__class_type_info for the
  //   incomplete class type must be prevented from resolving to the
  //   corresponding type_info structs for the complete class type, possibly
  //   by making them local static objects. Finally, a dummy class RTTI is
  //   generated for the incomplete type that will not resolve to the final
  //   complete class RTTI (because the latter need not exist), possibly by
  //   making it a local static object.
  if (ContainsIncompleteClassType(Ty))
    return llvm::GlobalValue::InternalLinkage;

  switch (Ty->getLinkage()) {
  // Types with no external linkage get internal-linkage type info.
  case NoLinkage:
  case InternalLinkage:
  case UniqueExternalLinkage:
    return llvm::GlobalValue::InternalLinkage;

  case VisibleNoLinkage:
  case ModuleInternalLinkage:
  case ModuleLinkage:
  case ExternalLinkage:
    // If RTTI is not enabled, this type info struct is only being emitted
    // for exception handling. Give it linkonce_odr linkage.
    if (!CGM.getLangOpts().RTTI)
      return llvm::GlobalValue::LinkOnceODRLinkage;

    if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
      if (RD->hasAttr<WeakAttr>())
        return llvm::GlobalValue::WeakODRLinkage;
      if (CGM.getTriple().isWindowsItaniumEnvironment())
        if (RD->hasAttr<DLLImportAttr>() &&
            ShouldUseExternalRTTIDescriptor(CGM, Ty))
          return llvm::GlobalValue::ExternalLinkage;
      // MinGW always uses LinkOnceODRLinkage for type info.
      if (RD->isDynamicClass() &&
          !CGM.getContext()
               .getTargetInfo()
               .getTriple()
               .isWindowsGNUEnvironment())
        // Dynamic classes: match the linkage of the vtable.
        return CGM.getVTableLinkage(RD);
    }

    return llvm::GlobalValue::LinkOnceODRLinkage;
  }

  llvm_unreachable("Invalid linkage!");
}
3586 
// Entry point: return an i8* constant for the type_info object of Ty,
// reusing an existing definition, referencing an external one, or computing
// linkage/visibility/storage-class and delegating to the four-argument
// overload to build it.
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
  // We want to operate on the canonical type.
  Ty = Ty.getCanonicalType();

  // Check if we've already emitted an RTTI descriptor for this type.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);

  llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
  if (OldGV && !OldGV->isDeclaration()) {
    assert(!OldGV->hasAvailableExternallyLinkage() &&
           "available_externally typeinfos not yet implemented");

    return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
  }

  // Check if there is already an external RTTI descriptor for this type.
  if (IsStandardLibraryRTTIDescriptor(Ty) ||
      ShouldUseExternalRTTIDescriptor(CGM, Ty))
    return GetAddrOfExternalRTTIDescriptor(Ty);

  // We must emit the descriptor ourselves; compute its linkage.
  llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);

  // Give the type_info object and name the formal visibility of the
  // type itself.
  llvm::GlobalValue::VisibilityTypes llvmVisibility;
  if (llvm::GlobalValue::isLocalLinkage(Linkage))
    // If the linkage is local, only default visibility makes sense.
    llvmVisibility = llvm::GlobalValue::DefaultVisibility;
  else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
           ItaniumCXXABI::RUK_NonUniqueHidden)
    llvmVisibility = llvm::GlobalValue::HiddenVisibility;
  else
    llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());

  // On Windows-Itanium, dllexported classes export their RTTI as well.
  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
      llvm::GlobalValue::DefaultStorageClass;
  if (CGM.getTriple().isWindowsItaniumEnvironment()) {
    auto RD = Ty->getAsCXXRecordDecl();
    if (RD && RD->hasAttr<DLLExportAttr>())
      DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
  }

  return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
}
3634 
/// Build the type_info object for \p Ty with an explicitly supplied linkage,
/// visibility, and DLL storage class.  Lays out the vtable pointer, the name
/// field, and any per-type-class extra fields, then creates (or replaces) the
/// global holding the descriptor.  Returns the descriptor as an i8*.
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
  // Add the vtable pointer.
  BuildVTablePointer(cast<Type>(Ty));

  // And the name.
  llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
  llvm::Constant *TypeNameField;

  // If we're supposed to demote the visibility, be sure to set a flag
  // to use a string comparison for type_info comparisons.
  ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
      CXXABI.classifyRTTIUniqueness(Ty, Linkage);
  if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
    // The flag is the sign bit, which on ARM64 is defined to be clear
    // for global pointers.  This is very ARM64-specific.
    TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
    llvm::Constant *flag =
        llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
    TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
    TypeNameField =
        llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
  } else {
    TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
  }
  Fields.push_back(TypeNameField);

  // Append the fields specific to Ty's type class (base-class pointers,
  // pbase flags, etc.).  Most classes add nothing beyond std::type_info.
  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  // GCC treats vector types as fundamental types.
  case Type::Builtin:
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::BlockPointer:
    // Itanium C++ ABI 2.9.5p4:
    // abi::__fundamental_type_info adds no data members to std::type_info.
    break;

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    break;

  case Type::ExtInt:
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__array_type_info adds no data members to std::type_info.
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__function_type_info adds no data members to std::type_info.
    break;

  case Type::Enum:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__enum_type_info adds no data members to std::type_info.
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      // We don't need to emit any fields.
      break;
    }

    // Classes with bases are __si_class_type_info when the single-inheritance
    // constraints hold, __vmi_class_type_info otherwise.
    if (CanUseSingleInheritance(RD))
      BuildSIClassTypeInfo(RD);
    else
      BuildVMIClassTypeInfo(RD);

    break;
  }

  case Type::ObjCObject:
  case Type::ObjCInterface:
    BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
    break;

  case Type::ObjCObjectPointer:
    BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
    break;

  case Type::Pointer:
    BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
    break;

  case Type::MemberPointer:
    BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
    break;

  case Type::Atomic:
    // No fields, at least for the moment.
    break;
  }

  // Wrap the accumulated fields into an anonymous struct initializer.
  llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);

  // Create the global under the mangled RTTI name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
  llvm::Module &M = CGM.getModule();
  llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
  llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(M, Init->getType(),
                               /*isConstant=*/true, Linkage, Init, Name);

  // If there's already an old global variable, replace it with the new one.
  if (OldGV) {
    GV->takeName(OldGV);
    llvm::Constant *NewPtr =
      llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtr);
    OldGV->eraseFromParent();
  }

  // Weak-for-linker definitions get a COMDAT so duplicate copies from other
  // TUs can be deduplicated.
  if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(M.getOrInsertComdat(GV->getName()));

  // Align the descriptor like a pointer (its first field is a vtable pointer).
  CharUnits Align =
      CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
  GV->setAlignment(Align.getAsAlign());

  // The Itanium ABI specifies that type_info objects must be globally
  // unique, with one exception: if the type is an incomplete class
  // type or a (possibly indirect) pointer to one.  That exception
  // affects the general case of comparing type_info objects produced
  // by the typeid operator, which is why the comparison operators on
  // std::type_info generally use the type_info name pointers instead
  // of the object addresses.  However, the language's built-in uses
  // of RTTI generally require class types to be complete, even when
  // manipulating pointers to those class types.  This allows the
  // implementation of dynamic_cast to rely on address equality tests,
  // which is much faster.

  // All of this is to say that it's important that both the type_info
  // object and the type_info name be uniqued when weakly emitted.

  TypeName->setVisibility(Visibility);
  CGM.setDSOLocal(TypeName);

  GV->setVisibility(Visibility);
  CGM.setDSOLocal(GV);

  TypeName->setDLLStorageClass(DLLStorageClass);
  GV->setDLLStorageClass(DLLStorageClass);

  TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
  GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);

  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}
3811 
3812 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3813 /// for the given Objective-C object type.
3814 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3815   // Drop qualifiers.
3816   const Type *T = OT->getBaseType().getTypePtr();
3817   assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3818 
3819   // The builtin types are abi::__class_type_infos and don't require
3820   // extra fields.
3821   if (isa<BuiltinType>(T)) return;
3822 
3823   ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3824   ObjCInterfaceDecl *Super = Class->getSuperClass();
3825 
3826   // Root classes are also __class_type_info.
3827   if (!Super) return;
3828 
3829   QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3830 
3831   // Everything else is single inheritance.
3832   llvm::Constant *BaseTypeInfo =
3833       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3834   Fields.push_back(BaseTypeInfo);
3835 }
3836 
3837 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3838 /// inheritance, according to the Itanium C++ ABI, 2.95p6b.
3839 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3840   // Itanium C++ ABI 2.9.5p6b:
3841   // It adds to abi::__class_type_info a single member pointing to the
3842   // type_info structure for the base type,
3843   llvm::Constant *BaseTypeInfo =
3844     ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3845   Fields.push_back(BaseTypeInfo);
3846 }
3847 
namespace {
  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
  /// a class hierarchy.
  struct SeenBases {
    // Non-virtual bases encountered so far (direct or indirect).
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
    // Virtual bases encountered so far; revisiting one of these while
    // computing __vmi_class_type_info flags indicates a diamond.
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
  };
}
3856 
3857 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3858 /// abi::__vmi_class_type_info.
3859 ///
3860 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3861                                              SeenBases &Bases) {
3862 
3863   unsigned Flags = 0;
3864 
3865   auto *BaseDecl =
3866       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3867 
3868   if (Base->isVirtual()) {
3869     // Mark the virtual base as seen.
3870     if (!Bases.VirtualBases.insert(BaseDecl).second) {
3871       // If this virtual base has been seen before, then the class is diamond
3872       // shaped.
3873       Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3874     } else {
3875       if (Bases.NonVirtualBases.count(BaseDecl))
3876         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3877     }
3878   } else {
3879     // Mark the non-virtual base as seen.
3880     if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3881       // If this non-virtual base has been seen before, then the class has non-
3882       // diamond shaped repeated inheritance.
3883       Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3884     } else {
3885       if (Bases.VirtualBases.count(BaseDecl))
3886         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3887     }
3888   }
3889 
3890   // Walk all bases.
3891   for (const auto &I : BaseDecl->bases())
3892     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3893 
3894   return Flags;
3895 }
3896 
3897 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3898   unsigned Flags = 0;
3899   SeenBases Bases;
3900 
3901   // Walk all bases.
3902   for (const auto &I : RD->bases())
3903     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3904 
3905   return Flags;
3906 }
3907 
3908 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3909 /// classes with bases that do not satisfy the abi::__si_class_type_info
3910 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
3911 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3912   llvm::Type *UnsignedIntLTy =
3913     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3914 
3915   // Itanium C++ ABI 2.9.5p6c:
3916   //   __flags is a word with flags describing details about the class
3917   //   structure, which may be referenced by using the __flags_masks
3918   //   enumeration. These flags refer to both direct and indirect bases.
3919   unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3920   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3921 
3922   // Itanium C++ ABI 2.9.5p6c:
3923   //   __base_count is a word with the number of direct proper base class
3924   //   descriptions that follow.
3925   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
3926 
3927   if (!RD->getNumBases())
3928     return;
3929 
3930   // Now add the base class descriptions.
3931 
3932   // Itanium C++ ABI 2.9.5p6c:
3933   //   __base_info[] is an array of base class descriptions -- one for every
3934   //   direct proper base. Each description is of the type:
3935   //
3936   //   struct abi::__base_class_type_info {
3937   //   public:
3938   //     const __class_type_info *__base_type;
3939   //     long __offset_flags;
3940   //
3941   //     enum __offset_flags_masks {
3942   //       __virtual_mask = 0x1,
3943   //       __public_mask = 0x2,
3944   //       __offset_shift = 8
3945   //     };
3946   //   };
3947 
3948   // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
3949   // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
3950   // LLP64 platforms.
3951   // FIXME: Consider updating libc++abi to match, and extend this logic to all
3952   // LLP64 platforms.
3953   QualType OffsetFlagsTy = CGM.getContext().LongTy;
3954   const TargetInfo &TI = CGM.getContext().getTargetInfo();
3955   if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
3956     OffsetFlagsTy = CGM.getContext().LongLongTy;
3957   llvm::Type *OffsetFlagsLTy =
3958       CGM.getTypes().ConvertType(OffsetFlagsTy);
3959 
3960   for (const auto &Base : RD->bases()) {
3961     // The __base_type member points to the RTTI for the base type.
3962     Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
3963 
3964     auto *BaseDecl =
3965         cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
3966 
3967     int64_t OffsetFlags = 0;
3968 
3969     // All but the lower 8 bits of __offset_flags are a signed offset.
3970     // For a non-virtual base, this is the offset in the object of the base
3971     // subobject. For a virtual base, this is the offset in the virtual table of
3972     // the virtual base offset for the virtual base referenced (negative).
3973     CharUnits Offset;
3974     if (Base.isVirtual())
3975       Offset =
3976         CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
3977     else {
3978       const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
3979       Offset = Layout.getBaseClassOffset(BaseDecl);
3980     };
3981 
3982     OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
3983 
3984     // The low-order byte of __offset_flags contains flags, as given by the
3985     // masks from the enumeration __offset_flags_masks.
3986     if (Base.isVirtual())
3987       OffsetFlags |= BCTI_Virtual;
3988     if (Base.getAccessSpecifier() == AS_public)
3989       OffsetFlags |= BCTI_Public;
3990 
3991     Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
3992   }
3993 }
3994 
3995 /// Compute the flags for a __pbase_type_info, and remove the corresponding
3996 /// pieces from \p Type.
3997 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
3998   unsigned Flags = 0;
3999 
4000   if (Type.isConstQualified())
4001     Flags |= ItaniumRTTIBuilder::PTI_Const;
4002   if (Type.isVolatileQualified())
4003     Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4004   if (Type.isRestrictQualified())
4005     Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4006   Type = Type.getUnqualifiedType();
4007 
4008   // Itanium C++ ABI 2.9.5p7:
4009   //   When the abi::__pbase_type_info is for a direct or indirect pointer to an
4010   //   incomplete class type, the incomplete target type flag is set.
4011   if (ContainsIncompleteClassType(Type))
4012     Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4013 
4014   if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4015     if (Proto->isNothrow()) {
4016       Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4017       Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
4018     }
4019   }
4020 
4021   return Flags;
4022 }
4023 
4024 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4025 /// used for pointer types.
4026 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4027   // Itanium C++ ABI 2.9.5p7:
4028   //   __flags is a flag word describing the cv-qualification and other
4029   //   attributes of the type pointed to
4030   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4031 
4032   llvm::Type *UnsignedIntLTy =
4033     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4034   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4035 
4036   // Itanium C++ ABI 2.9.5p7:
4037   //  __pointee is a pointer to the std::type_info derivation for the
4038   //  unqualified type being pointed to.
4039   llvm::Constant *PointeeTypeInfo =
4040       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4041   Fields.push_back(PointeeTypeInfo);
4042 }
4043 
4044 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4045 /// struct, used for member pointer types.
4046 void
4047 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4048   QualType PointeeTy = Ty->getPointeeType();
4049 
4050   // Itanium C++ ABI 2.9.5p7:
4051   //   __flags is a flag word describing the cv-qualification and other
4052   //   attributes of the type pointed to.
4053   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4054 
4055   const RecordType *ClassType = cast<RecordType>(Ty->getClass());
4056   if (IsIncompleteClassType(ClassType))
4057     Flags |= PTI_ContainingClassIncomplete;
4058 
4059   llvm::Type *UnsignedIntLTy =
4060     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4061   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4062 
4063   // Itanium C++ ABI 2.9.5p7:
4064   //   __pointee is a pointer to the std::type_info derivation for the
4065   //   unqualified type being pointed to.
4066   llvm::Constant *PointeeTypeInfo =
4067       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4068   Fields.push_back(PointeeTypeInfo);
4069 
4070   // Itanium C++ ABI 2.9.5p9:
4071   //   __context is a pointer to an abi::__class_type_info corresponding to the
4072   //   class type containing the member pointed to
4073   //   (e.g., the "A" in "int A::*").
4074   Fields.push_back(
4075       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
4076 }
4077 
4078 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4079   return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4080 }
4081 
4082 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
4083   // Types added here must also be added to TypeInfoIsInStandardLibrary.
4084   QualType FundamentalTypes[] = {
4085       getContext().VoidTy,             getContext().NullPtrTy,
4086       getContext().BoolTy,             getContext().WCharTy,
4087       getContext().CharTy,             getContext().UnsignedCharTy,
4088       getContext().SignedCharTy,       getContext().ShortTy,
4089       getContext().UnsignedShortTy,    getContext().IntTy,
4090       getContext().UnsignedIntTy,      getContext().LongTy,
4091       getContext().UnsignedLongTy,     getContext().LongLongTy,
4092       getContext().UnsignedLongLongTy, getContext().Int128Ty,
4093       getContext().UnsignedInt128Ty,   getContext().HalfTy,
4094       getContext().FloatTy,            getContext().DoubleTy,
4095       getContext().LongDoubleTy,       getContext().Float128Ty,
4096       getContext().Char8Ty,            getContext().Char16Ty,
4097       getContext().Char32Ty
4098   };
4099   llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4100       RD->hasAttr<DLLExportAttr>()
4101       ? llvm::GlobalValue::DLLExportStorageClass
4102       : llvm::GlobalValue::DefaultStorageClass;
4103   llvm::GlobalValue::VisibilityTypes Visibility =
4104       CodeGenModule::GetLLVMVisibility(RD->getVisibility());
4105   for (const QualType &FundamentalType : FundamentalTypes) {
4106     QualType PointerType = getContext().getPointerType(FundamentalType);
4107     QualType PointerTypeConst = getContext().getPointerType(
4108         FundamentalType.withConst());
4109     for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
4110       ItaniumRTTIBuilder(*this).BuildTypeInfo(
4111           Type, llvm::GlobalValue::ExternalLinkage,
4112           Visibility, DLLStorageClass);
4113   }
4114 }
4115 
4116 /// What sort of uniqueness rules should we use for the RTTI for the
4117 /// given type?
4118 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4119     QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4120   if (shouldRTTIBeUnique())
4121     return RUK_Unique;
4122 
4123   // It's only necessary for linkonce_odr or weak_odr linkage.
4124   if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4125       Linkage != llvm::GlobalValue::WeakODRLinkage)
4126     return RUK_Unique;
4127 
4128   // It's only necessary with default visibility.
4129   if (CanTy->getVisibility() != DefaultVisibility)
4130     return RUK_Unique;
4131 
4132   // If we're not required to publish this symbol, hide it.
4133   if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4134     return RUK_NonUniqueHidden;
4135 
4136   // If we're required to publish this symbol, as we might be under an
4137   // explicit instantiation, leave it with default visibility but
4138   // enable string-comparisons.
4139   assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4140   return RUK_NonUniqueVisible;
4141 }
4142 
// Find out how to codegen the complete destructor and constructor
namespace {
// How emitCXXStructor handles a complete-object structor:
//  - Emit:   emit a separate definition for this variant.
//  - RAUW:   don't emit this variant; record a replacement so uses of its
//            symbol resolve to the base variant.
//  - Alias:  emit this variant as a GlobalAlias of the base variant.
//  - COMDAT: emit the definition in a COMDAT group keyed by the C5/D5
//            comdat mangling.
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
}
4147 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4148                                        const CXXMethodDecl *MD) {
4149   if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4150     return StructorCodegen::Emit;
4151 
4152   // The complete and base structors are not equivalent if there are any virtual
4153   // bases, so emit separate functions.
4154   if (MD->getParent()->getNumVBases())
4155     return StructorCodegen::Emit;
4156 
4157   GlobalDecl AliasDecl;
4158   if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4159     AliasDecl = GlobalDecl(DD, Dtor_Complete);
4160   } else {
4161     const auto *CD = cast<CXXConstructorDecl>(MD);
4162     AliasDecl = GlobalDecl(CD, Ctor_Complete);
4163   }
4164   llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4165 
4166   if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4167     return StructorCodegen::RAUW;
4168 
4169   // FIXME: Should we allow available_externally aliases?
4170   if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4171     return StructorCodegen::RAUW;
4172 
4173   if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4174     // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4175     if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4176         CGM.getTarget().getTriple().isOSBinFormatWasm())
4177       return StructorCodegen::COMDAT;
4178     return StructorCodegen::Emit;
4179   }
4180 
4181   return StructorCodegen::Alias;
4182 }
4183 
/// Emit the structor \p AliasDecl as an llvm::GlobalAlias for the function
/// emitted for \p TargetDecl (used to make the complete-object variant an
/// alias of the base-object variant).  No-op if a definition already exists
/// under the alias's mangled name.
static void emitConstructorDestructorAlias(CodeGenModule &CGM,
                                           GlobalDecl AliasDecl,
                                           GlobalDecl TargetDecl) {
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  // If something has already been emitted under this name, leave it alone.
  StringRef MangledName = CGM.getMangledName(AliasDecl);
  llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
  if (Entry && !Entry->isDeclaration())
    return;

  auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));

  // Create the alias with no name.
  auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);

  // Constructors and destructors are always unnamed_addr.
  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Switch any previous uses to the alias.  takeName before RAUW so the
  // alias ends up under the expected mangled name.
  if (Entry) {
    assert(Entry->getType() == Aliasee->getType() &&
           "declaration exists with different type");
    Alias->takeName(Entry);
    Entry->replaceAllUsesWith(Alias);
    Entry->eraseFromParent();
  } else {
    Alias->setName(MangledName);
  }

  // Finally, set up the alias with its proper name and attributes.
  CGM.SetCommonAttributes(AliasDecl, Alias);
}
4216 
/// Emit the constructor or destructor variant \p GD.  Depending on
/// getCodegenToUse, the complete-object variant may be emitted as an alias
/// of the base variant, replaced via RAUW, shared through a COMDAT group,
/// or emitted as a separate definition.
void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
  // Exactly one of CD / DD is non-null: a structor is a ctor or a dtor.
  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);

  StructorCodegen CGType = getCodegenToUse(CGM, MD);

  // For complete-object variants, try to reuse the base-object variant.
  if (CD ? GD.getCtorType() == Ctor_Complete
         : GD.getDtorType() == Dtor_Complete) {
    GlobalDecl BaseDecl;
    if (CD)
      BaseDecl = GD.getWithCtorType(Ctor_Base);
    else
      BaseDecl = GD.getWithDtorType(Dtor_Base);

    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
      emitConstructorDestructorAlias(CGM, GD, BaseDecl);
      return;
    }

    if (CGType == StructorCodegen::RAUW) {
      // Record that uses of the complete-object symbol should be replaced
      // with the base-object definition.
      StringRef MangledName = CGM.getMangledName(GD);
      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
      CGM.addReplacement(MangledName, Aliasee);
      return;
    }
  }

  // The base destructor is equivalent to the base destructor of its
  // base class if there is exactly one non-virtual base class with a
  // non-trivial destructor, there are no fields with a non-trivial
  // destructor, and the body of the destructor is trivial.
  if (DD && GD.getDtorType() == Dtor_Base &&
      CGType != StructorCodegen::COMDAT &&
      !CGM.TryEmitBaseDestructorAsAlias(DD))
    return;

  // FIXME: The deleting destructor is equivalent to the selected operator
  // delete if:
  //  * either the delete is a destroying operator delete or the destructor
  //    would be trivial if it weren't virtual,
  //  * the conversion from the 'this' parameter to the first parameter of the
  //    destructor is equivalent to a bitcast,
  //  * the destructor does not have an implicit "this" return, and
  //  * the operator delete has the same calling convention and IR function type
  //    as the destructor.
  // In such cases we should try to emit the deleting dtor as an alias to the
  // selected 'operator delete'.

  llvm::Function *Fn = CGM.codegenCXXStructor(GD);

  if (CGType == StructorCodegen::COMDAT) {
    // All variants share one COMDAT group keyed by the C5/D5 mangling.
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    if (DD)
      getMangleContext().mangleCXXDtorComdat(DD, Out);
    else
      getMangleContext().mangleCXXCtorComdat(CD, Out);
    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
    Fn->setComdat(C);
  } else {
    CGM.maybeSetTrivialComdat(*MD, *Fn);
  }
}
4281 
4282 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4283   // void *__cxa_begin_catch(void*);
4284   llvm::FunctionType *FTy = llvm::FunctionType::get(
4285       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4286 
4287   return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4288 }
4289 
4290 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4291   // void __cxa_end_catch();
4292   llvm::FunctionType *FTy =
4293       llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4294 
4295   return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4296 }
4297 
4298 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4299   // void *__cxa_get_exception_ptr(void*);
4300   llvm::FunctionType *FTy = llvm::FunctionType::get(
4301       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4302 
4303   return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4304 }
4305 
4306 namespace {
4307   /// A cleanup to call __cxa_end_catch.  In many cases, the caught
4308   /// exception type lets us state definitively that the thrown exception
4309   /// type does not have a destructor.  In particular:
4310   ///   - Catch-alls tell us nothing, so we have to conservatively
4311   ///     assume that the thrown exception might have a destructor.
4312   ///   - Catches by reference behave according to their base types.
4313   ///   - Catches of non-record types will only trigger for exceptions
4314   ///     of non-record types, which never have destructors.
4315   ///   - Catches of record types can trigger for arbitrary subclasses
4316   ///     of the caught type, so we have to assume the actual thrown
4317   ///     exception type might have a throwing destructor, even if the
4318   ///     caught type's destructor is trivial or nothrow.
4319   struct CallEndCatch final : EHScopeStack::Cleanup {
4320     CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4321     bool MightThrow;
4322 
4323     void Emit(CodeGenFunction &CGF, Flags flags) override {
4324       if (!MightThrow) {
4325         CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
4326         return;
4327       }
4328 
4329       CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4330     }
4331   };
4332 }
4333 
4334 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
4335 /// __cxa_end_catch.
4336 ///
4337 /// \param EndMightThrow - true if __cxa_end_catch might throw
4338 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4339                                    llvm::Value *Exn,
4340                                    bool EndMightThrow) {
4341   llvm::CallInst *call =
4342     CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4343 
4344   CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
4345 
4346   return call;
4347 }
4348 
/// A "special initializer" callback for initializing a catch
/// parameter during catch initialization.
///
/// Initializes the handler's exception-declaration variable \p CatchParam
/// at \p ParamAddr from the exception object saved in the landing-pad
/// slot.  As a side effect this calls __cxa_begin_catch (and, on most
/// paths, enters the matching __cxa_end_catch cleanup via CallBeginCatch).
static void InitCatchParam(CodeGenFunction &CGF,
                           const VarDecl &CatchParam,
                           Address ParamAddr,
                           SourceLocation Loc) {
  // Load the exception from where the landing pad saved it.
  llvm::Value *Exn = CGF.getExceptionFromSlot();

  // Work with the canonical catch type; LLVMCatchTy is its in-memory
  // LLVM representation.
  CanQualType CatchType =
    CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);

  // If we're catching by reference, we can just cast the object
  // pointer to the appropriate pointer.
  if (isa<ReferenceType>(CatchType)) {
    QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
    // __cxa_end_catch can only throw when destroying the exception
    // object, which requires the caught type to be a record type.
    bool EndCatchMightThrow = CaughtType->isRecordType();

    // __cxa_begin_catch returns the adjusted object pointer.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);

    // We have no way to tell the personality function that we're
    // catching by reference, so if we're catching a pointer,
    // __cxa_begin_catch will actually return that pointer by value.
    if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
      QualType PointeeType = PT->getPointeeType();

      // When catching by reference, generally we should just ignore
      // this by-value pointer and use the exception object instead.
      if (!PointeeType->isRecordType()) {

        // Exn points to the struct _Unwind_Exception header, which
        // we have to skip past in order to reach the exception data.
        unsigned HeaderSize =
          CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
        AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);

      // However, if we're catching a pointer-to-record type that won't
      // work, because the personality function might have adjusted
      // the pointer.  There's actually no way for us to fully satisfy
      // the language/ABI contract here:  we can't use Exn because it
      // might have the wrong adjustment, but we can't use the by-value
      // pointer because it's off by a level of abstraction.
      //
      // The current solution is to dump the adjusted pointer into an
      // alloca, which breaks language semantics (because changing the
      // pointer doesn't change the exception) but at least works.
      // The better solution would be to filter out non-exact matches
      // and rethrow them, but this is tricky because the rethrow
      // really needs to be catchable by other sites at this landing
      // pad.  The best solution is to fix the personality function.
      } else {
        // Pull the pointer for the reference type off.
        llvm::Type *PtrTy =
          cast<llvm::PointerType>(LLVMCatchTy)->getElementType();

        // Create the temporary and write the adjusted pointer into it.
        Address ExnPtrTmp =
          CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
        llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
        CGF.Builder.CreateStore(Casted, ExnPtrTmp);

        // Bind the reference to the temporary.
        AdjustedExn = ExnPtrTmp.getPointer();
      }
    }

    // Store the (possibly adjusted) object pointer into the reference.
    llvm::Value *ExnCast =
      CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
    CGF.Builder.CreateStore(ExnCast, ParamAddr);
    return;
  }

  // Scalars and complexes.
  TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
  if (TEK != TEK_Aggregate) {
    // Non-record catch types never have destructors, so __cxa_end_catch
    // cannot throw here.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);

    // If the catch type is a pointer type, __cxa_begin_catch returns
    // the pointer by value.
    if (CatchType->hasPointerRepresentation()) {
      llvm::Value *CastExn =
        CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");

      // Honor the ObjC ownership qualifier, if any, when initializing
      // the catch variable from the caught pointer.
      switch (CatchType.getQualifiers().getObjCLifetime()) {
      case Qualifiers::OCL_Strong:
        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
        LLVM_FALLTHROUGH;

      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        CGF.Builder.CreateStore(CastExn, ParamAddr);
        return;

      case Qualifiers::OCL_Weak:
        CGF.EmitARCInitWeak(ParamAddr, CastExn);
        return;
      }
      llvm_unreachable("bad ownership qualifier!");
    }

    // Otherwise, it returns a pointer into the exception object.

    llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
    llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);

    // Load the value out of the exception object and store it into the
    // catch variable, as a scalar or a complex as appropriate.
    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
    LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
    switch (TEK) {
    case TEK_Complex:
      CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
                             /*init*/ true);
      return;
    case TEK_Scalar: {
      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
      CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
      return;
    }
    case TEK_Aggregate:
      llvm_unreachable("evaluation kind filtered out!");
    }
    llvm_unreachable("bad evaluation kind");
  }

  // Otherwise the catch is by value of a record type.
  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
  auto catchRD = CatchType->getAsCXXRecordDecl();
  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);

  llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok

  // Check for a copy expression.  If we don't have a copy expression,
  // that means a trivial copy is okay.
  const Expr *copyExpr = CatchParam.getInit();
  if (!copyExpr) {
    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
    Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                        caughtExnAlignment);
    LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
    LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
    CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
    return;
  }

  // We have to call __cxa_get_exception_ptr to get the adjusted
  // pointer before copying.
  llvm::CallInst *rawAdjustedExn =
    CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);

  // Cast that to the appropriate type.
  Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                      caughtExnAlignment);

  // The copy expression is defined in terms of an OpaqueValueExpr.
  // Find it and map it to the adjusted expression.
  CodeGenFunction::OpaqueValueMapping
    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
           CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));

  // Call the copy ctor in a terminate scope.
  CGF.EHStack.pushTerminate();

  // Perform the copy construction.
  CGF.EmitAggExpr(copyExpr,
                  AggValueSlot::forAddr(ParamAddr, Qualifiers(),
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        AggValueSlot::DoesNotOverlap));

  // Leave the terminate scope.
  CGF.EHStack.popTerminate();

  // Undo the opaque value mapping.
  opaque.pop();

  // Finally we can call __cxa_begin_catch.
  CallBeginCatch(CGF, Exn, true);
}
4529 
4530 /// Begins a catch statement by initializing the catch variable and
4531 /// calling __cxa_begin_catch.
4532 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4533                                    const CXXCatchStmt *S) {
4534   // We have to be very careful with the ordering of cleanups here:
4535   //   C++ [except.throw]p4:
4536   //     The destruction [of the exception temporary] occurs
4537   //     immediately after the destruction of the object declared in
4538   //     the exception-declaration in the handler.
4539   //
4540   // So the precise ordering is:
4541   //   1.  Construct catch variable.
4542   //   2.  __cxa_begin_catch
4543   //   3.  Enter __cxa_end_catch cleanup
4544   //   4.  Enter dtor cleanup
4545   //
4546   // We do this by using a slightly abnormal initialization process.
4547   // Delegation sequence:
4548   //   - ExitCXXTryStmt opens a RunCleanupsScope
4549   //     - EmitAutoVarAlloca creates the variable and debug info
4550   //       - InitCatchParam initializes the variable from the exception
4551   //       - CallBeginCatch calls __cxa_begin_catch
4552   //       - CallBeginCatch enters the __cxa_end_catch cleanup
4553   //     - EmitAutoVarCleanups enters the variable destructor cleanup
4554   //   - EmitCXXTryStmt emits the code for the catch body
4555   //   - EmitCXXTryStmt close the RunCleanupsScope
4556 
4557   VarDecl *CatchParam = S->getExceptionDecl();
4558   if (!CatchParam) {
4559     llvm::Value *Exn = CGF.getExceptionFromSlot();
4560     CallBeginCatch(CGF, Exn, true);
4561     return;
4562   }
4563 
4564   // Emit the local.
4565   CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4566   InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4567   CGF.EmitAutoVarCleanups(var);
4568 }
4569 
4570 /// Get or define the following function:
4571 ///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
4572 /// This code is used only in C++.
4573 static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
4574   llvm::FunctionType *fnTy =
4575     llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4576   llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
4577       fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4578   llvm::Function *fn =
4579       cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
4580   if (fn->empty()) {
4581     fn->setDoesNotThrow();
4582     fn->setDoesNotReturn();
4583 
4584     // What we really want is to massively penalize inlining without
4585     // forbidding it completely.  The difference between that and
4586     // 'noinline' is negligible.
4587     fn->addFnAttr(llvm::Attribute::NoInline);
4588 
4589     // Allow this function to be shared across translation units, but
4590     // we don't want it to turn into an exported symbol.
4591     fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4592     fn->setVisibility(llvm::Function::HiddenVisibility);
4593     if (CGM.supportsCOMDAT())
4594       fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4595 
4596     // Set up the function.
4597     llvm::BasicBlock *entry =
4598         llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4599     CGBuilderTy builder(CGM, entry);
4600 
4601     // Pull the exception pointer out of the parameter list.
4602     llvm::Value *exn = &*fn->arg_begin();
4603 
4604     // Call __cxa_begin_catch(exn).
4605     llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4606     catchCall->setDoesNotThrow();
4607     catchCall->setCallingConv(CGM.getRuntimeCC());
4608 
4609     // Call std::terminate().
4610     llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4611     termCall->setDoesNotThrow();
4612     termCall->setDoesNotReturn();
4613     termCall->setCallingConv(CGM.getRuntimeCC());
4614 
4615     // std::terminate cannot return.
4616     builder.CreateUnreachable();
4617   }
4618   return fnRef;
4619 }
4620 
4621 llvm::CallInst *
4622 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4623                                                    llvm::Value *Exn) {
4624   // In C++, we want to call __cxa_begin_catch() before terminating.
4625   if (Exn) {
4626     assert(CGF.CGM.getLangOpts().CPlusPlus);
4627     return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4628   }
4629   return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4630 }
4631 
4632 std::pair<llvm::Value *, const CXXRecordDecl *>
4633 ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4634                              const CXXRecordDecl *RD) {
4635   return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4636 }
4637 
4638 void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4639                                        const CXXCatchStmt *C) {
4640   if (CGF.getTarget().hasFeature("exception-handling"))
4641     CGF.EHStack.pushCleanup<CatchRetScope>(
4642         NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4643   ItaniumCXXABI::emitBeginCatch(CGF, C);
4644 }
4645 
llvm::CallInst *
WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                                       llvm::Value *Exn) {
  // The Itanium ABI calls __clang_call_terminate(), which calls
  // __cxa_begin_catch() on the violating exception to mark it handled.
  // That is currently hard to do with the wasm EH instruction structure
  // (catch/catch_all), so we just call std::terminate() and ignore the
  // violating exception, as the base CGCXXABI implementation does.
  // TODO: Consider a code transformation that makes calling
  // __clang_call_terminate possible.
  return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
}
4657 
4658 /// Register a global destructor as best as we know how.
4659 void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
4660                                   llvm::FunctionCallee dtor,
4661                                   llvm::Constant *addr) {
4662   if (D.getTLSKind() != VarDecl::TLS_None)
4663     llvm::report_fatal_error("thread local storage not yet implemented on AIX");
4664 
4665   // Create __dtor function for the var decl.
4666   llvm::Function *dtorStub = CGF.createAtExitStub(D, dtor, addr);
4667 
4668   // Register above __dtor with atexit().
4669   CGF.registerGlobalDtorWithAtExit(dtorStub);
4670 
4671   // Emit __finalize function to unregister __dtor and (as appropriate) call
4672   // __dtor.
4673   emitCXXStermFinalizer(D, dtorStub, addr);
4674 }
4675 
/// Emit the __finalize ("sterm finalizer") function for \p D: a nullary
/// function that calls unatexit() to unregister \p dtorStub and, if the
/// destructor call was still pending (unatexit returned 0), invokes
/// \p dtorStub directly.  The finalizer is then registered to run at
/// module termination.
void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                                     llvm::Constant *addr) {
  // The finalizer takes no arguments and returns void.
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
  // Derive the finalizer's mangled name from the variable.
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getMangleContext().mangleDynamicStermFinalizer(&D, Out);
  }

  // Create the finalization action associated with a variable.
  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, FnName.str(), FI, D.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
                    FunctionArgList(), D.getLocation(),
                    D.getInit()->getExprLoc());

  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // the unatexit returns a value of 0, meaning that the cleanup is still
  // pending (and we should call the __dtor function).
  llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);

  llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");

  llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");

  // Check if unatexit returns a value of 0. If it does, jump to
  // DestructCallBlock, otherwise jump to EndBlock directly.
  CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

  CGF.EmitBlock(DestructCallBlock);

  // Emit the call to dtorStub.
  llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);

  // Make sure the call and the callee agree on calling convention.
  CI->setCallingConv(dtorStub->getCallingConv());

  CGF.EmitBlock(EndBlock);

  CGF.FinishFunction();

  // Prioritized finalization is not supported on this ABI yet.
  assert(!D.getAttr<InitPriorityAttr>() &&
         "Prioritized sinit and sterm functions are not yet supported.");

  if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
      getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR)
    // According to C++ [basic.start.init]p2, class template static data
    // members (i.e., implicitly or explicitly instantiated specializations)
    // have unordered initialization. As a consequence, we can put them into
    // their own llvm.global_dtors entry.
    CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
  else
    CGM.AddCXXStermFinalizerEntry(StermFinalizer);
}
4736