1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This provides C++ code generation targeting the Itanium C++ ABI. The class 11 // in this file generates structures that follow the Itanium C++ ABI, which is 12 // documented at: 13 // http://www.codesourcery.com/public/cxx-abi/abi.html 14 // http://www.codesourcery.com/public/cxx-abi/abi-eh.html 15 // 16 // It also supports the closely-related ARM ABI, documented at: 17 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf 18 // 19 //===----------------------------------------------------------------------===// 20 21 #include "CGCXXABI.h" 22 #include "CGCleanup.h" 23 #include "CGRecordLayout.h" 24 #include "CGVTables.h" 25 #include "CodeGenFunction.h" 26 #include "CodeGenModule.h" 27 #include "ConstantBuilder.h" 28 #include "TargetInfo.h" 29 #include "clang/AST/Mangle.h" 30 #include "clang/AST/Type.h" 31 #include "clang/AST/StmtCXX.h" 32 #include "llvm/IR/CallSite.h" 33 #include "llvm/IR/DataLayout.h" 34 #include "llvm/IR/Instructions.h" 35 #include "llvm/IR/Intrinsics.h" 36 #include "llvm/IR/Value.h" 37 38 using namespace clang; 39 using namespace CodeGen; 40 41 namespace { 42 class ItaniumCXXABI : public CodeGen::CGCXXABI { 43 /// VTables - All the vtables which have been defined. 
44 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables; 45 46 protected: 47 bool UseARMMethodPtrABI; 48 bool UseARMGuardVarABI; 49 bool Use32BitVTableOffsetABI; 50 51 ItaniumMangleContext &getMangleContext() { 52 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext()); 53 } 54 55 public: 56 ItaniumCXXABI(CodeGen::CodeGenModule &CGM, 57 bool UseARMMethodPtrABI = false, 58 bool UseARMGuardVarABI = false) : 59 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI), 60 UseARMGuardVarABI(UseARMGuardVarABI), 61 Use32BitVTableOffsetABI(false) { } 62 63 bool classifyReturnType(CGFunctionInfo &FI) const override; 64 65 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override { 66 // Structures with either a non-trivial destructor or a non-trivial 67 // copy constructor are always indirect. 68 // FIXME: Use canCopyArgument() when it is fixed to handle lazily declared 69 // special members. 70 if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor()) 71 return RAA_Indirect; 72 return RAA_Default; 73 } 74 75 bool isThisCompleteObject(GlobalDecl GD) const override { 76 // The Itanium ABI has separate complete-object vs. base-object 77 // variants of both constructors and destructors. 78 if (isa<CXXDestructorDecl>(GD.getDecl())) { 79 switch (GD.getDtorType()) { 80 case Dtor_Complete: 81 case Dtor_Deleting: 82 return true; 83 84 case Dtor_Base: 85 return false; 86 87 case Dtor_Comdat: 88 llvm_unreachable("emitting dtor comdat as function?"); 89 } 90 llvm_unreachable("bad dtor kind"); 91 } 92 if (isa<CXXConstructorDecl>(GD.getDecl())) { 93 switch (GD.getCtorType()) { 94 case Ctor_Complete: 95 return true; 96 97 case Ctor_Base: 98 return false; 99 100 case Ctor_CopyingClosure: 101 case Ctor_DefaultClosure: 102 llvm_unreachable("closure ctors in Itanium ABI?"); 103 104 case Ctor_Comdat: 105 llvm_unreachable("emitting ctor comdat as function?"); 106 } 107 llvm_unreachable("bad dtor kind"); 108 } 109 110 // No other kinds. 
111 return false; 112 } 113 114 bool isZeroInitializable(const MemberPointerType *MPT) override; 115 116 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override; 117 118 CGCallee 119 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, 120 const Expr *E, 121 Address This, 122 llvm::Value *&ThisPtrForCall, 123 llvm::Value *MemFnPtr, 124 const MemberPointerType *MPT) override; 125 126 llvm::Value * 127 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E, 128 Address Base, 129 llvm::Value *MemPtr, 130 const MemberPointerType *MPT) override; 131 132 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF, 133 const CastExpr *E, 134 llvm::Value *Src) override; 135 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E, 136 llvm::Constant *Src) override; 137 138 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override; 139 140 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override; 141 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT, 142 CharUnits offset) override; 143 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override; 144 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD, 145 CharUnits ThisAdjustment); 146 147 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF, 148 llvm::Value *L, llvm::Value *R, 149 const MemberPointerType *MPT, 150 bool Inequality) override; 151 152 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF, 153 llvm::Value *Addr, 154 const MemberPointerType *MPT) override; 155 156 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE, 157 Address Ptr, QualType ElementType, 158 const CXXDestructorDecl *Dtor) override; 159 160 CharUnits getAlignmentOfExnObject() { 161 unsigned Align = CGM.getContext().getTargetInfo().getExnObjectAlignment(); 162 return CGM.getContext().toCharUnitsFromBits(Align); 163 } 164 165 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override; 166 
void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override; 167 168 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override; 169 170 llvm::CallInst * 171 emitTerminateForUnexpectedException(CodeGenFunction &CGF, 172 llvm::Value *Exn) override; 173 174 void EmitFundamentalRTTIDescriptor(QualType Type, bool DLLExport); 175 void EmitFundamentalRTTIDescriptors(bool DLLExport); 176 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override; 177 CatchTypeInfo 178 getAddrOfCXXCatchHandlerType(QualType Ty, 179 QualType CatchHandlerType) override { 180 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0}; 181 } 182 183 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override; 184 void EmitBadTypeidCall(CodeGenFunction &CGF) override; 185 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy, 186 Address ThisPtr, 187 llvm::Type *StdTypeInfoPtrTy) override; 188 189 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr, 190 QualType SrcRecordTy) override; 191 192 llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value, 193 QualType SrcRecordTy, QualType DestTy, 194 QualType DestRecordTy, 195 llvm::BasicBlock *CastEnd) override; 196 197 llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value, 198 QualType SrcRecordTy, 199 QualType DestTy) override; 200 201 bool EmitBadCastCall(CodeGenFunction &CGF) override; 202 203 llvm::Value * 204 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This, 205 const CXXRecordDecl *ClassDecl, 206 const CXXRecordDecl *BaseClassDecl) override; 207 208 void EmitCXXConstructors(const CXXConstructorDecl *D) override; 209 210 void buildStructorSignature(const CXXMethodDecl *MD, StructorType T, 211 SmallVectorImpl<CanQualType> &ArgTys) override; 212 213 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, 214 CXXDtorType DT) const override { 215 // Itanium does not emit any destructor variant as an inline thunk. 
216 // Delegating may occur as an optimization, but all variants are either 217 // emitted with external linkage or as linkonce if they are inline and used. 218 return false; 219 } 220 221 void EmitCXXDestructors(const CXXDestructorDecl *D) override; 222 223 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, 224 FunctionArgList &Params) override; 225 226 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override; 227 228 unsigned addImplicitConstructorArgs(CodeGenFunction &CGF, 229 const CXXConstructorDecl *D, 230 CXXCtorType Type, bool ForVirtualBase, 231 bool Delegating, 232 CallArgList &Args) override; 233 234 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD, 235 CXXDtorType Type, bool ForVirtualBase, 236 bool Delegating, Address This) override; 237 238 void emitVTableDefinitions(CodeGenVTables &CGVT, 239 const CXXRecordDecl *RD) override; 240 241 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF, 242 CodeGenFunction::VPtr Vptr) override; 243 244 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override { 245 return true; 246 } 247 248 llvm::Constant * 249 getVTableAddressPoint(BaseSubobject Base, 250 const CXXRecordDecl *VTableClass) override; 251 252 llvm::Value *getVTableAddressPointInStructor( 253 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, 254 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override; 255 256 llvm::Value *getVTableAddressPointInStructorWithVTT( 257 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, 258 BaseSubobject Base, const CXXRecordDecl *NearestVBase); 259 260 llvm::Constant * 261 getVTableAddressPointForConstExpr(BaseSubobject Base, 262 const CXXRecordDecl *VTableClass) override; 263 264 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD, 265 CharUnits VPtrOffset) override; 266 267 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, 268 Address This, llvm::Type *Ty, 269 SourceLocation Loc) override; 270 
271 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF, 272 const CXXDestructorDecl *Dtor, 273 CXXDtorType DtorType, 274 Address This, 275 const CXXMemberCallExpr *CE) override; 276 277 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override; 278 279 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override; 280 281 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD, 282 bool ReturnAdjustment) override { 283 // Allow inlining of thunks by emitting them with available_externally 284 // linkage together with vtables when needed. 285 if (ForVTable && !Thunk->hasLocalLinkage()) 286 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage); 287 } 288 289 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This, 290 const ThisAdjustment &TA) override; 291 292 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret, 293 const ReturnAdjustment &RA) override; 294 295 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, 296 FunctionArgList &Args) const override { 297 assert(!Args.empty() && "expected the arglist to not be empty!"); 298 return Args.size() - 1; 299 } 300 301 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; } 302 StringRef GetDeletedVirtualCallName() override 303 { return "__cxa_deleted_virtual"; } 304 305 CharUnits getArrayCookieSizeImpl(QualType elementType) override; 306 Address InitializeArrayCookie(CodeGenFunction &CGF, 307 Address NewPtr, 308 llvm::Value *NumElements, 309 const CXXNewExpr *expr, 310 QualType ElementType) override; 311 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, 312 Address allocPtr, 313 CharUnits cookieSize) override; 314 315 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D, 316 llvm::GlobalVariable *DeclPtr, 317 bool PerformInit) override; 318 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, 319 llvm::Constant *dtor, llvm::Constant *addr) override; 320 321 llvm::Function 
*getOrCreateThreadLocalWrapper(const VarDecl *VD, 322 llvm::Value *Val); 323 void EmitThreadLocalInitFuncs( 324 CodeGenModule &CGM, 325 ArrayRef<const VarDecl *> CXXThreadLocals, 326 ArrayRef<llvm::Function *> CXXThreadLocalInits, 327 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override; 328 329 bool usesThreadWrapperFunction() const override { return true; } 330 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, 331 QualType LValType) override; 332 333 bool NeedsVTTParameter(GlobalDecl GD) override; 334 335 /**************************** RTTI Uniqueness ******************************/ 336 337 protected: 338 /// Returns true if the ABI requires RTTI type_info objects to be unique 339 /// across a program. 340 virtual bool shouldRTTIBeUnique() const { return true; } 341 342 public: 343 /// What sort of unique-RTTI behavior should we use? 344 enum RTTIUniquenessKind { 345 /// We are guaranteeing, or need to guarantee, that the RTTI string 346 /// is unique. 347 RUK_Unique, 348 349 /// We are not guaranteeing uniqueness for the RTTI string, so we 350 /// can demote to hidden visibility but must use string comparisons. 351 RUK_NonUniqueHidden, 352 353 /// We are not guaranteeing uniqueness for the RTTI string, so we 354 /// have to use string comparisons, but we also have to emit it with 355 /// non-hidden visibility. 356 RUK_NonUniqueVisible 357 }; 358 359 /// Return the required visibility status for the given type and linkage in 360 /// the current ABI. 
361 RTTIUniquenessKind 362 classifyRTTIUniqueness(QualType CanTy, 363 llvm::GlobalValue::LinkageTypes Linkage) const; 364 friend class ItaniumRTTIBuilder; 365 366 void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override; 367 368 private: 369 bool hasAnyVirtualInlineFunction(const CXXRecordDecl *RD) const { 370 const auto &VtableLayout = 371 CGM.getItaniumVTableContext().getVTableLayout(RD); 372 373 for (const auto &VtableComponent : VtableLayout.vtable_components()) { 374 // Skip empty slot. 375 if (!VtableComponent.isUsedFunctionPointerKind()) 376 continue; 377 378 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl(); 379 if (Method->getCanonicalDecl()->isInlined()) 380 return true; 381 } 382 return false; 383 } 384 385 bool isVTableHidden(const CXXRecordDecl *RD) const { 386 const auto &VtableLayout = 387 CGM.getItaniumVTableContext().getVTableLayout(RD); 388 389 for (const auto &VtableComponent : VtableLayout.vtable_components()) { 390 if (VtableComponent.isRTTIKind()) { 391 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl(); 392 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility) 393 return true; 394 } else if (VtableComponent.isUsedFunctionPointerKind()) { 395 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl(); 396 if (Method->getVisibility() == Visibility::HiddenVisibility && 397 !Method->isDefined()) 398 return true; 399 } 400 } 401 return false; 402 } 403 }; 404 405 class ARMCXXABI : public ItaniumCXXABI { 406 public: 407 ARMCXXABI(CodeGen::CodeGenModule &CGM) : 408 ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true, 409 /* UseARMGuardVarABI = */ true) {} 410 411 bool HasThisReturn(GlobalDecl GD) const override { 412 return (isa<CXXConstructorDecl>(GD.getDecl()) || ( 413 isa<CXXDestructorDecl>(GD.getDecl()) && 414 GD.getDtorType() != Dtor_Deleting)); 415 } 416 417 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, 418 QualType ResTy) override; 419 420 CharUnits 
getArrayCookieSizeImpl(QualType elementType) override; 421 Address InitializeArrayCookie(CodeGenFunction &CGF, 422 Address NewPtr, 423 llvm::Value *NumElements, 424 const CXXNewExpr *expr, 425 QualType ElementType) override; 426 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr, 427 CharUnits cookieSize) override; 428 }; 429 430 class iOS64CXXABI : public ARMCXXABI { 431 public: 432 iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) { 433 Use32BitVTableOffsetABI = true; 434 } 435 436 // ARM64 libraries are prepared for non-unique RTTI. 437 bool shouldRTTIBeUnique() const override { return false; } 438 }; 439 440 class WebAssemblyCXXABI final : public ItaniumCXXABI { 441 public: 442 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM) 443 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true, 444 /*UseARMGuardVarABI=*/true) {} 445 446 private: 447 bool HasThisReturn(GlobalDecl GD) const override { 448 return isa<CXXConstructorDecl>(GD.getDecl()) || 449 (isa<CXXDestructorDecl>(GD.getDecl()) && 450 GD.getDtorType() != Dtor_Deleting); 451 } 452 bool canCallMismatchedFunctionType() const override { return false; } 453 }; 454 } 455 456 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) { 457 switch (CGM.getTarget().getCXXABI().getKind()) { 458 // For IR-generation purposes, there's no significant difference 459 // between the ARM and iOS ABIs. 460 case TargetCXXABI::GenericARM: 461 case TargetCXXABI::iOS: 462 case TargetCXXABI::WatchOS: 463 return new ARMCXXABI(CGM); 464 465 case TargetCXXABI::iOS64: 466 return new iOS64CXXABI(CGM); 467 468 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't 469 // include the other 32-bit ARM oddities: constructor/destructor return values 470 // and array cookies. 
471 case TargetCXXABI::GenericAArch64: 472 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true, 473 /* UseARMGuardVarABI = */ true); 474 475 case TargetCXXABI::GenericMIPS: 476 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true); 477 478 case TargetCXXABI::WebAssembly: 479 return new WebAssemblyCXXABI(CGM); 480 481 case TargetCXXABI::GenericItanium: 482 if (CGM.getContext().getTargetInfo().getTriple().getArch() 483 == llvm::Triple::le32) { 484 // For PNaCl, use ARM-style method pointers so that PNaCl code 485 // does not assume anything about the alignment of function 486 // pointers. 487 return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true, 488 /* UseARMGuardVarABI = */ false); 489 } 490 return new ItaniumCXXABI(CGM); 491 492 case TargetCXXABI::Microsoft: 493 llvm_unreachable("Microsoft ABI is not Itanium-based"); 494 } 495 llvm_unreachable("bad ABI kind"); 496 } 497 498 llvm::Type * 499 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) { 500 if (MPT->isMemberDataPointer()) 501 return CGM.PtrDiffTy; 502 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy, nullptr); 503 } 504 505 /// In the Itanium and ARM ABIs, method pointers have the form: 506 /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr; 507 /// 508 /// In the Itanium ABI: 509 /// - method pointers are virtual if (memptr.ptr & 1) is nonzero 510 /// - the this-adjustment is (memptr.adj) 511 /// - the virtual offset is (memptr.ptr - 1) 512 /// 513 /// In the ARM ABI: 514 /// - method pointers are virtual if (memptr.adj & 1) is nonzero 515 /// - the this-adjustment is (memptr.adj >> 1) 516 /// - the virtual offset is (memptr.ptr) 517 /// ARM uses 'adj' for the virtual flag because Thumb functions 518 /// may be only single-byte aligned. 519 /// 520 /// If the member is virtual, the adjusted 'this' pointer points 521 /// to a vtable pointer from which the virtual offset is applied. 
522 /// 523 /// If the member is non-virtual, memptr.ptr is the address of 524 /// the function to call. 525 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer( 526 CodeGenFunction &CGF, const Expr *E, Address ThisAddr, 527 llvm::Value *&ThisPtrForCall, 528 llvm::Value *MemFnPtr, const MemberPointerType *MPT) { 529 CGBuilderTy &Builder = CGF.Builder; 530 531 const FunctionProtoType *FPT = 532 MPT->getPointeeType()->getAs<FunctionProtoType>(); 533 const CXXRecordDecl *RD = 534 cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl()); 535 536 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType( 537 CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr)); 538 539 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1); 540 541 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual"); 542 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual"); 543 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end"); 544 545 // Extract memptr.adj, which is in the second field. 546 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj"); 547 548 // Compute the true adjustment. 549 llvm::Value *Adj = RawAdj; 550 if (UseARMMethodPtrABI) 551 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted"); 552 553 // Apply the adjustment and cast back to the original struct type 554 // for consistency. 555 llvm::Value *This = ThisAddr.getPointer(); 556 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy()); 557 Ptr = Builder.CreateInBoundsGEP(Ptr, Adj); 558 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted"); 559 ThisPtrForCall = This; 560 561 // Load the function pointer. 562 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr"); 563 564 // If the LSB in the function pointer is 1, the function pointer points to 565 // a virtual function. 
566 llvm::Value *IsVirtual; 567 if (UseARMMethodPtrABI) 568 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1); 569 else 570 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1); 571 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual"); 572 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual); 573 574 // In the virtual path, the adjustment left 'This' pointing to the 575 // vtable of the correct base subobject. The "function pointer" is an 576 // offset within the vtable (+1 for the virtual flag on non-ARM). 577 CGF.EmitBlock(FnVirtual); 578 579 // Cast the adjusted this to a pointer to vtable pointer and load. 580 llvm::Type *VTableTy = Builder.getInt8PtrTy(); 581 CharUnits VTablePtrAlign = 582 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD, 583 CGF.getPointerAlign()); 584 llvm::Value *VTable = 585 CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD); 586 587 // Apply the offset. 588 // On ARM64, to reserve extra space in virtual member function pointers, 589 // we only pay attention to the low 32 bits of the offset. 590 llvm::Value *VTableOffset = FnAsInt; 591 if (!UseARMMethodPtrABI) 592 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1); 593 if (Use32BitVTableOffsetABI) { 594 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty); 595 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy); 596 } 597 VTable = Builder.CreateGEP(VTable, VTableOffset); 598 599 // Load the virtual function to call. 600 VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo()); 601 llvm::Value *VirtualFn = 602 Builder.CreateAlignedLoad(VTable, CGF.getPointerAlign(), 603 "memptr.virtualfn"); 604 CGF.EmitBranch(FnEnd); 605 606 // In the non-virtual path, the function pointer is actually a 607 // function pointer. 608 CGF.EmitBlock(FnNonVirtual); 609 llvm::Value *NonVirtualFn = 610 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn"); 611 612 // We're done. 
613 CGF.EmitBlock(FnEnd); 614 llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2); 615 CalleePtr->addIncoming(VirtualFn, FnVirtual); 616 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual); 617 618 CGCallee Callee(FPT, CalleePtr); 619 return Callee; 620 } 621 622 /// Compute an l-value by applying the given pointer-to-member to a 623 /// base object. 624 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress( 625 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr, 626 const MemberPointerType *MPT) { 627 assert(MemPtr->getType() == CGM.PtrDiffTy); 628 629 CGBuilderTy &Builder = CGF.Builder; 630 631 // Cast to char*. 632 Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty); 633 634 // Apply the offset, which we assume is non-null. 635 llvm::Value *Addr = 636 Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset"); 637 638 // Cast the address to the appropriate pointer type, adopting the 639 // address space of the base pointer. 640 llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType()) 641 ->getPointerTo(Base.getAddressSpace()); 642 return Builder.CreateBitCast(Addr, PType); 643 } 644 645 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer 646 /// conversion. 647 /// 648 /// Bitcast conversions are always a no-op under Itanium. 649 /// 650 /// Obligatory offset/adjustment diagram: 651 /// <-- offset --> <-- adjustment --> 652 /// |--------------------------|----------------------|--------------------| 653 /// ^Derived address point ^Base address point ^Member address point 654 /// 655 /// So when converting a base member pointer to a derived member pointer, 656 /// we add the offset to the adjustment because the address point has 657 /// decreased; and conversely, when converting a derived MP to a base MP 658 /// we subtract the offset from the adjustment because the address point 659 /// has increased. 
660 /// 661 /// The standard forbids (at compile time) conversion to and from 662 /// virtual bases, which is why we don't have to consider them here. 663 /// 664 /// The standard forbids (at run time) casting a derived MP to a base 665 /// MP when the derived MP does not point to a member of the base. 666 /// This is why -1 is a reasonable choice for null data member 667 /// pointers. 668 llvm::Value * 669 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF, 670 const CastExpr *E, 671 llvm::Value *src) { 672 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer || 673 E->getCastKind() == CK_BaseToDerivedMemberPointer || 674 E->getCastKind() == CK_ReinterpretMemberPointer); 675 676 // Under Itanium, reinterprets don't require any additional processing. 677 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src; 678 679 // Use constant emission if we can. 680 if (isa<llvm::Constant>(src)) 681 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src)); 682 683 llvm::Constant *adj = getMemberPointerAdjustment(E); 684 if (!adj) return src; 685 686 CGBuilderTy &Builder = CGF.Builder; 687 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer); 688 689 const MemberPointerType *destTy = 690 E->getType()->castAs<MemberPointerType>(); 691 692 // For member data pointers, this is just a matter of adding the 693 // offset if the source is non-null. 694 if (destTy->isMemberDataPointer()) { 695 llvm::Value *dst; 696 if (isDerivedToBase) 697 dst = Builder.CreateNSWSub(src, adj, "adj"); 698 else 699 dst = Builder.CreateNSWAdd(src, adj, "adj"); 700 701 // Null check. 702 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType()); 703 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull"); 704 return Builder.CreateSelect(isNull, src, dst); 705 } 706 707 // The this-adjustment is left-shifted by 1 on ARM. 
708 if (UseARMMethodPtrABI) { 709 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue(); 710 offset <<= 1; 711 adj = llvm::ConstantInt::get(adj->getType(), offset); 712 } 713 714 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj"); 715 llvm::Value *dstAdj; 716 if (isDerivedToBase) 717 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj"); 718 else 719 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj"); 720 721 return Builder.CreateInsertValue(src, dstAdj, 1); 722 } 723 724 llvm::Constant * 725 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E, 726 llvm::Constant *src) { 727 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer || 728 E->getCastKind() == CK_BaseToDerivedMemberPointer || 729 E->getCastKind() == CK_ReinterpretMemberPointer); 730 731 // Under Itanium, reinterprets don't require any additional processing. 732 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src; 733 734 // If the adjustment is trivial, we don't need to do anything. 735 llvm::Constant *adj = getMemberPointerAdjustment(E); 736 if (!adj) return src; 737 738 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer); 739 740 const MemberPointerType *destTy = 741 E->getType()->castAs<MemberPointerType>(); 742 743 // For member data pointers, this is just a matter of adding the 744 // offset if the source is non-null. 745 if (destTy->isMemberDataPointer()) { 746 // null maps to null. 747 if (src->isAllOnesValue()) return src; 748 749 if (isDerivedToBase) 750 return llvm::ConstantExpr::getNSWSub(src, adj); 751 else 752 return llvm::ConstantExpr::getNSWAdd(src, adj); 753 } 754 755 // The this-adjustment is left-shifted by 1 on ARM. 
756 if (UseARMMethodPtrABI) { 757 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue(); 758 offset <<= 1; 759 adj = llvm::ConstantInt::get(adj->getType(), offset); 760 } 761 762 llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1); 763 llvm::Constant *dstAdj; 764 if (isDerivedToBase) 765 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj); 766 else 767 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj); 768 769 return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1); 770 } 771 772 llvm::Constant * 773 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) { 774 // Itanium C++ ABI 2.3: 775 // A NULL pointer is represented as -1. 776 if (MPT->isMemberDataPointer()) 777 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true); 778 779 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0); 780 llvm::Constant *Values[2] = { Zero, Zero }; 781 return llvm::ConstantStruct::getAnon(Values); 782 } 783 784 llvm::Constant * 785 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT, 786 CharUnits offset) { 787 // Itanium C++ ABI 2.3: 788 // A pointer to data member is an offset from the base address of 789 // the class object containing it, represented as a ptrdiff_t 790 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()); 791 } 792 793 llvm::Constant * 794 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) { 795 return BuildMemberPointer(MD, CharUnits::Zero()); 796 } 797 798 llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD, 799 CharUnits ThisAdjustment) { 800 assert(MD->isInstance() && "Member function must not be static!"); 801 MD = MD->getCanonicalDecl(); 802 803 CodeGenTypes &Types = CGM.getTypes(); 804 805 // Get the function pointer (or index if this is a virtual function). 
806 llvm::Constant *MemPtr[2]; 807 if (MD->isVirtual()) { 808 uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD); 809 810 const ASTContext &Context = getContext(); 811 CharUnits PointerWidth = 812 Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0)); 813 uint64_t VTableOffset = (Index * PointerWidth.getQuantity()); 814 815 if (UseARMMethodPtrABI) { 816 // ARM C++ ABI 3.2.1: 817 // This ABI specifies that adj contains twice the this 818 // adjustment, plus 1 if the member function is virtual. The 819 // least significant bit of adj then makes exactly the same 820 // discrimination as the least significant bit of ptr does for 821 // Itanium. 822 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset); 823 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy, 824 2 * ThisAdjustment.getQuantity() + 1); 825 } else { 826 // Itanium C++ ABI 2.3: 827 // For a virtual function, [the pointer field] is 1 plus the 828 // virtual table offset (in bytes) of the function, 829 // represented as a ptrdiff_t. 830 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1); 831 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy, 832 ThisAdjustment.getQuantity()); 833 } 834 } else { 835 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>(); 836 llvm::Type *Ty; 837 // Check whether the function has a computable LLVM signature. 838 if (Types.isFuncTypeConvertible(FPT)) { 839 // The function has a computable LLVM signature; use the correct type. 840 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD)); 841 } else { 842 // Use an arbitrary non-function type to tell GetAddrOfFunction that the 843 // function type is incomplete. 844 Ty = CGM.PtrDiffTy; 845 } 846 llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty); 847 848 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy); 849 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy, 850 (UseARMMethodPtrABI ? 
/// Emit the {ptr, adj} constant pair for a member pointer whose value was
/// constant-evaluated to the APValue MP.  Dispatches to the function- or
/// data-member form; a null APValue becomes the ABI's null representation.
llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
                                                 QualType MPType) {
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  // No referenced declaration means the null member pointer.
  if (!MPD)
    return EmitNullMemberPointer(MPT);

  // Adjustment implied by the derived-to-base path stored in the APValue.
  CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
    return BuildMemberPointer(MD, ThisAdjustment);

  // Data member: the representation is the byte offset of the field within
  // the class, shifted by the path adjustment.
  CharUnits FieldOffset =
    getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
  return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
}

/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  // For inequality we compare with NE and, by De Morgan, swap the roles of
  // 'and' and 'or' when combining the sub-conditions below.
  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr.  This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null.  ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj.  If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}

/// Lower a member-pointer-to-bool conversion: emit a test for whether the
/// member pointer is non-null under this ABI's representation.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *MemPtr,
                                          const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  // For member data pointers, this is just a check against -1 (the null
  // representation; 0 is a valid field offset, so it can't be the null value).
  if (MPT->isMemberDataPointer()) {
    assert(MemPtr->getType() == CGM.PtrDiffTy);
    llvm::Value *NegativeOne =
      llvm::Constant::getAllOnesValue(MemPtr->getType());
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
  }

  // In Itanium, a member function pointer is not null if 'ptr' is not null.
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");

  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");

  // On ARM, a member function pointer is also non-null if the low bit of 'adj'
  // (the virtual bit) is set.
  if (UseARMMethodPtrABI) {
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
                                                  "memptr.isvirtual");
    Result = Builder.CreateOr(Result, IsVirtual);
  }

  return Result;
}
/// Decide whether a C++ record is returned indirectly (sret).  Under the
/// Itanium ABI, a type with a non-trivial destructor or copy constructor
/// must be returned through memory so its address identity is preserved.
bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
  const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  // Return indirectly if we have a non-trivial copy ctor or non-trivial dtor.
  // FIXME: Use canCopyArgument() when it is fixed to handle lazily declared
  // special members.
  if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor()) {
    auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
    FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
    return true;
  }
  return false;
}

/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset.
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  return MPT->isMemberFunctionPointer();
}

/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl =
        cast<CXXRecordDecl>(ElementType->getAs<RecordType>()->getDecl());
    llvm::Value *VTable =
        CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        VTable, -2, "complete-offset.ptr");
    llvm::Value *Offset =
      CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());

    // Apply the offset.
    llvm::Value *CompletePtr =
      CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
    CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws.  The cleanup is pushed before the
    // destructor call and popped after it below.
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  // The D1 (complete) variant is used with a global delete because the
  // cleanup above handles deallocation; otherwise the D0 (deleting)
  // variant both destroys and deallocates.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}

/// Emit a call (or invoke) to __cxa_rethrow.  When isNoReturn is set the
/// call is emitted as noreturn and terminates the block.
void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
  // void __cxa_rethrow();

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);

  llvm::Constant *Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");

  if (isNoReturn)
    CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
  else
    CGF.EmitRuntimeCallOrInvoke(Fn);
}

/// Get or declare the __cxa_allocate_exception runtime entry point.
static llvm::Constant *getAllocateExceptionFn(CodeGenModule &CGM) {
  // void *__cxa_allocate_exception(size_t thrown_size);

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
}

/// Get or declare the __cxa_throw runtime entry point.
static llvm::Constant *getThrowFn(CodeGenModule &CGM) {
  // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
  //                  void (*dest) (void *));

  llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
}

/// Lower a throw-expression: allocate the exception object, copy the
/// thrown value into it, and call __cxa_throw with the RTTI and (for
/// records with a non-trivial destructor) the complete destructor.
void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object.
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();

  llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");

  CharUnits ExnAlign = getAlignmentOfExnObject();
  CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
                                                         /*ForEH=*/true);

  // The address of the destructor.  If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!Record->hasTrivialDestructor()) {
      CXXDestructorDecl *DtorD = Record->getDestructor();
      Dtor = CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete);
      Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
    }
  }
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);

  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
}
/// Get or declare the __dynamic_cast runtime entry point, marked
/// nounwind/readonly since it neither throws nor writes memory.
static llvm::Constant *getItaniumDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);

  // Mark the function as nounwind readonly.
  llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
                                            llvm::Attribute::ReadOnly };
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(
      CGF.getLLVMContext(), llvm::AttributeSet::FunctionIndex, FuncAttrs);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
}

/// Get or declare the __cxa_bad_cast runtime entry point.
static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

/// \brief Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7].
///
/// Returns the byte offset of Src within Dst when Src is a unique public
/// non-virtual base, or one of the ABI's sentinel values:
///   -1: no hint (a virtual base appears on some path);
///   -2: Src is not a public base of Dst;
///   -3: Src appears as a public base more than once (ambiguous).
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst.  Record all inheritance paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (const CXXBasePath &Path : Paths) {
    if (Path.Access != AS_public)  // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (const CXXBasePathElement &PathElement : Path) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (PathElement.Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
      Offset += L.getBaseClassOffset(
          PathElement.Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}

/// Get or declare the __cxa_bad_typeid runtime entry point.
static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

/// typeid on a dereferenced pointer must null-check the operand
/// (a null glvalue operand throws std::bad_typeid).
bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
                                              QualType SrcRecordTy) {
  return IsDeref;
}

/// Emit a call to __cxa_bad_typeid and terminate the current block;
/// the call does not return.
void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadTypeidFn(CGF);
  CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

/// Emit polymorphic typeid: read the std::type_info pointer stored at
/// vtable slot -1 (immediately before the address point).
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
  llvm::Value *Value =
      CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);

  // Load the type info from entry -1 of the vtable.
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
}

/// dynamic_cast of a pointer must null-check its operand (a null pointer
/// casts to a null pointer); references are never null.
bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                                       QualType SrcRecordTy) {
  return SrcIsPtr;
}

/// Emit a call to __dynamic_cast, passing the static source/destination
/// RTTI and the src2dst offset hint.  For reference casts, also emit the
/// branch to __cxa_bad_cast on failure (null result).
llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
      PtrDiffLTy,
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}

/// Emit dynamic_cast<void*>: add the offset-to-top stored at vtable
/// entry -2 to obtain the complete-object pointer, with no runtime call.
llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy,
                                                  QualType DestTy) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->getAs<RecordType>()->getDecl());
  // Get the vtable pointer.
  llvm::Value *VTable = CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(),
                                         ClassDecl);

  // Get the offset-to-top from the vtable.
  llvm::Value *OffsetToTop =
      CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
  OffsetToTop =
    CGF.Builder.CreateAlignedLoad(OffsetToTop, CGF.getPointerAlign(),
                                  "offset.to.top");

  // Finally, add the offset to the pointer.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

  return CGF.Builder.CreateBitCast(Value, DestLTy);
}
/// Emit a call to __cxa_bad_cast and terminate the block; the call does
/// not return.  Returns true to report that a call was emitted.
bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
  return true;
}

/// Load the offset of a virtual base from the object's vtable.  The
/// vbase-offset slot location (a negative offset from the address point)
/// comes from the Itanium vtable context.
llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         Address This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                               BaseClassDecl);

  llvm::Value *VBaseOffsetPtr =
    CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
                                   "vbase.offset.ptr");
  VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
                                             CGM.PtrDiffTy->getPointerTo());

  llvm::Value *VBaseOffset =
    CGF.Builder.CreateAlignedLoad(VBaseOffsetPtr, CGF.getPointerAlign(),
                                  "vbase.offset");

  return VBaseOffset;
}

/// Emit the Itanium constructor variants (C2 base, and C1 complete when
/// the class is not abstract).
void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
  // Just make sure we're in sync with TargetCXXABI.
  assert(CGM.getTarget().getCXXABI().hasConstructorVariants());

  // The constructor used for constructing this as a base class;
  // ignores virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));

  // The constructor used for constructing this as a complete class;
  // constructs the virtual bases, then calls the base constructor.
  if (!D->getParent()->isAbstract()) {
    // We don't need to emit the complete ctor if the class is abstract.
    CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
  }
}

/// Add ABI-specific implicit parameters to a structor's Clang-level
/// signature: base-object variants of classes with virtual bases take a
/// VTT right after 'this'.
void
ItaniumCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
                                      SmallVectorImpl<CanQualType> &ArgTys) {
  ASTContext &Context = getContext();

  // All parameters are already in place except VTT, which goes after 'this'.
  // These are Clang types, so we don't need to worry about sret yet.

  // Check if we need to add a VTT parameter (which has type void **).
  if (T == StructorType::Base && MD->getParent()->getNumVBases() != 0)
    ArgTys.insert(ArgTys.begin() + 1,
                  Context.getPointerType(Context.VoidPtrTy));
}

/// Emit the Itanium destructor variants (D2 base, D1 complete, and D0
/// deleting for virtual destructors).
void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
  // The destructor used for destructing this as a base class; ignores
  // virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));

  // The destructor used for destructing this as a most-derived class;
  // call the base destructor and then destructs any virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));

  // The destructor in a virtual table is always a 'deleting'
  // destructor, which calls the complete destructor and then uses the
  // appropriate operator delete.
  if (D->isVirtual())
    CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
}

/// Add the implicit 'vtt' parameter declaration to a structor's IR-level
/// argument list when the current GlobalDecl requires one.
void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              QualType &ResTy,
                                              FunctionArgList &Params) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    QualType T = Context.getPointerType(Context.VoidPtrTy);
    ImplicitParamDecl *VTTDecl
      = ImplicitParamDecl::Create(Context, nullptr, MD->getLocation(),
                                  &Context.Idents.get("vtt"), T);
    // VTT goes right after 'this'.
    Params.insert(Params.begin() + 1, VTTDecl);
    getStructorImplicitParamDecl(CGF) = VTTDecl;
  }
}

/// Set up 'this' (and 'vtt' if present) at the start of an instance
/// method, and pre-store 'this' into the return slot for this-returning
/// functions.
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  /// Initialize the 'this' slot.
  EmitThisParam(CGF);

  /// Initialize the 'vtt' slot if needed.
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
  }

  /// If this is a function that the ABI specifies returns 'this', initialize
  /// the return slot to 'this' at the start of the function.
  ///
  /// Unlike the setting of return types, this is done within the ABI
  /// implementation instead of by clients of CGCXXABI because:
  /// 1) getThisValue is currently protected
  /// 2) in theory, an ABI could implement 'this' returns some other way;
  ///    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}

/// Add the implicit VTT argument to a constructor call when needed.
/// Returns the number of arguments added (0 or 1).
unsigned ItaniumCXXABI::addImplicitConstructorArgs(
    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
    bool ForVirtualBase, bool Delegating, CallArgList &Args) {
  if (!NeedsVTTParameter(GlobalDecl(D, Type)))
    return 0;

  // Insert the implicit 'vtt' argument as the second argument.
  llvm::Value *VTT =
      CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
  Args.insert(Args.begin() + 1,
              CallArg(RValue::get(VTT), VTTTy, /*needscopy=*/false));
  return 1;  // Added one arg.
}

/// Emit a direct (non-virtual-dispatch) destructor call, routing through
/// the Apple kext mechanism when that ABI variant requires it.
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, Address This) {
  GlobalDecl GD(DD, Type);
  llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);

  CGCallee Callee;
  if (getContext().getLangOpts().AppleKext &&
      Type != Dtor_Base && DD->isVirtual())
    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
  else
    Callee =
      CGCallee::forDirect(CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)),
                          DD);

  CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(),
                                  This.getPointer(), VTT, VTTTy,
                                  nullptr, nullptr);
}

/// Fill in the initializer of RD's vtable global (if not already done),
/// then fix up its linkage, comdat, visibility and alignment.
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder Builder(CGM);
  auto Components = Builder.beginStruct();
  CGVT.createVTableInitializer(Components, VTLayout, RTTI);
  Components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // Set the right visibility.
  CGM.setGlobalVisibility(VTable, RD);

  // Use pointer alignment for the vtable. Otherwise we would align them based
  // on the size of the initializer which doesn't make sense as only single
  // values are read.
  unsigned PAlign = CGM.getTarget().getPointerAlign(0);
  VTable->setAlignment(getContext().toCharUnitsFromBits(PAlign).getQuantity());

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD->hasAttr<DLLExportAttr>());

  if (!VTable->isDeclarationForLinker())
    CGM.EmitVTableTypeMetadata(VTable, VTLayout);
}
/// A vptr store needs the virtual offset (read via the VTT) only when it
/// belongs to a virtual base and the current structor takes a VTT.
bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
    CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
  if (Vptr.NearestVBase == nullptr)
    return false;
  return NeedsVTTParameter(CGF.CurGD);
}

/// Compute the address point to store into a vptr while inside a
/// constructor/destructor: loaded from the VTT when the class needs one,
/// otherwise the constant address point into the vtable global.
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {

  if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
      NeedsVTTParameter(CGF.CurGD)) {
    return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
                                                  NearestVBase);
  }
  return getVTableAddressPoint(Base, VTableClass);
}

/// Compute the constant address point for Base within VTableClass's
/// vtable group, as an inbounds GEP into the vtable global.
llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  VTableLayout::AddressPointLocation AddressPoint =
      CGM.getItaniumVTableContext()
          .getVTableLayout(VTableClass)
          .getAddressPoint(Base);
  llvm::Value *Indices[] = {
    llvm::ConstantInt::get(CGM.Int32Ty, 0),
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
  };

  // InRangeIndex=1 tells the optimizer the GEP stays within the selected
  // vtable of the group.
  return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
                                              Indices, /*InBounds=*/true,
                                              /*InRangeIndex=*/1);
}

/// Load the address point for Base out of the VTT (secondary vpointer
/// table) passed to the current structor.
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");

  // Get the secondary vpointer index.
  uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

  /// Load the VTT.
  llvm::Value *VTT = CGF.LoadCXXVTT();
  if (VirtualPointerIndex)
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);

  // And load the address point from the VTT.
  return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
}

/// Outside a structor the address point is simply the constant one.
llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
    BaseSubobject Base, const CXXRecordDecl *VTableClass) {
  return getVTableAddressPoint(Base, VTableClass);
}

/// Get (creating and caching on first use) the global variable for RD's
/// vtable, with mangled name, DLL storage class, and deferred emission.
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  // Memoized per record in the VTables map.
  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage);
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  if (RD->hasAttr<DLLImportAttr>())
    VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
  else if (RD->hasAttr<DLLExportAttr>())
    VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);

  return VTable;
}

/// Load a virtual function pointer out of the object's vtable, going
/// through the CFI type-checked load when it is enabled.
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  GD = GD.getCanonicalDecl();
  Ty = Ty->getPointerTo()->getPointerTo();
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc;
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    // CFI: the checked load takes a byte offset into the vtable.
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable,
        VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncPtr =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
    auto *VFuncLoad =
        CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we will have 2
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers)
      VFuncLoad->setMetadata(
          llvm::LLVMContext::MD_invariant_load,
          llvm::MDNode::get(CGM.getLLVMContext(),
                            llvm::ArrayRef<llvm::Metadata *>()));
    VFunc = VFuncLoad;
  }

  CGCallee Callee(MethodDecl, VFunc);
  return Callee;
}

/// Emit a destructor call dispatched through the vtable.  Only the
/// deleting (D0) and complete (D1) variants appear in vtables.  Always
/// returns null (no implicit-param value to propagate).
llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
    Address This, const CXXMemberCallExpr *CE) {
  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
      Dtor, getFromDtorType(DtorType));
  llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
  CGCallee Callee =
      getVirtualFunctionPointer(CGF, GlobalDecl(Dtor, DtorType), This, Ty,
                                CE ? CE->getLocStart() : SourceLocation());

  CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(),
                                  This.getPointer(), /*ImplicitParam=*/nullptr,
                                  QualType(), CE, nullptr);
  return nullptr;
}
CE->getLocStart() : SourceLocation()); 1668 1669 CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(), 1670 This.getPointer(), /*ImplicitParam=*/nullptr, 1671 QualType(), CE, nullptr); 1672 return nullptr; 1673 } 1674 1675 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) { 1676 CodeGenVTables &VTables = CGM.getVTables(); 1677 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD); 1678 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD); 1679 } 1680 1681 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const { 1682 // We don't emit available_externally vtables if we are in -fapple-kext mode 1683 // because kext mode does not permit devirtualization. 1684 if (CGM.getLangOpts().AppleKext) 1685 return false; 1686 1687 // If we don't have any inline virtual functions, and if vtable is not hidden, 1688 // then we are safe to emit available_externally copy of vtable. 1689 // FIXME we can still emit a copy of the vtable if we 1690 // can emit definition of the inline functions. 1691 return !hasAnyVirtualInlineFunction(RD) && !isVTableHidden(RD); 1692 } 1693 static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF, 1694 Address InitialPtr, 1695 int64_t NonVirtualAdjustment, 1696 int64_t VirtualAdjustment, 1697 bool IsReturnAdjustment) { 1698 if (!NonVirtualAdjustment && !VirtualAdjustment) 1699 return InitialPtr.getPointer(); 1700 1701 Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty); 1702 1703 // In a base-to-derived cast, the non-virtual adjustment is applied first. 1704 if (NonVirtualAdjustment && !IsReturnAdjustment) { 1705 V = CGF.Builder.CreateConstInBoundsByteGEP(V, 1706 CharUnits::fromQuantity(NonVirtualAdjustment)); 1707 } 1708 1709 // Perform the virtual adjustment if we have one. 
1710 llvm::Value *ResultPtr; 1711 if (VirtualAdjustment) { 1712 llvm::Type *PtrDiffTy = 1713 CGF.ConvertType(CGF.getContext().getPointerDiffType()); 1714 1715 Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy); 1716 llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr); 1717 1718 llvm::Value *OffsetPtr = 1719 CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment); 1720 1721 OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo()); 1722 1723 // Load the adjustment offset from the vtable. 1724 llvm::Value *Offset = 1725 CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign()); 1726 1727 // Adjust our pointer. 1728 ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset); 1729 } else { 1730 ResultPtr = V.getPointer(); 1731 } 1732 1733 // In a derived-to-base conversion, the non-virtual adjustment is 1734 // applied second. 1735 if (NonVirtualAdjustment && IsReturnAdjustment) { 1736 ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr, 1737 NonVirtualAdjustment); 1738 } 1739 1740 // Cast back to the original type. 
1741 return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType()); 1742 } 1743 1744 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF, 1745 Address This, 1746 const ThisAdjustment &TA) { 1747 return performTypeAdjustment(CGF, This, TA.NonVirtual, 1748 TA.Virtual.Itanium.VCallOffsetOffset, 1749 /*IsReturnAdjustment=*/false); 1750 } 1751 1752 llvm::Value * 1753 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret, 1754 const ReturnAdjustment &RA) { 1755 return performTypeAdjustment(CGF, Ret, RA.NonVirtual, 1756 RA.Virtual.Itanium.VBaseOffsetOffset, 1757 /*IsReturnAdjustment=*/true); 1758 } 1759 1760 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF, 1761 RValue RV, QualType ResultType) { 1762 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl())) 1763 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType); 1764 1765 // Destructor thunks in the ARM ABI have indeterminate results. 1766 llvm::Type *T = CGF.ReturnValue.getElementType(); 1767 RValue Undef = RValue::get(llvm::UndefValue::get(T)); 1768 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType); 1769 } 1770 1771 /************************** Array allocation cookies **************************/ 1772 1773 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) { 1774 // The array cookie is a size_t; pad that up to the element alignment. 1775 // The cookie is actually right-justified in that space. 1776 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes), 1777 CGM.getContext().getTypeAlignInChars(elementType)); 1778 } 1779 1780 Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF, 1781 Address NewPtr, 1782 llvm::Value *NumElements, 1783 const CXXNewExpr *expr, 1784 QualType ElementType) { 1785 assert(requiresArrayCookie(expr)); 1786 1787 unsigned AS = NewPtr.getAddressSpace(); 1788 1789 ASTContext &Ctx = getContext(); 1790 CharUnits SizeSize = CGF.getSizeSize(); 1791 1792 // The size of the cookie. 
  // size_t padded up to the element alignment; must agree with what
  // getArrayCookieSizeImpl reports (asserted below).
  CharUnits CookieSize =
      std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie.
  Address CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);

  // Write the number of elements into the appropriate slot.
  Address NumElementsPtr =
      CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Handle the array cookie specially in ASan.
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
      expr->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
    // The store to the CookiePtr does not need to be instrumented.
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
    llvm::Constant *F =
        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
  }

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
}

llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                Address allocPtr,
                                                CharUnits cookieSize) {
  // The element count is right-justified in the cookie.
  Address numElementsPtr = allocPtr;
  CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
  if (!numElementsOffset.isZero())
    numElementsPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);

  unsigned AS = allocPtr.getAddressSpace();
  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
  if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
    return CGF.Builder.CreateLoad(numElementsPtr);
  // In asan mode emit a function call instead of a regular load and let the
  // run-time deal with it: if the shadow is properly poisoned return the
  // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
  // We can't simply ignore this load using nosanitize metadata because
  // the metadata may be lost.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
  llvm::Constant *F =
      CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
  return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
}

CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // ARM says that the cookie is always:
  //   struct array_cookie {
  //     std::size_t element_size; // element_size != 0
  //     std::size_t element_count;
  //   };
  // But the base ABI doesn't give anything an alignment greater than
  // 8, so we can dismiss this as typical ABI-author blindness to
  // actual language complexity and round up to the element alignment.
  return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
                  CGM.getContext().getTypeAlignInChars(elementType));
}

Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                         Address newPtr,
                                         llvm::Value *numElements,
                                         const CXXNewExpr *expr,
                                         QualType elementType) {
  assert(requiresArrayCookie(expr));

  // The cookie is always at the start of the buffer.
  Address cookie = newPtr;

  // The first element is the element size.
  cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
  llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
                 getContext().getTypeSizeInChars(elementType).getQuantity());
  CGF.Builder.CreateStore(elementSize, cookie);

  // The second element is the element count.
  cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1, CGF.getSizeSize());
  CGF.Builder.CreateStore(numElements, cookie);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
  return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
}

llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                            Address allocPtr,
                                            CharUnits cookieSize) {
  // The number of elements is at offset sizeof(size_t) relative to
  // the allocated pointer.
  Address numElementsPtr
    = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());

  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
  return CGF.Builder.CreateLoad(numElementsPtr);
}

/*********************** Static local initialization **************************/

static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
                                         llvm::PointerType *GuardPtrTy) {
  // int __cxa_guard_acquire(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
                            GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire",
                                   llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                                 llvm::Attribute::NoUnwind));
}

static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
                                         llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_release(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release",
                                   llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                                 llvm::Attribute::NoUnwind));
}

static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
                                       llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_abort(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort",
                                   llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                                 llvm::Attribute::NoUnwind));
}

namespace {
  // EH cleanup that calls __cxa_guard_abort along the exceptional edge of a
  // guarded static initialization, so another thread may retry.
  struct CallGuardAbort final : EHScopeStack::Cleanup {
    llvm::GlobalVariable *Guard;
    CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}

    void
Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
                                  Guard);
    }
  };
}

/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      D.isInline() &&
      !isTemplateInstantiation(D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&
                    !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
    } else {
      guardTy = CGF.Int64Ty;
      guardAlignment = CharUnits::fromQuantity(
          CGM.getDataLayout().getABITypeAlignment(guardTy));
    }
  }
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo();

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(&D, out);
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage and visibility from the guarded variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setVisibility(var->getVisibility());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());
    guard->setAlignment(guardAlignment.getQuantity());

    // The ABI says: "It is suggested that it be emitted in the same COMDAT
    // group as the associated data object." In practice, this doesn't work for
    // non-ELF object formats, so only do it for ELF.
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C &&
        CGM.getTarget().getTriple().isOSBinFormatELF()) {
      guard->setComdat(C);
      // An inline variable's guard function is run from the per-TU
      // initialization function, not via a dedicated global ctor function, so
      // we can't put it in a comdat.
      if (!NonTemplateInline)
        CGF.CurFn->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
    }

    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  Address guardAddr = Address(guard, guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }

  // Load the first byte of the guard variable.
  llvm::LoadInst *LI =
      Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));

  // Itanium ABI:
  //   An implementation supporting thread-safety on multiprocessor
  //   systems must also guarantee that references to the initialized
  //   object do not occur before the load of the initialization flag.
  //
  // In LLVM, we do this by marking the load Acquire.
  if (threadsafe)
    LI->setAtomic(llvm::AtomicOrdering::Acquire);

  // For ARM, we should only check the first bit, rather than the entire byte:
  //
  // ARM C++ ABI 3.2.3.1:
  //   To support the potential use of initialization guard variables
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
  //   synchronizing instructions we define a static initialization
  //   guard variable to be a 4-byte aligned, 4-byte word with the
  //   following inline access protocol.
  //     #define INITIALIZED 1
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
  //       if (__cxa_guard_acquire(&obj_guard))
  //         ...
  //     }
  //
  // and similarly for ARM64:
  //
  // ARM64 C++ ABI 3.2.2:
  //   This ABI instead only specifies the value bit 0 of the static guard
  //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
  //   variable is not initialized and 1 when it is.
  llvm::Value *V =
      (UseARMGuardVarABI && !useInt8GuardVariable)
          ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
          : LI;
  llvm::Value *isInitialized = Builder.CreateIsNull(V, "guard.uninitialized");

  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  Builder.CreateCondBr(isInitialized, InitCheckBlock, EndBlock);

  CGF.EmitBlock(InitCheckBlock);

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release.  This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
                                guardAddr.getPointer());
  } else {
    // Non-threadsafe: just flag the guard as initialized directly.
    Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
  }

  CGF.EmitBlock(EndBlock);
}

/// Register a global destructor using __cxa_atexit (or the TLS variants
/// _tlv_atexit / __cxa_thread_atexit for thread-local variables).
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::Constant *dtor,
                                        llvm::Constant *addr,
                                        bool TLS) {
  const char *Name = "__cxa_atexit";
  if (TLS) {
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC.  Go ahead and cast it to the
  // right prototype.
  llvm::Type *dtorTy =
    llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
  llvm::FunctionType *atexitTy =
    llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function.
  llvm::Constant *atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
    fn->setDoesNotThrow();

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
    CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");

  llvm::Value *args[] = {
    llvm::ConstantExpr::getBitCast(dtor, dtorTy),
    llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
    handle
  };
  CGF.EmitNounwindRuntimeCall(atexit, args);
}

/// Register a global destructor as best as we know how.
void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
                                       const VarDecl &D,
                                       llvm::Constant *dtor,
                                       llvm::Constant *addr) {
  // Use __cxa_atexit if available.
  if (CGM.getCodeGenOpts().CXAAtExit)
    return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());

  // Without __cxa_thread_atexit we have no way to run a TLS destructor.
  if (D.getTLSKind())
    CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");

  // In Apple kexts, we want to add a global destructor entry.
  // FIXME: shouldn't this be guarded by some variable?
  if (CGM.getLangOpts().AppleKext) {
    // Generate a global destructor entry.
    return CGM.AddCXXDtorEntry(dtor, addr);
  }

  // Fall back to plain atexit().
  CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
}

static bool isThreadWrapperReplaceable(const VarDecl *VD,
                                       CodeGen::CodeGenModule &CGM) {
  assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
  // Darwin prefers to have references to thread local variables to go through
  // the thread wrapper instead of directly referencing the backing variable.
  return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
         CGM.getTarget().getTriple().isOSDarwin();
}

/// Get the appropriate linkage for the wrapper function. This is essentially
/// the weak form of the variable's linkage; every translation unit which needs
/// the wrapper emits a copy, and we want the linker to merge them.
static llvm::GlobalValue::LinkageTypes
getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
  llvm::GlobalValue::LinkageTypes VarLinkage =
      CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false);

  // For internal linkage variables, we don't need an external or weak wrapper.
  if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
    return VarLinkage;

  // If the thread wrapper is replaceable, give it appropriate linkage.
  if (isThreadWrapperReplaceable(VD, CGM))
    if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
        !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
      return VarLinkage;
  return llvm::GlobalValue::WeakODRLinkage;
}

/// Find or emit the wrapper function through which all accesses to the
/// thread_local variable VD are routed; the wrapper runs the variable's
/// dynamic initializer (if needed) and returns a pointer to the variable
/// (or, for a reference, to its referent).
llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::Value *Val) {
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
  }

  // FIXME: If VD is a definition, we should regenerate the function attributes
  // before returning.
  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
    return cast<llvm::Function>(V);

  // The wrapper returns a pointer to the (non-reference) variable type.
  QualType RetQT = VD->getType();
  if (RetQT->isReferenceType())
    RetQT = RetQT.getNonReferenceType();

  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      getContext().getPointerType(RetQT), FunctionArgList());

  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Wrapper =
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
                             WrapperName.str(), &CGM.getModule());

  CGM.SetLLVMFunctionAttributes(nullptr, FI, Wrapper);

  if (VD->hasDefinition())
    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);

  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage() && !(isThreadWrapperReplaceable(VD, CGM) &&
      !llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) &&
      !llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage())))
    Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);

  if (isThreadWrapperReplaceable(VD, CGM)) {
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
  }
  return Wrapper;
}

void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;
  if (!CXXThreadLocalInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init", FI,
                                                      SourceLocation(),
                                                      /*TLS=*/true);
    // The per-thread i8 guard that keeps __tls_init from running twice.
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getQuantity());

    CodeGenFunction(CGM)
        .GenerateCXXGlobalInitFunc(InitFunc, CXXThreadLocalInits,
                                   Address(Guard, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
    }
  }
  // Now emit (or declare) a per-variable init function and fill in the body
  // of each variable's thread wrapper.
  for (const VarDecl *VD : CXXThreadLocals) {
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));

    // Some targets require that all access to thread local variables go
    // through the thread wrapper.  This means that we cannot attempt to
    // create a thread wrapper or a thread helper.
    if (isThreadWrapperReplaceable(VD, CGM) && !VD->hasDefinition())
      continue;

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
    }

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      if (InitFunc)
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
                                         InitFunc);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.VoidTy, false);
      Init = llvm::Function::Create(
          FnTy, llvm::GlobalVariable::ExternalWeakLinkage, InitFnName.str(),
          &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(nullptr, FI, cast<llvm::Function>(Init));
    }

    if (Init)
      Init->setVisibility(Var->getVisibility());

    // Build the wrapper body: call the init function (conditionally, if we
    // only have a weak declaration), then return the variable's address.
    llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var);
    llvm::LLVMContext &Context = CGM.getModule().getContext();
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (InitIsInitFunc) {
      if (Init) {
        llvm::CallInst *CallVal = Builder.CreateCall(Init);
        if (isThreadWrapperReplaceable(VD, CGM))
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      }
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Var;
    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
      Val = Builder.CreateAlignedLoad(Val, Align);
    }
    if (Val->getType() != Wrapper->getReturnType())
      Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
          Val, Wrapper->getReturnType(), "");
    Builder.CreateRet(Val);
  }
}

/// Emit an lvalue for a thread_local variable as a call to its thread
/// wrapper function, which yields the variable's address (or, for a
/// reference, the referent's address).
LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
                                                   const VarDecl *VD,
                                                   QualType LValType) {
  llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
  llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);

  llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
  CallVal->setCallingConv(Wrapper->getCallingConv());

  LValue LV;
  if (VD->getType()->isReferenceType())
    LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
  else
    LV = CGF.MakeAddrLValue(CallVal, LValType,
                            CGF.getContext().getDeclAlign(VD));
  // FIXME: need setObjCGCLValueClass?
  return LV;
}

/// Return whether the given global decl needs a VTT parameter, which it does
/// if it's a base constructor or destructor with virtual bases.
bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());

  // We don't have any virtual bases, just return early.
  if (!MD->getParent()->getNumVBases())
    return false;

  // Check if we have a base constructor.
  if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
    return true;

  // Check if we have a base destructor.
  if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
    return true;

  return false;
}

namespace {
class ItaniumRTTIBuilder {
  CodeGenModule &CGM;  // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI;  // The ABI whose RTTI layout we emit.

  /// Fields - The fields of the RTTI descriptor currently being built.
  SmallVector<llvm::Constant *, 16> Fields;

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type.
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for
  /// single inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

public:
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags.
  enum {
    /// PTI_Const - Type has const qualifier.
    PTI_Const = 0x1,

    /// PTI_Volatile - Type has volatile qualifier.
    PTI_Volatile = 0x2,

    /// PTI_Restrict - Type has restrict qualifier.
    PTI_Restrict = 0x4,

    /// PTI_Incomplete - Type is incomplete.
    PTI_Incomplete = 0x8,

    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
    /// (in pointer to member).
    PTI_ContainingClassIncomplete = 0x10,

    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
    //PTI_TransactionSafe = 0x20,

    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
    PTI_Noexcept = 0x40,
  };

  // VMI type info flags.
  enum {
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
    VMI_NonDiamondRepeat = 0x1,

    /// VMI_DiamondShaped - Class is diamond shaped.
    VMI_DiamondShaped = 0x2
  };

  // Base class type info flags.
  enum {
    /// BCTI_Virtual - Base class is virtual.
    BCTI_Virtual = 0x1,

    /// BCTI_Public - Base class is public.
    BCTI_Public = 0x2
  };

  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
  ///
  /// \param Force - true to force the creation of this RTTI value
  /// \param DLLExport - true to mark the RTTI value as DLLExport
  llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false,
                                bool DLLExport = false);
};
}

llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
    QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);

  // We know that the mangled name of the type starts at index 4 of the
  // mangled name of the typename, so we can just index into it in order to
  // get the mangled name of the type.
  llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
                                                            Name.substr(4));

  llvm::GlobalVariable *GV =
    CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);

  GV->setInitializer(Init);

  return GV;
}

llvm::Constant *
ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
  // Mangle the RTTI name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);

  // Look for an existing global.
  llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);

  if (!GV) {
    // Create a new global variable.
    GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
                                  /*Constant=*/true,
                                  llvm::GlobalValue::ExternalLinkage, nullptr,
                                  Name);
    // An externally-defined class's RTTI may live in an imported DLL.
    if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
      if (RD->hasAttr<DLLImportAttr>())
        GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
    }
  }

  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}

/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
/// info for that type is defined in the standard library.
static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
  // Itanium C++ ABI 2.9.2:
  //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
  //   the run-time support library. Specifically, the run-time support
  //   library should contain type_info objects for the types X, X* and
  //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
  //   unsigned char, signed char, short, unsigned short, int, unsigned int,
  //   long, unsigned long, long long, unsigned long long, float, double,
  //   long double, char16_t, char32_t, and the IEEE 754r decimal and
  //   half-precision floating point types.
  //
  // GCC also emits RTTI for __int128.
  // FIXME: We do not emit RTTI information for decimal types here.

  // Types added here must also be added to EmitFundamentalRTTIDescriptors.
  switch (Ty->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::NullPtr:
    case BuiltinType::Bool:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char_U:
    case BuiltinType::Char_S:
    case BuiltinType::UChar:
    case BuiltinType::SChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::Half:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
    case BuiltinType::Float128:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      return true;

#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLNDRange:
    case BuiltinType::OCLReserveID:
      return false;

    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define
PLACEHOLDER_TYPE(Id, SingletonId) \ 2624 case BuiltinType::Id: 2625 #include "clang/AST/BuiltinTypes.def" 2626 llvm_unreachable("asking for RRTI for a placeholder type!"); 2627 2628 case BuiltinType::ObjCId: 2629 case BuiltinType::ObjCClass: 2630 case BuiltinType::ObjCSel: 2631 llvm_unreachable("FIXME: Objective-C types are unsupported!"); 2632 } 2633 2634 llvm_unreachable("Invalid BuiltinType Kind!"); 2635 } 2636 2637 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) { 2638 QualType PointeeTy = PointerTy->getPointeeType(); 2639 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy); 2640 if (!BuiltinTy) 2641 return false; 2642 2643 // Check the qualifiers. 2644 Qualifiers Quals = PointeeTy.getQualifiers(); 2645 Quals.removeConst(); 2646 2647 if (!Quals.empty()) 2648 return false; 2649 2650 return TypeInfoIsInStandardLibrary(BuiltinTy); 2651 } 2652 2653 /// IsStandardLibraryRTTIDescriptor - Returns whether the type 2654 /// information for the given type exists in the standard library. 2655 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) { 2656 // Type info for builtin types is defined in the standard library. 2657 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty)) 2658 return TypeInfoIsInStandardLibrary(BuiltinTy); 2659 2660 // Type info for some pointer types to builtin types is defined in the 2661 // standard library. 2662 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty)) 2663 return TypeInfoIsInStandardLibrary(PointerTy); 2664 2665 return false; 2666 } 2667 2668 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for 2669 /// the given type exists somewhere else, and that we should not emit the type 2670 /// information in this translation unit. Assumes that it is not a 2671 /// standard-library type. 
2672 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM, 2673 QualType Ty) { 2674 ASTContext &Context = CGM.getContext(); 2675 2676 // If RTTI is disabled, assume it might be disabled in the 2677 // translation unit that defines any potential key function, too. 2678 if (!Context.getLangOpts().RTTI) return false; 2679 2680 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) { 2681 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl()); 2682 if (!RD->hasDefinition()) 2683 return false; 2684 2685 if (!RD->isDynamicClass()) 2686 return false; 2687 2688 // FIXME: this may need to be reconsidered if the key function 2689 // changes. 2690 // N.B. We must always emit the RTTI data ourselves if there exists a key 2691 // function. 2692 bool IsDLLImport = RD->hasAttr<DLLImportAttr>(); 2693 if (CGM.getVTables().isVTableExternal(RD)) 2694 return IsDLLImport ? false : true; 2695 2696 if (IsDLLImport) 2697 return true; 2698 } 2699 2700 return false; 2701 } 2702 2703 /// IsIncompleteClassType - Returns whether the given record type is incomplete. 2704 static bool IsIncompleteClassType(const RecordType *RecordTy) { 2705 return !RecordTy->getDecl()->isCompleteDefinition(); 2706 } 2707 2708 /// ContainsIncompleteClassType - Returns whether the given type contains an 2709 /// incomplete class type. This is true if 2710 /// 2711 /// * The given type is an incomplete class type. 2712 /// * The given type is a pointer type whose pointee type contains an 2713 /// incomplete class type. 2714 /// * The given type is a member pointer type whose class is an incomplete 2715 /// class type. 2716 /// * The given type is a member pointer type whoise pointee type contains an 2717 /// incomplete class type. 2718 /// is an indirect or direct pointer to an incomplete class type. 
static bool ContainsIncompleteClassType(QualType Ty) {
  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
    if (IsIncompleteClassType(RecordTy))
      return true;
  }

  // Recurse through pointers: T** to an incomplete T also counts.
  if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
    return ContainsIncompleteClassType(PointerTy->getPointeeType());

  if (const MemberPointerType *MemberPointerTy =
      dyn_cast<MemberPointerType>(Ty)) {
    // Check if the class type is incomplete.
    const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
    if (IsIncompleteClassType(ClassType))
      return true;

    // The pointee (field or method type) may itself mention an incomplete
    // class.
    return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
  }

  return false;
}

// CanUseSingleInheritance - Return whether the given record decl has a "single,
// public, non-virtual base at offset zero (i.e. the derived class is dynamic
// iff the base is)", according to Itanium C++ ABI, 2.9.5p6b.
static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
  // Check the number of bases.
  if (RD->getNumBases() != 1)
    return false;

  // Get the base.
  CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();

  // Check that the base is not virtual.
  if (Base->isVirtual())
    return false;

  // Check that the base is public.
  if (Base->getAccessSpecifier() != AS_public)
    return false;

  // Check that the class is dynamic iff the base is.
  // (An empty base is laid out at offset zero regardless, so the dynamic-ness
  // mismatch check only applies to non-empty bases.)
  const CXXRecordDecl *BaseDecl =
    cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
  if (!BaseDecl->isEmpty() &&
      BaseDecl->isDynamicClass() != RD->isDynamicClass())
    return false;

  return true;
}

// Push the address of the appropriate libsupc++/libc++abi type_info vtable
// (offset to its address point) onto Fields as the descriptor's first member.
void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
  // abi::__class_type_info.
  static const char * const ClassTypeInfo =
    "_ZTVN10__cxxabiv117__class_type_infoE";
  // abi::__si_class_type_info.
  static const char * const SIClassTypeInfo =
    "_ZTVN10__cxxabiv120__si_class_type_infoE";
  // abi::__vmi_class_type_info.
  static const char * const VMIClassTypeInfo =
    "_ZTVN10__cxxabiv121__vmi_class_type_infoE";

  const char *VTableName = nullptr;

  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
    llvm_unreachable("Undeduced auto type shouldn't get here");

  case Type::Pipe:
    llvm_unreachable("Pipe types shouldn't get here");

  case Type::Builtin:
  // GCC treats vector and complex types as fundamental types.
  case Type::Vector:
  case Type::ExtVector:
  case Type::Complex:
  case Type::Atomic:
  // FIXME: GCC treats block pointers as fundamental types?!
  case Type::BlockPointer:
    // abi::__fundamental_type_info.
    VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // abi::__array_type_info.
    VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // abi::__function_type_info.
    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
    break;

  case Type::Enum:
    // abi::__enum_type_info.
    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());

    // Pick the type_info flavor matching the class's inheritance shape; this
    // must agree with the fields emitted in BuildTypeInfo's Type::Record case.
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      VTableName = ClassTypeInfo;
    } else if (CanUseSingleInheritance(RD)) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = VMIClassTypeInfo;
    }

    break;
  }

  case Type::ObjCObject:
    // Ignore protocol qualifiers.
    Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();

    // Handle id and Class.
    if (isa<BuiltinType>(Ty)) {
      VTableName = ClassTypeInfo;
      break;
    }

    assert(isa<ObjCInterfaceType>(Ty));
    // Fall through.

  case Type::ObjCInterface:
    // Interfaces with a superclass use single inheritance; root classes are
    // plain __class_type_info.
    if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = ClassTypeInfo;
    }
    break;

  case Type::ObjCObjectPointer:
  case Type::Pointer:
    // abi::__pointer_type_info.
    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
    break;

  case Type::MemberPointer:
    // abi::__pointer_to_member_type_info.
    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
    break;
  }

  llvm::Constant *VTable =
    CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);

  llvm::Type *PtrDiffTy =
    CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());

  // The vtable address point is 2.
  // (Slot 0 is the offset-to-top, slot 1 the RTTI pointer; the usable vtable
  // starts at slot 2.)
  llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
  VTable =
      llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two);
  VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);

  Fields.push_back(VTable);
}

/// \brief Return the linkage that the type info and type info name constants
/// should have for the given type.
static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
                                                             QualType Ty) {
  // Itanium C++ ABI 2.9.5p7:
  //   In addition, it and all of the intermediate abi::__pointer_type_info
  //   structs in the chain down to the abi::__class_type_info for the
  //   incomplete class type must be prevented from resolving to the
  //   corresponding type_info structs for the complete class type, possibly
  //   by making them local static objects. Finally, a dummy class RTTI is
  //   generated for the incomplete type that will not resolve to the final
  //   complete class RTTI (because the latter need not exist), possibly by
  //   making it a local static object.
  if (ContainsIncompleteClassType(Ty))
    return llvm::GlobalValue::InternalLinkage;

  switch (Ty->getLinkage()) {
  case NoLinkage:
  case InternalLinkage:
  case UniqueExternalLinkage:
    return llvm::GlobalValue::InternalLinkage;

  case VisibleNoLinkage:
  case ExternalLinkage:
    // RTTI is not enabled, which means that this type info struct is going
    // to be used for exception handling. Give it linkonce_odr linkage.
    if (!CGM.getLangOpts().RTTI)
      return llvm::GlobalValue::LinkOnceODRLinkage;

    if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
      if (RD->hasAttr<WeakAttr>())
        return llvm::GlobalValue::WeakODRLinkage;
      if (CGM.getTriple().isWindowsItaniumEnvironment())
        if (RD->hasAttr<DLLImportAttr>())
          return llvm::GlobalValue::ExternalLinkage;
      if (RD->isDynamicClass()) {
        // For dynamic classes, follow the linkage of the vtable so the RTTI
        // is emitted in the same translation unit as the vtable.
        llvm::GlobalValue::LinkageTypes LT = CGM.getVTableLinkage(RD);
        // MinGW won't export the RTTI information when there is a key function.
        // Make sure we emit our own copy instead of attempting to dllimport it.
        if (RD->hasAttr<DLLImportAttr>() &&
            llvm::GlobalValue::isAvailableExternallyLinkage(LT))
          LT = llvm::GlobalValue::LinkOnceODRLinkage;
        return LT;
      }
    }

    return llvm::GlobalValue::LinkOnceODRLinkage;
  }

  llvm_unreachable("Invalid linkage!");
}

llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force,
                                                  bool DLLExport) {
  // We want to operate on the canonical type.
  Ty = Ty.getCanonicalType();

  // Check if we've already emitted an RTTI descriptor for this type.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);

  llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
  if (OldGV && !OldGV->isDeclaration()) {
    assert(!OldGV->hasAvailableExternallyLinkage() &&
           "available_externally typeinfos not yet implemented");

    return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
  }

  // Check if there is already an external RTTI descriptor for this type.
  bool IsStdLib = IsStandardLibraryRTTIDescriptor(Ty);
  if (!Force && (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM, Ty)))
    return GetAddrOfExternalRTTIDescriptor(Ty);

  // Emit the standard library with external linkage.
  llvm::GlobalVariable::LinkageTypes Linkage;
  if (IsStdLib)
    Linkage = llvm::GlobalValue::ExternalLinkage;
  else
    Linkage = getTypeInfoLinkage(CGM, Ty);

  // Add the vtable pointer.
  BuildVTablePointer(cast<Type>(Ty));

  // And the name.
  llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
  llvm::Constant *TypeNameField;

  // If we're supposed to demote the visibility, be sure to set a flag
  // to use a string comparison for type_info comparisons.
  ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
      CXXABI.classifyRTTIUniqueness(Ty, Linkage);
  if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
    // The flag is the sign bit, which on ARM64 is defined to be clear
    // for global pointers.  This is very ARM64-specific.
    TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
    llvm::Constant *flag =
        llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
    TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
    TypeNameField =
        llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
  } else {
    TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
  }
  Fields.push_back(TypeNameField);

  // Append the per-type-class extra fields; the chosen vtable in
  // BuildVTablePointer must match the field layout emitted here.
  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  // GCC treats vector types as fundamental types.
  case Type::Builtin:
  case Type::Vector:
  case Type::ExtVector:
  case Type::Complex:
  case Type::BlockPointer:
    // Itanium C++ ABI 2.9.5p4:
    //   abi::__fundamental_type_info adds no data members to std::type_info.
    break;

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
    llvm_unreachable("Undeduced auto type shouldn't get here");

  case Type::Pipe:
    llvm_unreachable("Pipe type shouldn't get here");

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__array_type_info adds no data members to std::type_info.
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__function_type_info adds no data members to std::type_info.
    break;

  case Type::Enum:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__enum_type_info adds no data members to std::type_info.
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
        cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      // We don't need to emit any fields.
      break;
    }

    if (CanUseSingleInheritance(RD))
      BuildSIClassTypeInfo(RD);
    else
      BuildVMIClassTypeInfo(RD);

    break;
  }

  case Type::ObjCObject:
  case Type::ObjCInterface:
    BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
    break;

  case Type::ObjCObjectPointer:
    BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
    break;

  case Type::Pointer:
    BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
    break;

  case Type::MemberPointer:
    BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
    break;

  case Type::Atomic:
    // No fields, at least for the moment.
    break;
  }

  llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);

  llvm::Module &M = CGM.getModule();
  llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(M, Init->getType(),
                               /*Constant=*/true, Linkage, Init, Name);

  // If there's already an old global variable, replace it with the new one.
  if (OldGV) {
    GV->takeName(OldGV);
    llvm::Constant *NewPtr =
        llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtr);
    OldGV->eraseFromParent();
  }

  if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(M.getOrInsertComdat(GV->getName()));

  // The Itanium ABI specifies that type_info objects must be globally
  // unique, with one exception: if the type is an incomplete class
  // type or a (possibly indirect) pointer to one.  That exception
  // affects the general case of comparing type_info objects produced
  // by the typeid operator, which is why the comparison operators on
  // std::type_info generally use the type_info name pointers instead
  // of the object addresses.  However, the language's built-in uses
  // of RTTI generally require class types to be complete, even when
  // manipulating pointers to those class types.  This allows the
  // implementation of dynamic_cast to rely on address equality tests,
  // which is much faster.

  // All of this is to say that it's important that both the type_info
  // object and the type_info name be uniqued when weakly emitted.

  // Give the type_info object and name the formal visibility of the
  // type itself.
  llvm::GlobalValue::VisibilityTypes llvmVisibility;
  if (llvm::GlobalValue::isLocalLinkage(Linkage))
    // If the linkage is local, only default visibility makes sense.
    llvmVisibility = llvm::GlobalValue::DefaultVisibility;
  else if (RTTIUniqueness == ItaniumCXXABI::RUK_NonUniqueHidden)
    llvmVisibility = llvm::GlobalValue::HiddenVisibility;
  else
    llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());

  TypeName->setVisibility(llvmVisibility);
  GV->setVisibility(llvmVisibility);

  if (CGM.getTriple().isWindowsItaniumEnvironment()) {
    auto RD = Ty->getAsCXXRecordDecl();
    if (DLLExport || (RD && RD->hasAttr<DLLExportAttr>())) {
      TypeName->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
      GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
    } else if (CGM.getLangOpts().RTTI && RD && RD->hasAttr<DLLImportAttr>()) {
      TypeName->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
      GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);

      // Because the typename and the typeinfo are DLL import, convert them to
      // declarations rather than definitions.  The initializers still need to
      // be constructed to calculate the type for the declarations.
      TypeName->setInitializer(nullptr);
      GV->setInitializer(nullptr);
    }
  }

  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}

/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
/// for the given Objective-C object type.
void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
  // Drop qualifiers.
  const Type *T = OT->getBaseType().getTypePtr();
  assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));

  // The builtin types are abi::__class_type_infos and don't require
  // extra fields.
  if (isa<BuiltinType>(T)) return;

  ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
  ObjCInterfaceDecl *Super = Class->getSuperClass();

  // Root classes are also __class_type_info.
  if (!Super) return;

  QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);

  // Everything else is single inheritance.
  llvm::Constant *BaseTypeInfo =
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
  Fields.push_back(BaseTypeInfo);
}

/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
/// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
  // Itanium C++ ABI 2.9.5p6b:
  //   It adds to abi::__class_type_info a single member pointing to the
  //   type_info structure for the base type,
  llvm::Constant *BaseTypeInfo =
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
  Fields.push_back(BaseTypeInfo);
}

namespace {
  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
  /// a class hierarchy.
  struct SeenBases {
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
  };
}

/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
/// abi::__vmi_class_type_info.
///
/// Recursively walks \p Base and all of its (direct and indirect) bases,
/// recording each one in \p Bases and accumulating diamond/repeat flags.
static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
                                             SeenBases &Bases) {

  unsigned Flags = 0;

  const CXXRecordDecl *BaseDecl =
    cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());

  if (Base->isVirtual()) {
    // Mark the virtual base as seen.
    if (!Bases.VirtualBases.insert(BaseDecl).second) {
      // If this virtual base has been seen before, then the class is diamond
      // shaped.
      Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
    } else {
      // Seen before as a non-virtual base: repeated, but not a diamond.
      if (Bases.NonVirtualBases.count(BaseDecl))
        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    }
  } else {
    // Mark the non-virtual base as seen.
    if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
      // If this non-virtual base has been seen before, then the class has non-
      // diamond shaped repeated inheritance.
      Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    } else {
      if (Bases.VirtualBases.count(BaseDecl))
        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    }
  }

  // Walk all bases.
  for (const auto &I : BaseDecl->bases())
    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);

  return Flags;
}

// Entry point: accumulate the __vmi_class_type_info flags over RD's entire
// inheritance graph.
static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
  unsigned Flags = 0;
  SeenBases Bases;

  // Walk all bases.
  for (const auto &I : RD->bases())
    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);

  return Flags;
}

/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
/// classes with bases that do not satisfy the abi::__si_class_type_info
/// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
  llvm::Type *UnsignedIntLTy =
    CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);

  // Itanium C++ ABI 2.9.5p6c:
  //   __flags is a word with flags describing details about the class
  //   structure, which may be referenced by using the __flags_masks
  //   enumeration. These flags refer to both direct and indirect bases.
  unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));

  // Itanium C++ ABI 2.9.5p6c:
  //   __base_count is a word with the number of direct proper base class
  //   descriptions that follow.
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));

  if (!RD->getNumBases())
    return;

  // Now add the base class descriptions.

  // Itanium C++ ABI 2.9.5p6c:
  //   __base_info[] is an array of base class descriptions -- one for every
  //   direct proper base. Each description is of the type:
  //
  //   struct abi::__base_class_type_info {
  //   public:
  //     const __class_type_info *__base_type;
  //     long __offset_flags;
  //
  //     enum __offset_flags_masks {
  //       __virtual_mask = 0x1,
  //       __public_mask = 0x2,
  //       __offset_shift = 8
  //     };
  //   };

  // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
  // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
  // LLP64 platforms.
  // FIXME: Consider updating libc++abi to match, and extend this logic to all
  // LLP64 platforms.
  QualType OffsetFlagsTy = CGM.getContext().LongTy;
  const TargetInfo &TI = CGM.getContext().getTargetInfo();
  if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
    OffsetFlagsTy = CGM.getContext().LongLongTy;
  llvm::Type *OffsetFlagsLTy =
      CGM.getTypes().ConvertType(OffsetFlagsTy);

  for (const auto &Base : RD->bases()) {
    // The __base_type member points to the RTTI for the base type.
    Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());

    int64_t OffsetFlags = 0;

    // All but the lower 8 bits of __offset_flags are a signed offset.
    // For a non-virtual base, this is the offset in the object of the base
    // subobject. For a virtual base, this is the offset in the virtual table of
    // the virtual base offset for the virtual base referenced (negative).
    CharUnits Offset;
    if (Base.isVirtual())
      Offset =
        CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
    else {
      const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
      Offset = Layout.getBaseClassOffset(BaseDecl);
    }; // NOTE(review): stray ';' after this brace — harmless empty statement.

    OffsetFlags = uint64_t(Offset.getQuantity()) << 8;

    // The low-order byte of __offset_flags contains flags, as given by the
    // masks from the enumeration __offset_flags_masks.
    if (Base.isVirtual())
      OffsetFlags |= BCTI_Virtual;
    if (Base.getAccessSpecifier() == AS_public)
      OffsetFlags |= BCTI_Public;

    Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
  }
}

/// Compute the flags for a __pbase_type_info, and remove the corresponding
/// pieces from \p Type.
static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
  unsigned Flags = 0;

  if (Type.isConstQualified())
    Flags |= ItaniumRTTIBuilder::PTI_Const;
  if (Type.isVolatileQualified())
    Flags |= ItaniumRTTIBuilder::PTI_Volatile;
  if (Type.isRestrictQualified())
    Flags |= ItaniumRTTIBuilder::PTI_Restrict;
  Type = Type.getUnqualifiedType();

  // Itanium C++ ABI 2.9.5p7:
  //   When the abi::__pbase_type_info is for a direct or indirect pointer to an
  //   incomplete class type, the incomplete target type flag is set.
  if (ContainsIncompleteClassType(Type))
    Flags |= ItaniumRTTIBuilder::PTI_Incomplete;

  if (auto *Proto = Type->getAs<FunctionProtoType>()) {
    if (Proto->isNothrow(Ctx)) {
      Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
      // Strip the exception spec so the pointee's type_info matches the
      // non-noexcept function type; the noexcept-ness lives in the flag.
      Type = Ctx.getFunctionType(
          Proto->getReturnType(), Proto->getParamTypes(),
          Proto->getExtProtoInfo().withExceptionSpec(EST_None));
    }
  }

  return Flags;
}

/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
/// used for pointer types.
3372 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) { 3373 // Itanium C++ ABI 2.9.5p7: 3374 // __flags is a flag word describing the cv-qualification and other 3375 // attributes of the type pointed to 3376 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy); 3377 3378 llvm::Type *UnsignedIntLTy = 3379 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy); 3380 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags)); 3381 3382 // Itanium C++ ABI 2.9.5p7: 3383 // __pointee is a pointer to the std::type_info derivation for the 3384 // unqualified type being pointed to. 3385 llvm::Constant *PointeeTypeInfo = 3386 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy); 3387 Fields.push_back(PointeeTypeInfo); 3388 } 3389 3390 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info 3391 /// struct, used for member pointer types. 3392 void 3393 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) { 3394 QualType PointeeTy = Ty->getPointeeType(); 3395 3396 // Itanium C++ ABI 2.9.5p7: 3397 // __flags is a flag word describing the cv-qualification and other 3398 // attributes of the type pointed to. 3399 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy); 3400 3401 const RecordType *ClassType = cast<RecordType>(Ty->getClass()); 3402 if (IsIncompleteClassType(ClassType)) 3403 Flags |= PTI_ContainingClassIncomplete; 3404 3405 llvm::Type *UnsignedIntLTy = 3406 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy); 3407 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags)); 3408 3409 // Itanium C++ ABI 2.9.5p7: 3410 // __pointee is a pointer to the std::type_info derivation for the 3411 // unqualified type being pointed to. 
3412 llvm::Constant *PointeeTypeInfo = 3413 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy); 3414 Fields.push_back(PointeeTypeInfo); 3415 3416 // Itanium C++ ABI 2.9.5p9: 3417 // __context is a pointer to an abi::__class_type_info corresponding to the 3418 // class type containing the member pointed to 3419 // (e.g., the "A" in "int A::*"). 3420 Fields.push_back( 3421 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0))); 3422 } 3423 3424 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) { 3425 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty); 3426 } 3427 3428 void ItaniumCXXABI::EmitFundamentalRTTIDescriptor(QualType Type, 3429 bool DLLExport) { 3430 QualType PointerType = getContext().getPointerType(Type); 3431 QualType PointerTypeConst = getContext().getPointerType(Type.withConst()); 3432 ItaniumRTTIBuilder(*this).BuildTypeInfo(Type, /*Force=*/true, DLLExport); 3433 ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerType, /*Force=*/true, 3434 DLLExport); 3435 ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, /*Force=*/true, 3436 DLLExport); 3437 } 3438 3439 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(bool DLLExport) { 3440 // Types added here must also be added to TypeInfoIsInStandardLibrary. 
  QualType FundamentalTypes[] = {
      getContext().VoidTy,             getContext().NullPtrTy,
      getContext().BoolTy,             getContext().WCharTy,
      getContext().CharTy,             getContext().UnsignedCharTy,
      getContext().SignedCharTy,       getContext().ShortTy,
      getContext().UnsignedShortTy,    getContext().IntTy,
      getContext().UnsignedIntTy,      getContext().LongTy,
      getContext().UnsignedLongTy,     getContext().LongLongTy,
      getContext().UnsignedLongLongTy, getContext().Int128Ty,
      getContext().UnsignedInt128Ty,   getContext().HalfTy,
      getContext().FloatTy,            getContext().DoubleTy,
      getContext().LongDoubleTy,       getContext().Float128Ty,
      getContext().Char16Ty,           getContext().Char32Ty
  };
  for (const QualType &FundamentalType : FundamentalTypes)
    EmitFundamentalRTTIDescriptor(FundamentalType, DLLExport);
}

/// What sort of uniqueness rules should we use for the RTTI for the
/// given type?
ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
    QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
  if (shouldRTTIBeUnique())
    return RUK_Unique;

  // It's only necessary for linkonce_odr or weak_odr linkage.
  if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
      Linkage != llvm::GlobalValue::WeakODRLinkage)
    return RUK_Unique;

  // It's only necessary with default visibility.
  if (CanTy->getVisibility() != DefaultVisibility)
    return RUK_Unique;

  // If we're not required to publish this symbol, hide it.
  if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
    return RUK_NonUniqueHidden;

  // If we're required to publish this symbol, as we might be under an
  // explicit instantiation, leave it with default visibility but
  // enable string-comparisons.
  assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
  return RUK_NonUniqueVisible;
}

// Find out how to codegen the complete destructor and constructor
namespace {
// Emit:   emit the complete structor as its own function body.
// RAUW:   don't emit it; replace all uses of the complete-structor symbol
//         with the base structor (see emitCXXStructor).
// Alias:  emit the complete structor as a true alias of the base structor.
// COMDAT: emit one body placed in a C5/D5 comdat shared by both variants.
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
}

/// Decide how the complete constructor/destructor for \p MD should be
/// produced relative to its base variant.
static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
                                       const CXXMethodDecl *MD) {
  if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
    return StructorCodegen::Emit;

  // The complete and base structors are not equivalent if there are any virtual
  // bases, so emit separate functions.
  if (MD->getParent()->getNumVBases())
    return StructorCodegen::Emit;

  GlobalDecl AliasDecl;
  if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
    AliasDecl = GlobalDecl(DD, Dtor_Complete);
  } else {
    const auto *CD = cast<CXXConstructorDecl>(MD);
    AliasDecl = GlobalDecl(CD, Ctor_Complete);
  }
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  // Discardable symbols need no separate definition at all: just redirect
  // uses of the complete variant to the base variant.
  if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
    return StructorCodegen::RAUW;

  // FIXME: Should we allow available_externally aliases?
  if (!llvm::GlobalAlias::isValidLinkage(Linkage))
    return StructorCodegen::RAUW;

  if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
    // Only ELF supports COMDATs with arbitrary names (C5/D5).
    if (CGM.getTarget().getTriple().isOSBinFormatELF())
      return StructorCodegen::COMDAT;
    return StructorCodegen::Emit;
  }

  return StructorCodegen::Alias;
}

/// Emit the symbol for \p AliasDecl as an alias of \p TargetDecl,
/// redirecting any existing declaration's uses to the new alias.
static void emitConstructorDestructorAlias(CodeGenModule &CGM,
                                           GlobalDecl AliasDecl,
                                           GlobalDecl TargetDecl) {
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  // Does this function already have a definition? Then leave it alone.
  StringRef MangledName = CGM.getMangledName(AliasDecl);
  llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
  if (Entry && !Entry->isDeclaration())
    return;

  auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));

  // Create the alias with no name.
  auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);

  // Switch any previous uses to the alias.
  if (Entry) {
    assert(Entry->getType() == Aliasee->getType() &&
           "declaration exists with different type");
    Alias->takeName(Entry);
    Entry->replaceAllUsesWith(Alias);
    Entry->eraseFromParent();
  } else {
    Alias->setName(MangledName);
  }

  // Finally, set up the alias with its proper name and attributes.
  CGM.setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
}

/// Emit one variant (complete or base) of a constructor or destructor,
/// using the alias/RAUW/COMDAT strategy chosen by getCodegenToUse.
void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
                                    StructorType Type) {
  // Exactly one of CD/DD is non-null.
  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
  const CXXDestructorDecl *DD = CD ?
                                    nullptr : cast<CXXDestructorDecl>(MD);

  StructorCodegen CGType = getCodegenToUse(CGM, MD);

  if (Type == StructorType::Complete) {
    GlobalDecl CompleteDecl;
    GlobalDecl BaseDecl;
    if (CD) {
      CompleteDecl = GlobalDecl(CD, Ctor_Complete);
      BaseDecl = GlobalDecl(CD, Ctor_Base);
    } else {
      CompleteDecl = GlobalDecl(DD, Dtor_Complete);
      BaseDecl = GlobalDecl(DD, Dtor_Base);
    }

    // Alias/COMDAT: the complete variant is just another name for the base
    // variant; emit the alias and skip codegen of a separate body.
    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
      emitConstructorDestructorAlias(CGM, CompleteDecl, BaseDecl);
      return;
    }

    // RAUW: record a deferred replacement of the complete-variant symbol
    // with the base variant instead of emitting anything now.
    if (CGType == StructorCodegen::RAUW) {
      StringRef MangledName = CGM.getMangledName(CompleteDecl);
      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
      CGM.addReplacement(MangledName, Aliasee);
      return;
    }
  }

  // The base destructor is equivalent to the base destructor of its
  // base class if there is exactly one non-virtual base class with a
  // non-trivial destructor, there are no fields with a non-trivial
  // destructor, and the body of the destructor is trivial.
  if (DD && Type == StructorType::Base && CGType != StructorCodegen::COMDAT &&
      !CGM.TryEmitBaseDestructorAsAlias(DD))
    return;

  llvm::Function *Fn = CGM.codegenCXXStructor(MD, Type);

  if (CGType == StructorCodegen::COMDAT) {
    // Place both variants in the C5/D5 comdat so the linker keeps one copy.
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    if (DD)
      getMangleContext().mangleCXXDtorComdat(DD, Out);
    else
      getMangleContext().mangleCXXCtorComdat(CD, Out);
    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
    Fn->setComdat(C);
  } else {
    CGM.maybeSetTrivialComdat(*MD, *Fn);
  }
}

/// Get the __cxa_begin_catch runtime function declaration.
static llvm::Constant *getBeginCatchFn(CodeGenModule &CGM) {
  // void *__cxa_begin_catch(void*);
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
}

/// Get the __cxa_end_catch runtime function declaration.
static llvm::Constant *getEndCatchFn(CodeGenModule &CGM) {
  // void __cxa_end_catch();
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
}

/// Get the __cxa_get_exception_ptr runtime function declaration.
static llvm::Constant *getGetExceptionPtrFn(CodeGenModule &CGM) {
  // void *__cxa_get_exception_ptr(void*);
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
}

namespace {
  /// A cleanup to call __cxa_end_catch.  In many cases, the caught
  /// exception type lets us state definitively that the thrown exception
  /// type does not have a destructor.  In particular:
  ///   - Catch-alls tell us nothing, so we have to conservatively
  ///     assume that the thrown exception might have a destructor.
  ///   - Catches by reference behave according to their base types.
  ///   - Catches of non-record types will only trigger for exceptions
  ///     of non-record types, which never have destructors.
  ///   - Catches of record types can trigger for arbitrary subclasses
  ///     of the caught type, so we have to assume the actual thrown
  ///     exception type might have a throwing destructor, even if the
  ///     caught type's destructor is trivial or nothrow.
  struct CallEndCatch final : EHScopeStack::Cleanup {
    CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
    // Whether __cxa_end_catch (which may run the exception's destructor)
    // can throw; selects call vs. invoke below.
    bool MightThrow;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      if (!MightThrow) {
        CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
        return;
      }

      CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
    }
  };
}

/// Emits a call to __cxa_begin_catch and enters a cleanup to call
/// __cxa_end_catch.
///
/// \param EndMightThrow - true if __cxa_end_catch might throw
static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
                                   llvm::Value *Exn,
                                   bool EndMightThrow) {
  llvm::CallInst *call =
    CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);

  CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);

  return call;
}

/// A "special initializer" callback for initializing a catch
/// parameter during catch initialization.
///
/// \param CatchParam - the declared exception variable of the handler
/// \param ParamAddr - where the variable was alloca'd
/// \param Loc - source location, used for loads emitted below
static void InitCatchParam(CodeGenFunction &CGF,
                           const VarDecl &CatchParam,
                           Address ParamAddr,
                           SourceLocation Loc) {
  // Load the exception from where the landing pad saved it.
  llvm::Value *Exn = CGF.getExceptionFromSlot();

  CanQualType CatchType =
    CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);

  // If we're catching by reference, we can just cast the object
  // pointer to the appropriate pointer.
  if (isa<ReferenceType>(CatchType)) {
    QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
    bool EndCatchMightThrow = CaughtType->isRecordType();

    // __cxa_begin_catch returns the adjusted object pointer.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);

    // We have no way to tell the personality function that we're
    // catching by reference, so if we're catching a pointer,
    // __cxa_begin_catch will actually return that pointer by value.
    if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
      QualType PointeeType = PT->getPointeeType();

      // When catching by reference, generally we should just ignore
      // this by-value pointer and use the exception object instead.
      if (!PointeeType->isRecordType()) {

        // Exn points to the struct _Unwind_Exception header, which
        // we have to skip past in order to reach the exception data.
        unsigned HeaderSize =
          CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
        AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);

      // However, if we're catching a pointer-to-record type that won't
      // work, because the personality function might have adjusted
      // the pointer.  There's actually no way for us to fully satisfy
      // the language/ABI contract here:  we can't use Exn because it
      // might have the wrong adjustment, but we can't use the by-value
      // pointer because it's off by a level of abstraction.
      //
      // The current solution is to dump the adjusted pointer into an
      // alloca, which breaks language semantics (because changing the
      // pointer doesn't change the exception) but at least works.
      // The better solution would be to filter out non-exact matches
      // and rethrow them, but this is tricky because the rethrow
      // really needs to be catchable by other sites at this landing
      // pad.  The best solution is to fix the personality function.
      } else {
        // Pull the pointer for the reference type off.
        llvm::Type *PtrTy =
          cast<llvm::PointerType>(LLVMCatchTy)->getElementType();

        // Create the temporary and write the adjusted pointer into it.
        Address ExnPtrTmp =
          CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
        llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
        CGF.Builder.CreateStore(Casted, ExnPtrTmp);

        // Bind the reference to the temporary.
        AdjustedExn = ExnPtrTmp.getPointer();
      }
    }

    llvm::Value *ExnCast =
      CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
    CGF.Builder.CreateStore(ExnCast, ParamAddr);
    return;
  }

  // Scalars and complexes.
  TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
  if (TEK != TEK_Aggregate) {
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);

    // If the catch type is a pointer type, __cxa_begin_catch returns
    // the pointer by value.
    if (CatchType->hasPointerRepresentation()) {
      llvm::Value *CastExn =
        CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");

      switch (CatchType.getQualifiers().getObjCLifetime()) {
      case Qualifiers::OCL_Strong:
        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
        // fallthrough
        // NOTE(review): intentional fallthrough (retain, then store below);
        // could be annotated with LLVM_FALLTHROUGH.

      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        CGF.Builder.CreateStore(CastExn, ParamAddr);
        return;

      case Qualifiers::OCL_Weak:
        CGF.EmitARCInitWeak(ParamAddr, CastExn);
        return;
      }
      llvm_unreachable("bad ownership qualifier!");
    }

    // Otherwise, it returns a pointer into the exception object.

    llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
    llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);

    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
    LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
    switch (TEK) {
    case TEK_Complex:
      CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
                             /*init*/ true);
      return;
    case TEK_Scalar: {
      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
      CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
      return;
    }
    case TEK_Aggregate:
      llvm_unreachable("evaluation kind filtered out!");
    }
    llvm_unreachable("bad evaluation kind");
  }

  // Aggregate catches: copy (possibly via the copy constructor) out of the
  // exception object.
  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
  auto catchRD = CatchType->getAsCXXRecordDecl();
  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);

  llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok

  // Check for a copy expression.  If we don't have a copy expression,
  // that means a trivial copy is okay.
  const Expr *copyExpr = CatchParam.getInit();
  if (!copyExpr) {
    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
    Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                        caughtExnAlignment);
    CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType);
    return;
  }

  // We have to call __cxa_get_exception_ptr to get the adjusted
  // pointer before copying.
  llvm::CallInst *rawAdjustedExn =
    CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);

  // Cast that to the appropriate type.
  Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                      caughtExnAlignment);

  // The copy expression is defined in terms of an OpaqueValueExpr.
  // Find it and map it to the adjusted expression.
  CodeGenFunction::OpaqueValueMapping
    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
           CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));

  // Call the copy ctor in a terminate scope.
  CGF.EHStack.pushTerminate();

  // Perform the copy construction.
  CGF.EmitAggExpr(copyExpr,
                  AggValueSlot::forAddr(ParamAddr, Qualifiers(),
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased));

  // Leave the terminate scope.
  CGF.EHStack.popTerminate();

  // Undo the opaque value mapping.
  opaque.pop();

  // Finally we can call __cxa_begin_catch.
  CallBeginCatch(CGF, Exn, true);
}

/// Begins a catch statement by initializing the catch variable and
/// calling __cxa_begin_catch.
void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
                                   const CXXCatchStmt *S) {
  // We have to be very careful with the ordering of cleanups here:
  //   C++ [except.throw]p4:
  //     The destruction [of the exception temporary] occurs
  //     immediately after the destruction of the object declared in
  //     the exception-declaration in the handler.
  //
  // So the precise ordering is:
  //   1.  Construct catch variable.
  //   2.  __cxa_begin_catch
  //   3.  Enter __cxa_end_catch cleanup
  //   4.  Enter dtor cleanup
  //
  // We do this by using a slightly abnormal initialization process.
  // Delegation sequence:
  //   - ExitCXXTryStmt opens a RunCleanupsScope
  //   - EmitAutoVarAlloca creates the variable and debug info
  //       - InitCatchParam initializes the variable from the exception
  //       - CallBeginCatch calls __cxa_begin_catch
  //       - CallBeginCatch enters the __cxa_end_catch cleanup
  //   - EmitAutoVarCleanups enters the variable destructor cleanup
  //   - EmitCXXTryStmt emits the code for the catch body
  //   - EmitCXXTryStmt close the RunCleanupsScope

  VarDecl *CatchParam = S->getExceptionDecl();
  if (!CatchParam) {
    // catch (...) with no declared variable: just begin the catch.
    llvm::Value *Exn = CGF.getExceptionFromSlot();
    CallBeginCatch(CGF, Exn, true);
    return;
  }

  // Emit the local.
  CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
  InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getLocStart());
  CGF.EmitAutoVarCleanups(var);
}

/// Get or define the following function:
///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
/// This code is used only in C++.
static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
  llvm::FunctionType *fnTy =
    llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
  llvm::Constant *fnRef =
    CGM.CreateRuntimeFunction(fnTy, "__clang_call_terminate",
                              llvm::AttributeSet(), /*Local=*/true);

  // Only fill in the body the first time we see it (CreateRuntimeFunction
  // may return an existing declaration or definition).
  llvm::Function *fn = dyn_cast<llvm::Function>(fnRef);
  if (fn && fn->empty()) {
    fn->setDoesNotThrow();
    fn->setDoesNotReturn();

    // What we really want is to massively penalize inlining without
    // forbidding it completely.  The difference between that and
    // 'noinline' is negligible.
    fn->addFnAttr(llvm::Attribute::NoInline);

    // Allow this function to be shared across translation units, but
    // we don't want it to turn into an exported symbol.
    fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
    fn->setVisibility(llvm::Function::HiddenVisibility);
    if (CGM.supportsCOMDAT())
      fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));

    // Set up the function.
    llvm::BasicBlock *entry =
      llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
    CGBuilderTy builder(CGM, entry);

    // Pull the exception pointer out of the parameter list.
    llvm::Value *exn = &*fn->arg_begin();

    // Call __cxa_begin_catch(exn).
    llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
    catchCall->setDoesNotThrow();
    catchCall->setCallingConv(CGM.getRuntimeCC());

    // Call std::terminate().
    llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
    termCall->setDoesNotThrow();
    termCall->setDoesNotReturn();
    termCall->setCallingConv(CGM.getRuntimeCC());

    // std::terminate cannot return.
    builder.CreateUnreachable();
  }

  return fnRef;
}

/// Emit the terminate call used when an unexpected exception escapes: in
/// C++ (with a live exception pointer) route through __clang_call_terminate
/// so __cxa_begin_catch runs first; otherwise call std::terminate directly.
llvm::CallInst *
ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                                   llvm::Value *Exn) {
  // In C++, we want to call __cxa_begin_catch() before terminating.
  if (Exn) {
    assert(CGF.CGM.getLangOpts().CPlusPlus);
    return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
  }
  return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
}