//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ declarations
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Path.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace clang;
using namespace CodeGen;

/// Emit the initializer expression of the given variable into the given
/// address, dispatching on the evaluation kind (scalar / complex / aggregate)
/// of the variable's type. The variable must have global storage, or local
/// storage in OpenCL C++ mode, and must not be a reference.
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(
      (D.hasGlobalStorage() ||
       (D.hasLocalStorage() && CGF.getContext().getLangOpts().OpenCLCPlusPlus)) &&
      "VarDecl must have global or local (in the case of OpenCL) storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    // ObjC GC-qualified globals must be stored through the ObjC runtime so
    // the collector's write barriers are honored.
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(Init,
                    AggValueSlot::forLValue(lv, CGF, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased,
                                            AggValueSlot::DoesNotOverlap));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            ConstantAddress Addr) {
  // Honor __attribute__((no_destroy)) and bail instead of attempting
  // to emit a reference to a possibly nonexistent destructor, which
  // in turn can cause a crash. This will result in a global constructor
  // that isn't balanced out by a destructor call as intended by the
  // attribute. This also checks for -fno-c++-static-destructors and
  // bails even if the attribute is not present.
  QualType::DestructionKind DtorKind = D.needsDestruction(CGF.getContext());

  // FIXME: __attribute__((cleanup)) ?

  switch (DtorKind) {
  case QualType::DK_none:
    // Trivially destructible: nothing to register.
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
  case QualType::DK_nontrivial_c_struct:
    // We don't care about releasing objects during process teardown.
    assert(!D.getTLSKind() && "should have rejected this");
    return;
  }

  // The function to register with the runtime, and the pointer argument it
  // will be invoked with at program exit.
  llvm::FunctionCallee Func;
  llvm::Constant *Argument;

  CodeGenModule &CGM = CGF.CGM;
  QualType Type = D.getType();

  // Special-case non-array C++ destructors, if they have the right signature.
  // Under some ABIs, destructors return this instead of void, and cannot be
  // passed directly to __cxa_atexit if the target does not allow this
  // mismatch.
  const CXXRecordDecl *Record = Type->getAsCXXRecordDecl();
  bool CanRegisterDestructor =
      Record && (!CGM.getCXXABI().HasThisReturn(
                     GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
                 CGM.getCXXABI().canCallMismatchedFunctionType());
  // If __cxa_atexit is disabled via a flag, a different helper function is
  // generated elsewhere which uses atexit instead, and it takes the destructor
  // directly.
  bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
  if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
    assert(!Record->hasTrivialDestructor());
    CXXDestructorDecl *Dtor = Record->getDestructor();

    // Register the complete-object destructor directly, passing the variable's
    // address (cast to the destructor's expected parameter type) as argument.
    Func = CGM.getAddrAndTypeOfCXXStructor(GlobalDecl(Dtor, Dtor_Complete));
    if (CGF.getContext().getLangOpts().OpenCL) {
      // OpenCL: the __cxa_atexit pointer parameter may live in a different
      // address space than the variable itself.
      auto DestAS =
          CGM.getTargetCodeGenInfo().getAddrSpaceOfCxaAtexitPtrParam();
      auto DestTy = CGF.getTypes().ConvertType(Type)->getPointerTo(
          CGM.getContext().getTargetAddressSpace(DestAS));
      auto SrcAS = D.getType().getQualifiers().getAddressSpace();
      if (DestAS == SrcAS)
        Argument = llvm::ConstantExpr::getBitCast(Addr.getPointer(), DestTy);
      else
        // FIXME: On addr space mismatch we are passing NULL. The generation
        // of the global destructor function should be adjusted accordingly.
        Argument = llvm::ConstantPointerNull::get(DestTy);
    } else {
      Argument = llvm::ConstantExpr::getBitCast(
          Addr.getPointer(), CGF.getTypes().ConvertType(Type)->getPointerTo());
    }
    // Otherwise, the standard logic requires a helper function.
  } else {
    // Emit a one-off helper that destroys the object (handles arrays,
    // mismatched signatures, etc.) and register that with a null argument.
    Func = CodeGenFunction(CGM)
               .generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
                                      CGF.needsEHCleanup(DtorKind), &D);
    Argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
  }

  CGM.getCXXABI().registerGlobalDtor(CGF, D, Func, Argument);
}

/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
                              llvm::Constant *Addr) {
  // Delegate to the invariant.start emission with the in-memory size of the
  // variable's type.
  return CGF.EmitInvariantStart(
      Addr, CGF.getContext().getTypeSizeInChars(D.getType()));
}

/// Emit a call to the llvm.invariant.start intrinsic marking \p Size bytes at
/// \p Addr as unchanging from this point on. This is a pure optimization hint
/// and is skipped entirely at -O0.
void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
  // Do not emit the intrinsic if we're not optimizing.
  if (!CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // Grab the llvm.invariant.start intrinsic.
  llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
  // Overloaded address space type.
  llvm::Type *ObjectPtr[1] = {Int8PtrTy};
  llvm::Function *InvariantStart = CGM.getIntrinsic(InvStartID, ObjectPtr);

  // Emit a call with the size in bytes of the object.
  uint64_t Width = Size.getQuantity();
  llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(Int64Ty, Width),
                           llvm::ConstantExpr::getBitCast(Addr, Int8PtrTy)};
  Builder.CreateCall(InvariantStart, Args);
}

/// Emit the dynamic initialization (and, when needed, destructor
/// registration) for a single global variable. For non-reference types this
/// runs the initializer, then either marks the object invariant (if its type
/// is constant) or registers its destruction; for references it emits the
/// reference binding and stores the resulting pointer.
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::Constant *DeclPtr,
                                               bool PerformInit) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  // The address space of a static local variable (DeclPtr) may be different
  // from the address space of the "this" argument of the constructor. In that
  // case, we need an addrspacecast before calling the constructor.
  //
  //   struct StructWithCtor {
  //     __device__ StructWithCtor() {...}
  //   };
  //   __device__ void foo() {
  //     __shared__ StructWithCtor s;
  //     ...
  //   }
  //
  // For example, in the above CUDA code, the static local variable s has a
  // "shared" address space qualifier, but the constructor of StructWithCtor
  // expects "this" in the "generic" address space.
  unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(T);
  unsigned ActualAddrSpace = DeclPtr->getType()->getPointerAddressSpace();
  if (ActualAddrSpace != ExpectedAddrSpace) {
    llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(T);
    llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
    DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
  }

  ConstantAddress DeclAddr(DeclPtr, getContext().getDeclAlign(&D));

  if (!T->isReferenceType()) {
    // OpenMP threadprivate variables get their definition emitted through the
    // OpenMP runtime (unless we are in simd-only mode).
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        D.hasAttr<OMPThreadPrivateDeclAttr>()) {
      (void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
          &D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
          PerformInit, this);
    }
    if (PerformInit)
      EmitDeclInit(*this, D, DeclAddr);
    // Constant types never need destruction, so mark them invariant instead
    // of registering a destructor.
    if (CGM.isTypeConstant(D.getType(), true))
      EmitDeclInvariant(*this, D, DeclPtr);
    else
      EmitDeclDestroy(*this, D, DeclAddr);
    return;
  }

  assert(PerformInit && "cannot have constant initializer which needs "
         "destruction for reference");
  RValue RV = EmitReferenceBindingToExpr(Init);
  EmitStoreOfScalar(RV.getScalarVal(), DeclAddr, false, T);
}

/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
                                                  llvm::FunctionCallee dtor,
                                                  llvm::Constant *addr) {
  // Get the destructor function type, void(*)(void).
  llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
  // Mangle a unique name for the stub based on the variable.
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
  }

  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
      ty, FnName.str(), FI, VD.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(&VD, DynamicInitKind::AtExit),
                    CGM.getContext().VoidTy, fn, FI, FunctionArgList(),
                    VD.getLocation(), VD.getInit()->getExprLoc());

  llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);

  // Make sure the call and the callee agree on calling convention.
  if (auto *dtorFn = dyn_cast<llvm::Function>(
          dtor.getCallee()->stripPointerCastsAndAliases()))
    call->setCallingConv(dtorFn->getCallingConv());

  CGF.FinishFunction();

  return fn;
}

/// Register a global destructor using the C atexit runtime function.
void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
                                                   llvm::FunctionCallee dtor,
                                                   llvm::Constant *addr) {
  // Create a function which calls the destructor.
  llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
  registerGlobalDtorWithAtExit(dtorStub);
}

/// Register the given void(void) stub function with the C runtime's atexit.
void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
  // extern "C" int atexit(void (*f)(void));
  assert(cast<llvm::Function>(dtorStub)->getFunctionType() ==
             llvm::FunctionType::get(CGM.VoidTy, false) &&
         "Argument to atexit has a wrong type.");

  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(IntTy, dtorStub->getType(), false);

  llvm::FunctionCallee atexit =
      CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeList(),
                                /*Local=*/true);
  if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit.getCallee()))
    atexitFn->setDoesNotThrow();

  EmitNounwindRuntimeCall(atexit, dtorStub);
}

llvm::Value *
CodeGenFunction::unregisterGlobalDtorWithUnAtExit(llvm::Function *dtorStub) {
  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // it is removed from the list of functions that are called at normal program
  // termination and the unatexit returns a value of 0, otherwise a non-zero
  // value is returned.
  //
  // extern "C" int unatexit(void (*f)(void));
  assert(dtorStub->getFunctionType() ==
             llvm::FunctionType::get(CGM.VoidTy, false) &&
         "Argument to unatexit has a wrong type.");

  llvm::FunctionType *unatexitTy =
      llvm::FunctionType::get(IntTy, {dtorStub->getType()}, /*isVarArg=*/false);

  llvm::FunctionCallee unatexit =
      CGM.CreateRuntimeFunction(unatexitTy, "unatexit", llvm::AttributeList());

  cast<llvm::Function>(unatexit.getCallee())->setDoesNotThrow();

  // Return the unatexit result so the caller can branch on whether the stub
  // was actually unregistered.
  return EmitNounwindRuntimeCall(unatexit, dtorStub);
}

/// Emit a guarded (run-once) dynamic initialization for the given variable,
/// delegating the guard scheme to the C++ ABI. Errors out if guard variables
/// have been forbidden via -fforbid-guard-variables.
void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
                                         llvm::GlobalVariable *DeclPtr,
                                         bool PerformInit) {
  // If we've been asked to forbid guard variables, emit an error now.
  // This diagnostic is hard-coded for Darwin's use case; we can find
  // better phrasing if someone else needs it.
  if (CGM.getCodeGenOpts().ForbidGuardVariables)
    CGM.Error(D.getLocation(),
              "this initialization requires a guard variable, which "
              "the kernel does not support");

  CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
}

/// Emit the conditional branch of a guarded initialization, attaching branch
/// weights that reflect how unlikely it is that the initializer still needs
/// to run.
void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
                                               llvm::BasicBlock *InitBlock,
                                               llvm::BasicBlock *NoInitBlock,
                                               GuardKind Kind,
                                               const VarDecl *D) {
  assert((Kind == GuardKind::TlsGuard || D) && "no guarded variable");

  // A guess at how many times we will enter the initialization of a
  // variable, depending on the kind of variable.
  static const uint64_t InitsPerTLSVar = 1024;
  static const uint64_t InitsPerLocalVar = 1024 * 1024;

  llvm::MDNode *Weights;
  if (Kind == GuardKind::VariableGuard && !D->isLocalVarDecl()) {
    // For non-local variables, don't apply any weighting for now. Due to our
    // use of COMDATs, we expect there to be at most one initialization of the
    // variable per DSO, but we have no way to know how many DSOs will try to
    // initialize the variable.
    Weights = nullptr;
  } else {
    uint64_t NumInits;
    // FIXME: For the TLS case, collect and use profiling information to
    // determine a more accurate branch weight.
    if (Kind == GuardKind::TlsGuard || D->getTLSKind())
      NumInits = InitsPerTLSVar;
    else
      NumInits = InitsPerLocalVar;

    // The probability of us entering the initializer is
    //   1 / (total number of times we attempt to initialize the variable).
    llvm::MDBuilder MDHelper(CGM.getLLVMContext());
    Weights = MDHelper.createBranchWeights(1, NumInits - 1);
  }

  Builder.CreateCondBr(NeedsInit, InitBlock, NoInitBlock, Weights);
}

/// Create a function to be used for global initialization or cleanup,
/// configured with the appropriate section, linkage, calling convention,
/// nounwind, sanitizer and return-address-signing attributes.
llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
    llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
    SourceLocation Loc, bool TLS, bool IsExternalLinkage) {
  llvm::Function *Fn = llvm::Function::Create(
      FTy,
      IsExternalLinkage ? llvm::GlobalValue::ExternalLinkage
                        : llvm::GlobalValue::InternalLinkage,
      Name, &getModule());

  if (!getLangOpts().AppleKext && !TLS) {
    // Set the section if needed.
    if (const char *Section = getTarget().getStaticInitSectionSpecifier())
      Fn->setSection(Section);
  }

  if (Fn->hasInternalLinkage())
    SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  Fn->setCallingConv(getRuntimeCC());

  if (!getLangOpts().Exceptions)
    Fn->setDoesNotThrow();

  // Propagate the enabled sanitizers onto the function unless it is
  // blacklisted for that sanitizer at this location.
  if (getLangOpts().Sanitize.has(SanitizerKind::Address) &&
      !isInSanitizerBlacklist(SanitizerKind::Address, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelAddress) &&
      !isInSanitizerBlacklist(SanitizerKind::KernelAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::HWAddress) &&
      !isInSanitizerBlacklist(SanitizerKind::HWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelHWAddress) &&
      !isInSanitizerBlacklist(SanitizerKind::KernelHWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::MemTag) &&
      !isInSanitizerBlacklist(SanitizerKind::MemTag, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);

  if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
      !isInSanitizerBlacklist(SanitizerKind::Thread, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);

  if (getLangOpts().Sanitize.has(SanitizerKind::Memory) &&
      !isInSanitizerBlacklist(SanitizerKind::Memory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelMemory) &&
      !isInSanitizerBlacklist(SanitizerKind::KernelMemory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::SafeStack) &&
      !isInSanitizerBlacklist(SanitizerKind::SafeStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SafeStack);

  if (getLangOpts().Sanitize.has(SanitizerKind::ShadowCallStack) &&
      !isInSanitizerBlacklist(SanitizerKind::ShadowCallStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  // Return-address signing / branch-target enforcement (AArch64 PAC/BTI).
  auto RASignKind = getLangOpts().getSignReturnAddressScope();
  if (RASignKind != LangOptions::SignReturnAddressScopeKind::None) {
    Fn->addFnAttr("sign-return-address",
                  RASignKind == LangOptions::SignReturnAddressScopeKind::All
                      ? "all"
                      : "non-leaf");
    auto RASignKey = getLangOpts().getSignReturnAddressKey();
    Fn->addFnAttr("sign-return-address-key",
                  RASignKey == LangOptions::SignReturnAddressKeyKind::AKey
                      ? "a_key"
                      : "b_key");
  }

  if (getLangOpts().BranchTargetEnforcement)
    Fn->addFnAttr("branch-target-enforcement");

  return Fn;
}

/// Create a global pointer to a function that will initialize a global
/// variable. The user has requested that this pointer be emitted in a specific
/// section.
void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
                                          llvm::GlobalVariable *GV,
                                          llvm::Function *InitFunc,
                                          InitSegAttr *ISA) {
  llvm::GlobalVariable *PtrArray = new llvm::GlobalVariable(
      TheModule, InitFunc->getType(), /*isConstant=*/true,
      llvm::GlobalValue::PrivateLinkage, InitFunc, "__cxx_init_fn_ptr");
  PtrArray->setSection(ISA->getSection());
  // Keep the pointer alive through llvm.used so the linker cannot drop it.
  addUsedGlobal(PtrArray);

  // If the GV is already in a comdat group, then we have to join it.
  if (llvm::Comdat *C = GV->getComdat())
    PtrArray->setComdat(C);
}

/// Emit the per-variable dynamic initialization function for a global, then
/// record it in whichever bucket (thread-local inits, init_seg section,
/// prioritized ctors, COMDAT-keyed ctors, or the plain ordered list) governs
/// when it runs.
void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
                                            llvm::GlobalVariable *Addr,
                                            bool PerformInit) {

  // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__,
  // __constant__ and __shared__ variables defined in namespace scope,
  // that are of class type, cannot have a non-empty constructor. All
  // the checks have been done in Sema by now. Whatever initializers
  // are allowed are empty and we just need to ignore them here.
  if (getLangOpts().CUDAIsDevice && !getLangOpts().GPUAllowDeviceInit &&
      (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
       D->hasAttr<CUDASharedAttr>()))
    return;

  // OpenMP declare-target variables are initialized through the OpenMP
  // runtime instead.
  if (getLangOpts().OpenMP &&
      getOpenMPRuntime().emitDeclareTargetVarDefinition(D, Addr, PerformInit))
    return;

  // Check if we've already initialized this decl.
  auto I = DelayedCXXInitPosition.find(D);
  if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getCXXABI().getMangleContext().mangleDynamicInitializer(D, Out);
  }

  // Create a variable initialization function.
  llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
      FTy, FnName.str(), getTypes().arrangeNullaryFunction(), D->getLocation());

  auto *ISA = D->getAttr<InitSegAttr>();
  CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
                                                          PerformInit);

  llvm::GlobalVariable *COMDATKey =
      supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;

  if (D->getTLSKind()) {
    // FIXME: Should we support init_priority for thread_local?
    // FIXME: We only need to register one __cxa_thread_atexit function for the
    // entire TU.
    CXXThreadLocalInits.push_back(Fn);
    CXXThreadLocalInitVars.push_back(D);
  } else if (PerformInit && ISA) {
    EmitPointerToInitFunc(D, Addr, Fn, ISA);
  } else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
    OrderGlobalInits Key(IPA->getPriority(), PrioritizedCXXGlobalInits.size());
    PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
  } else if (isTemplateInstantiation(D->getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(D) == GVA_DiscardableODR) {
    // C++ [basic.start.init]p2:
    //   Definitions of explicitly specialized class template static data
    //   members have ordered initialization. Other class template static data
    //   members (i.e., implicitly or explicitly instantiated specializations)
    //   have unordered initialization.
    //
    // As a consequence, we can put them into their own llvm.global_ctors entry.
    //
    // If the global is externally visible, put the initializer into a COMDAT
    // group with the global being initialized. On most platforms, this is a
    // minor startup time optimization. In the MS C++ ABI, there are no guard
    // variables, so this COMDAT key is required for correctness.
    AddGlobalCtor(Fn, 65535, COMDATKey);
    if (getTarget().getCXXABI().isMicrosoft() && COMDATKey) {
      // In the MS C++ ABI, the COMDAT key for a template static data member
      // must be kept alive (via llvm.used) so the linker retains it.
      addUsedGlobal(COMDATKey);
    }
  } else if (D->hasAttr<SelectAnyAttr>()) {
    // SelectAny globals will be comdat-folded. Put the initializer into a
    // COMDAT group associated with the global, so the initializers get folded
    // too.
    AddGlobalCtor(Fn, 65535, COMDATKey);
  } else {
    I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash.
    if (I == DelayedCXXInitPosition.end()) {
      CXXGlobalInits.push_back(Fn);
    } else if (I->second != ~0U) {
      // A slot was reserved earlier; fill it in at the recorded position so
      // ordered initialization is preserved.
      assert(I->second < CXXGlobalInits.size() &&
             CXXGlobalInits[I->second] == nullptr);
      CXXGlobalInits[I->second] = Fn;
    }
  }

  // Remember that we already emitted the initializer for this global.
  DelayedCXXInitPosition[D] = ~0U;
}

/// Hand the collected thread_local variables and their initializers to the
/// C++ ABI, then reset the per-module lists.
void CodeGenModule::EmitCXXThreadLocalInitFunc() {
  getCXXABI().EmitThreadLocalInitFuncs(
      *this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);

  CXXThreadLocalInits.clear();
  CXXThreadLocalInitVars.clear();
  CXXThreadLocals.clear();
}

/// Return the module's file name with every character that is not valid in a
/// C preprocessing number replaced by '_', for use as a symbol-name suffix.
static SmallString<128> getTransformedFileName(llvm::Module &M) {
  SmallString<128> FileName = llvm::sys::path::filename(M.getName());

  if (FileName.empty())
    FileName = "<null>";

  for (size_t i = 0; i < FileName.size(); ++i) {
    // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
    // to be the set of C preprocessing numbers.
    if (!isPreprocessingNumberBody(FileName[i]))
      FileName[i] = '_';
  }

  return FileName;
}

/// Emit the module-level initialization function(s): one function per
/// init_priority chunk, followed by a single _GLOBAL__sub_I_<file> (or, on
/// sinit/sterm targets, __sinit80000000_clang_<id>) function that runs the
/// remaining ordered initializers.
void
CodeGenModule::EmitCXXGlobalInitFunc() {
  // Drop trailing null slots (initializers that were never filled in).
  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
    CXXGlobalInits.pop_back();

  if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
    return;

  const bool UseSinitAndSterm = getCXXABI().useSinitAndSterm();
  if (UseSinitAndSterm) {
    GlobalUniqueModuleId = getUniqueModuleId(&getModule());

    // FIXME: We need to figure out what to hash on or encode into the unique ID
    // we need.
    if (GlobalUniqueModuleId.compare("") == 0)
      llvm::report_fatal_error(
          "cannot produce a unique identifier for this module"
          " based on strong external symbols");
    // Strip the leading marker character from the unique ID.
    GlobalUniqueModuleId = GlobalUniqueModuleId.substr(1);
  }

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global prioritized initialization function.
  if (!PrioritizedCXXGlobalInits.empty()) {
    assert(!UseSinitAndSterm && "Prioritized sinit and sterm functions are not"
                                " supported yet.");

    SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
    llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
                         PrioritizedCXXGlobalInits.end());
    // Iterate over "chunks" of ctors with same priority and emit each chunk
    // into separate function. Note - everything is sorted first by priority,
    // second - by lex order, so we emit ctor functions in proper order.
    for (SmallVectorImpl<GlobalInitData >::iterator
           I = PrioritizedCXXGlobalInits.begin(),
           E = PrioritizedCXXGlobalInits.end(); I != E; ) {
      SmallVectorImpl<GlobalInitData >::iterator
        PrioE = std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());

      LocalCXXGlobalInits.clear();
      unsigned Priority = I->first.priority;
      // Compute the function suffix from priority. Prepend with zeroes to make
      // sure the function names are also ordered as priorities.
      std::string PrioritySuffix = llvm::utostr(Priority);
      // Priority is always <= 65535 (enforced by sema).
      PrioritySuffix = std::string(6-PrioritySuffix.size(), '0')+PrioritySuffix;
      llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
          FTy, "_GLOBAL__I_" + PrioritySuffix, FI);

      for (; I < PrioE; ++I)
        LocalCXXGlobalInits.push_back(I->second);

      CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, LocalCXXGlobalInits);
      AddGlobalCtor(Fn, Priority);
    }
    PrioritizedCXXGlobalInits.clear();
  }

  if (UseSinitAndSterm && CXXGlobalInits.empty())
    return;

  // Create our global initialization function.
  SmallString<128> FuncName;
  bool IsExternalLinkage = false;
  if (UseSinitAndSterm) {
    llvm::Twine("__sinit80000000_clang_", GlobalUniqueModuleId)
        .toVector(FuncName);
    IsExternalLinkage = true;
  } else {
    // Include the filename in the symbol name. Including "sub_" matches gcc
    // and makes sure these symbols appear lexicographically behind the symbols
    // with priority emitted above.
    llvm::Twine("_GLOBAL__sub_I_", getTransformedFileName(getModule()))
        .toVector(FuncName);
  }

  llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
      FTy, FuncName, FI, SourceLocation(), false /* TLS */,
      IsExternalLinkage);

  CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
  AddGlobalCtor(Fn);

  // In OpenCL global init functions must be converted to kernels in order to
  // be able to launch them from the host.
  // FIXME: Some more work might be needed to handle destructors correctly.
  // Current initialization function makes use of function pointers callbacks.
  // We can't support function pointers especially between host and device.
  // However it seems global destruction has little meaning without any
  // dynamic resource allocation on the device and program scope variables are
  // destroyed by the runtime when program is released.
  if (getLangOpts().OpenCL) {
    GenOpenCLArgMetadata(Fn);
    Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
  }

  if (getLangOpts().HIP) {
    Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
    Fn->addFnAttr("device-init");
  }

  CXXGlobalInits.clear();
}

/// Emit the module-level cleanup function (_GLOBAL__D_a, or a __sterm
/// function on sinit/sterm targets) that runs the collected destructors and
/// sterm finalizers, and register it as a global dtor.
void CodeGenModule::EmitCXXGlobalCleanUpFunc() {
  if (CXXGlobalDtorsOrStermFinalizers.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global cleanup function.
  llvm::Function *Fn = nullptr;
  if (getCXXABI().useSinitAndSterm()) {
    if (GlobalUniqueModuleId.empty()) {
      GlobalUniqueModuleId = getUniqueModuleId(&getModule());
      // FIXME: We need to figure out what to hash on or encode into the unique
      // ID we need.
      if (GlobalUniqueModuleId.compare("") == 0)
        llvm::report_fatal_error(
            "cannot produce a unique identifier for this module"
            " based on strong external symbols");
      // Strip the leading marker character from the unique ID.
      GlobalUniqueModuleId = GlobalUniqueModuleId.substr(1);
    }

    Fn = CreateGlobalInitOrCleanUpFunction(
        FTy, llvm::Twine("__sterm80000000_clang_", GlobalUniqueModuleId), FI,
        SourceLocation(), false /* TLS */, true /* IsExternalLinkage */);
  } else {
    Fn = CreateGlobalInitOrCleanUpFunction(FTy, "_GLOBAL__D_a", FI);
  }

  CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
      Fn, CXXGlobalDtorsOrStermFinalizers);
  AddGlobalDtor(Fn);
  CXXGlobalDtorsOrStermFinalizers.clear();
}

/// Emit the code necessary to initialize the given global variable.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
                                                       const VarDecl *D,
                                                 llvm::GlobalVariable *Addr,
                                                       bool PerformInit) {
  // Check if we need to emit debug info for variable initializer.
  if (D->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  CurEHLocation = D->getBeginLoc();

  StartFunction(GlobalDecl(D, DynamicInitKind::Initializer),
                getContext().VoidTy, Fn, getTypes().arrangeNullaryFunction(),
                FunctionArgList(), D->getLocation(),
                D->getInit()->getExprLoc());

  // Use guarded initialization if the global variable is weak. This
  // occurs for, e.g., instantiated static data members and
  // definitions explicitly marked weak.
  //
  // Also use guarded initialization for a variable with dynamic TLS and
  // unordered initialization. (If the initialization is ordered, the ABI
  // layer will guard the whole-TU initialization for us.)
  if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage() ||
      (D->getTLSKind() == VarDecl::TLS_Dynamic &&
       isTemplateInstantiation(D->getTemplateSpecializationKind()))) {
    EmitCXXGuardedInit(*D, Addr, PerformInit);
  } else {
    EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
  }

  FinishFunction();
}

/// Emit the body of a module initialization function that calls each of the
/// given per-variable initializers in order, optionally protected by a guard
/// variable (used for TLS init functions).
void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
                                           ArrayRef<llvm::Function *> Decls,
                                           ConstantAddress Guard) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    llvm::BasicBlock *ExitBlock = nullptr;
    if (Guard.isValid()) {
      // If we have a guard variable, check whether we've already performed
      // these initializations. This happens for TLS initialization functions.
      llvm::Value *GuardVal = Builder.CreateLoad(Guard);
      llvm::Value *Uninit = Builder.CreateIsNull(GuardVal,
                                                 "guard.uninitialized");
      llvm::BasicBlock *InitBlock = createBasicBlock("init");
      ExitBlock = createBasicBlock("exit");
      EmitCXXGuardedInitBranch(Uninit, InitBlock, ExitBlock,
                               GuardKind::TlsGuard, nullptr);
      EmitBlock(InitBlock);
      // Mark as initialized before initializing anything else. If the
      // initializers use previously-initialized thread_local vars, that's
      // probably supposed to be OK, but the standard doesn't say.
      Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(),1), Guard);

      // The guard variable can't ever change again.
      EmitInvariantStart(
          Guard.getPointer(),
          CharUnits::fromQuantity(
              CGM.getDataLayout().getTypeAllocSize(GuardVal->getType())));
    }

    RunCleanupsScope Scope(*this);

    // When building in Objective-C++ ARC mode, create an autorelease pool
    // around the global initializers.
    if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
      llvm::Value *token = EmitObjCAutoreleasePoolPush();
      EmitObjCAutoreleasePoolCleanup(token);
    }

    // Call each initializer; null entries are placeholders that were never
    // filled in and are skipped.
    for (unsigned i = 0, e = Decls.size(); i != e; ++i)
      if (Decls[i])
        EmitRuntimeCall(Decls[i]);

    Scope.ForceCleanup();

    if (ExitBlock) {
      Builder.CreateBr(ExitBlock);
      EmitBlock(ExitBlock);
    }
  }

  FinishFunction();
}

/// Emit the body of the module cleanup function, invoking the registered
/// destructor stubs / sterm finalizers in reverse registration order.
void CodeGenFunction::GenerateCXXGlobalCleanUpFunc(
    llvm::Function *Fn,
    const std::vector<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
                                 llvm::Constant *>> &DtorsOrStermFinalizers) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    // Emit the cleanups, in reverse order from construction.
    for (unsigned i = 0, e = DtorsOrStermFinalizers.size(); i != e; ++i) {
      llvm::FunctionType *CalleeTy;
      llvm::Value *Callee;
      llvm::Constant *Arg;
      std::tie(CalleeTy, Callee, Arg) = DtorsOrStermFinalizers[e - i - 1];

      llvm::CallInst *CI = nullptr;
      if (Arg == nullptr) {
        assert(
            CGM.getCXXABI().useSinitAndSterm() &&
            "Arg could not be nullptr unless using sinit and sterm functions.");
        CI = Builder.CreateCall(CalleeTy, Callee);
      } else
        CI = Builder.CreateCall(CalleeTy, Callee, Arg);

      // Make sure the call and the callee agree on calling convention.
      if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
        CI->setCallingConv(F->getCallingConv());
    }
  }

  FinishFunction();
}

/// generateDestroyHelper - Generates a helper function which, when
/// invoked, destroys the given object. The address of the object
/// should be in global memory.
llvm::Function *CodeGenFunction::generateDestroyHelper(
    Address addr, QualType type, Destroyer *destroyer,
    bool useEHCleanupForArray, const VarDecl *VD) {
  // The helper takes a single opaque pointer argument (ignored by the body,
  // which destroys the captured address).
  FunctionArgList args;
  ImplicitParamDecl Dst(getContext(), getContext().VoidPtrTy,
                        ImplicitParamDecl::Other);
  args.push_back(&Dst);

  const CGFunctionInfo &FI =
    CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, args);
  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, "__cxx_global_array_dtor", FI, VD->getLocation());

  CurEHLocation = VD->getBeginLoc();

  StartFunction(VD, getContext().VoidTy, fn, FI, args);

  emitDestroy(addr, type, destroyer, useEHCleanupForArray);

  FinishFunction();

  return fn;
}