//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ declarations
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "TargetInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Path.h"

using namespace clang;
using namespace CodeGen;

/// Emit the dynamic initializer for the variable \p D into the storage at
/// \p DeclPtr, dispatching on the evaluation kind (scalar / complex /
/// aggregate) of the variable's type. \p D must have global storage, or
/// local storage under OpenCL C++.
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(
      (D.hasGlobalStorage() ||
       (D.hasLocalStorage() && CGF.getContext().getLangOpts().OpenCLCPlusPlus)) &&
      "VarDecl must have global or local (in the case of OpenCL) storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    // Objective-C strong/weak lvalues must be stored through the ObjC
    // runtime's assignment entry points instead of a plain store.
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    // A global is never aliased by another live object during its own
    // initialization, and distinct globals never overlap.
    CGF.EmitAggExpr(Init,
                    AggValueSlot::forLValue(lv, CGF, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased,
                                            AggValueSlot::DoesNotOverlap));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            ConstantAddress Addr) {
  // Honor __attribute__((no_destroy)) and bail instead of attempting
  // to emit a reference to a possibly nonexistent destructor, which
  // in turn can cause a crash. This will result in a global constructor
  // that isn't balanced out by a destructor call as intended by the
  // attribute. This also checks for -fno-c++-static-destructors and
  // bails even if the attribute is not present.
  QualType::DestructionKind DtorKind = D.needsDestruction(CGF.getContext());

  // FIXME: __attribute__((cleanup)) ?

  switch (DtorKind) {
  case QualType::DK_none:
    // Nothing to destroy.
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
  case QualType::DK_nontrivial_c_struct:
    // We don't care about releasing objects during process teardown.
    assert(!D.getTLSKind() && "should have rejected this");
    return;
  }

  llvm::FunctionCallee Func;
  llvm::Constant *Argument;

  CodeGenModule &CGM = CGF.CGM;
  QualType Type = D.getType();

  // Special-case non-array C++ destructors, if they have the right signature.
  // Under some ABIs, destructors return this instead of void, and cannot be
  // passed directly to __cxa_atexit if the target does not allow this
  // mismatch.
  const CXXRecordDecl *Record = Type->getAsCXXRecordDecl();
  bool CanRegisterDestructor =
      Record && (!CGM.getCXXABI().HasThisReturn(
                     GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
                 CGM.getCXXABI().canCallMismatchedFunctionType());
  // If __cxa_atexit is disabled via a flag, a different helper function is
  // generated elsewhere which uses atexit instead, and it takes the destructor
  // directly.
  bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
  if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
    assert(!Record->hasTrivialDestructor());
    CXXDestructorDecl *Dtor = Record->getDestructor();

    Func = CGM.getAddrAndTypeOfCXXStructor(GlobalDecl(Dtor, Dtor_Complete));
    if (CGF.getContext().getLangOpts().OpenCL) {
      // In OpenCL, the pointer handed to the atexit-style runtime may live in
      // a different address space than the variable's own storage.
      auto DestAS =
          CGM.getTargetCodeGenInfo().getAddrSpaceOfCxaAtexitPtrParam();
      auto DestTy = CGF.getTypes().ConvertType(Type)->getPointerTo(
          CGM.getContext().getTargetAddressSpace(DestAS));
      auto SrcAS = D.getType().getQualifiers().getAddressSpace();
      if (DestAS == SrcAS)
        Argument = llvm::ConstantExpr::getBitCast(Addr.getPointer(), DestTy);
      else
        // FIXME: On addr space mismatch we are passing NULL. The generation
        // of the global destructor function should be adjusted accordingly.
        Argument = llvm::ConstantPointerNull::get(DestTy);
    } else {
      Argument = llvm::ConstantExpr::getBitCast(
          Addr.getPointer(), CGF.getTypes().ConvertType(Type)->getPointerTo());
    }
    // Otherwise, the standard logic requires a helper function.
  } else {
    Func = CodeGenFunction(CGM)
               .generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
                                      CGF.needsEHCleanup(DtorKind), &D);
    Argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
  }

  CGM.getCXXABI().registerGlobalDtor(CGF, D, Func, Argument);
}

/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
                              llvm::Constant *Addr) {
  return CGF.EmitInvariantStart(
      Addr, CGF.getContext().getTypeSizeInChars(D.getType()));
}

/// Emit a call to the llvm.invariant.start intrinsic, telling the optimizer
/// that the first \p Size bytes at \p Addr will never change again.
void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
  // Do not emit the intrinsic if we're not optimizing.
  if (!CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // Grab the llvm.invariant.start intrinsic.
  llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
  // Overloaded address space type.
  llvm::Type *ObjectPtr[1] = {Int8PtrTy};
  llvm::Function *InvariantStart = CGM.getIntrinsic(InvStartID, ObjectPtr);

  // Emit a call with the size in bytes of the object.
  uint64_t Width = Size.getQuantity();
  llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(Int64Ty, Width),
                           llvm::ConstantExpr::getBitCast(Addr, Int8PtrTy)};
  Builder.CreateCall(InvariantStart, Args);
}

/// Emit the dynamic initialization of the global variable \p D at \p DeclPtr,
/// and arrange for its destruction (or mark it invariant if its type is
/// constant). When \p PerformInit is false, only the destruction/invariant
/// bookkeeping is emitted.
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::Constant *DeclPtr,
                                               bool PerformInit) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  // The address space of a static local variable (DeclPtr) may be different
  // from the address space of the "this" argument of the constructor. In that
  // case, we need an addrspacecast before calling the constructor.
  //
  //   struct StructWithCtor {
  //     __device__ StructWithCtor() {...}
  //   };
  //   __device__ void foo() {
  //     __shared__ StructWithCtor s;
  //     ...
  //   }
  //
  // For example, in the above CUDA code, the static local variable s has a
  // "shared" address space qualifier, but the constructor of StructWithCtor
  // expects "this" in the "generic" address space.
  unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(T);
  unsigned ActualAddrSpace = DeclPtr->getType()->getPointerAddressSpace();
  if (ActualAddrSpace != ExpectedAddrSpace) {
    llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(T);
    llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
    DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
  }

  ConstantAddress DeclAddr(DeclPtr, getContext().getDeclAlign(&D));

  if (!T->isReferenceType()) {
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        D.hasAttr<OMPThreadPrivateDeclAttr>()) {
      (void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
          &D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
          PerformInit, this);
    }
    if (PerformInit)
      EmitDeclInit(*this, D, DeclAddr);
    // A constant object never changes after initialization, so it needs no
    // destructor registration; otherwise arrange for its destruction.
    if (CGM.isTypeConstant(D.getType(), true))
      EmitDeclInvariant(*this, D, DeclPtr);
    else
      EmitDeclDestroy(*this, D, DeclAddr);
    return;
  }

  // Reference: bind it and store the address of the referent.
  assert(PerformInit && "cannot have constant initializer which needs "
         "destruction for reference");
  RValue RV = EmitReferenceBindingToExpr(Init);
  EmitStoreOfScalar(RV.getScalarVal(), DeclAddr, false, T);
}

/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
                                                  llvm::FunctionCallee dtor,
                                                  llvm::Constant *addr) {
  // Get the destructor function type, void(*)(void).
  llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
  }

  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(
      ty, FnName.str(), FI, VD.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(&VD, DynamicInitKind::AtExit),
                    CGM.getContext().VoidTy, fn, FI, FunctionArgList());

  llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);

  // Make sure the call and the callee agree on calling convention.
  if (auto *dtorFn = dyn_cast<llvm::Function>(
          dtor.getCallee()->stripPointerCastsAndAliases()))
    call->setCallingConv(dtorFn->getCallingConv());

  CGF.FinishFunction();

  return fn;
}

/// Register a global destructor using the C atexit runtime function.
void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
                                                   llvm::FunctionCallee dtor,
                                                   llvm::Constant *addr) {
  // Create a function which calls the destructor.
  llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
  registerGlobalDtorWithAtExit(dtorStub);
}

/// Emit a call to atexit, registering \p dtorStub to run at process exit.
void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
  // extern "C" int atexit(void (*f)(void));
  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(IntTy, dtorStub->getType(), false);

  llvm::FunctionCallee atexit =
      CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeList(),
                                /*Local=*/true);
  if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit.getCallee()))
    atexitFn->setDoesNotThrow();

  EmitNounwindRuntimeCall(atexit, dtorStub);
}

/// Emit a run-once (guarded) initialization of \p D via the C++ ABI layer.
void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
                                         llvm::GlobalVariable *DeclPtr,
                                         bool PerformInit) {
  // If we've been asked to forbid guard variables, emit an error now.
  // This diagnostic is hard-coded for Darwin's use case; we can find
  // better phrasing if someone else needs it.
  if (CGM.getCodeGenOpts().ForbidGuardVariables)
    CGM.Error(D.getLocation(),
              "this initialization requires a guard variable, which "
              "the kernel does not support");

  CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
}

/// Emit the conditional branch on a guard variable's "needs initialization"
/// test, attaching branch weights that mark the init path as rarely taken.
void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
                                               llvm::BasicBlock *InitBlock,
                                               llvm::BasicBlock *NoInitBlock,
                                               GuardKind Kind,
                                               const VarDecl *D) {
  assert((Kind == GuardKind::TlsGuard || D) && "no guarded variable");

  // A guess at how many times we will enter the initialization of a
  // variable, depending on the kind of variable.
  static const uint64_t InitsPerTLSVar = 1024;
  static const uint64_t InitsPerLocalVar = 1024 * 1024;

  llvm::MDNode *Weights;
  if (Kind == GuardKind::VariableGuard && !D->isLocalVarDecl()) {
    // For non-local variables, don't apply any weighting for now. Due to our
    // use of COMDATs, we expect there to be at most one initialization of the
    // variable per DSO, but we have no way to know how many DSOs will try to
    // initialize the variable.
    Weights = nullptr;
  } else {
    uint64_t NumInits;
    // FIXME: For the TLS case, collect and use profiling information to
    // determine a more accurate branch weight.
    if (Kind == GuardKind::TlsGuard || D->getTLSKind())
      NumInits = InitsPerTLSVar;
    else
      NumInits = InitsPerLocalVar;

    // The probability of us entering the initializer is
    // 1 / (total number of times we attempt to initialize the variable).
    llvm::MDBuilder MDHelper(CGM.getLLVMContext());
    Weights = MDHelper.createBranchWeights(1, NumInits - 1);
  }

  Builder.CreateCondBr(NeedsInit, InitBlock, NoInitBlock, Weights);
}

/// Create an internal-linkage helper function used for global initialization
/// or destruction, applying the static-init section, runtime calling
/// convention, sanitizer attributes and return-address-signing attributes
/// that such helpers require.
llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
    llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
    SourceLocation Loc, bool TLS) {
  llvm::Function *Fn =
      llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
                             Name, &getModule());
  if (!getLangOpts().AppleKext && !TLS) {
    // Set the section if needed.
    if (const char *Section = getTarget().getStaticInitSectionSpecifier())
      Fn->setSection(Section);
  }

  SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  Fn->setCallingConv(getRuntimeCC());

  if (!getLangOpts().Exceptions)
    Fn->setDoesNotThrow();

  // Propagate each enabled sanitizer onto the helper, unless this location
  // is excluded from that sanitizer by the blacklist.
  if (getLangOpts().Sanitize.has(SanitizerKind::Address) &&
      !isInSanitizerBlacklist(SanitizerKind::Address, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelAddress) &&
      !isInSanitizerBlacklist(SanitizerKind::KernelAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::HWAddress) &&
      !isInSanitizerBlacklist(SanitizerKind::HWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelHWAddress) &&
      !isInSanitizerBlacklist(SanitizerKind::KernelHWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::MemTag) &&
      !isInSanitizerBlacklist(SanitizerKind::MemTag, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);

  if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
      !isInSanitizerBlacklist(SanitizerKind::Thread, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);

  if (getLangOpts().Sanitize.has(SanitizerKind::Memory) &&
      !isInSanitizerBlacklist(SanitizerKind::Memory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelMemory) &&
      !isInSanitizerBlacklist(SanitizerKind::KernelMemory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::SafeStack) &&
      !isInSanitizerBlacklist(SanitizerKind::SafeStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SafeStack);

  if (getLangOpts().Sanitize.has(SanitizerKind::ShadowCallStack) &&
      !isInSanitizerBlacklist(SanitizerKind::ShadowCallStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  // Return-address signing / BTI (e.g. AArch64 pointer authentication).
  auto RASignKind = getCodeGenOpts().getSignReturnAddress();
  if (RASignKind != CodeGenOptions::SignReturnAddressScope::None) {
    Fn->addFnAttr("sign-return-address",
                  RASignKind == CodeGenOptions::SignReturnAddressScope::All
                      ? "all"
                      : "non-leaf");
    auto RASignKey = getCodeGenOpts().getSignReturnAddressKey();
    Fn->addFnAttr("sign-return-address-key",
                  RASignKey == CodeGenOptions::SignReturnAddressKeyValue::AKey
                      ? "a_key"
                      : "b_key");
  }

  if (getCodeGenOpts().BranchTargetEnforcement)
    Fn->addFnAttr("branch-target-enforcement");

  return Fn;
}

/// Create a global pointer to a function that will initialize a global
/// variable. The user has requested that this pointer be emitted in a specific
/// section.
void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
                                          llvm::GlobalVariable *GV,
                                          llvm::Function *InitFunc,
                                          InitSegAttr *ISA) {
  llvm::GlobalVariable *PtrArray = new llvm::GlobalVariable(
      TheModule, InitFunc->getType(), /*isConstant=*/true,
      llvm::GlobalValue::PrivateLinkage, InitFunc, "__cxx_init_fn_ptr");
  PtrArray->setSection(ISA->getSection());
  addUsedGlobal(PtrArray);

  // If the GV is already in a comdat group, then we have to join it.
  if (llvm::Comdat *C = GV->getComdat())
    PtrArray->setComdat(C);
}

/// Emit the dynamic-initializer function for the global variable \p D and
/// record how it will be invoked (TLS init list, init_seg pointer,
/// prioritized ctor, COMDAT-keyed ctor, or the ordered per-TU list).
void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
                                            llvm::GlobalVariable *Addr,
                                            bool PerformInit) {

  // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__,
  // __constant__ and __shared__ variables defined in namespace scope,
  // that are of class type, cannot have a non-empty constructor. All
  // the checks have been done in Sema by now. Whatever initializers
  // are allowed are empty and we just need to ignore them here.
  if (getLangOpts().CUDAIsDevice && !getLangOpts().GPUAllowDeviceInit &&
      (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
       D->hasAttr<CUDASharedAttr>()))
    return;

  // OpenMP declare-target variables may be handled entirely by the OpenMP
  // runtime support.
  if (getLangOpts().OpenMP &&
      getOpenMPRuntime().emitDeclareTargetVarDefinition(D, Addr, PerformInit))
    return;

  // Check if we've already initialized this decl.
  auto I = DelayedCXXInitPosition.find(D);
  if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getCXXABI().getMangleContext().mangleDynamicInitializer(D, Out);
  }

  // Create a variable initialization function.
  llvm::Function *Fn =
      CreateGlobalInitOrDestructFunction(FTy, FnName.str(),
                                         getTypes().arrangeNullaryFunction(),
                                         D->getLocation());

  auto *ISA = D->getAttr<InitSegAttr>();
  CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
                                                          PerformInit);

  llvm::GlobalVariable *COMDATKey =
      supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;

  if (D->getTLSKind()) {
    // FIXME: Should we support init_priority for thread_local?
    // FIXME: We only need to register one __cxa_thread_atexit function for the
    // entire TU.
    CXXThreadLocalInits.push_back(Fn);
    CXXThreadLocalInitVars.push_back(D);
  } else if (PerformInit && ISA) {
    EmitPointerToInitFunc(D, Addr, Fn, ISA);
  } else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
    OrderGlobalInits Key(IPA->getPriority(), PrioritizedCXXGlobalInits.size());
    PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
  } else if (isTemplateInstantiation(D->getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(D) == GVA_DiscardableODR) {
    // C++ [basic.start.init]p2:
    //   Definitions of explicitly specialized class template static data
    //   members have ordered initialization. Other class template static data
    //   members (i.e., implicitly or explicitly instantiated specializations)
    //   have unordered initialization.
    //
    // As a consequence, we can put them into their own llvm.global_ctors entry.
    //
    // If the global is externally visible, put the initializer into a COMDAT
    // group with the global being initialized. On most platforms, this is a
    // minor startup time optimization. In the MS C++ ABI, there are no guard
    // variables, so this COMDAT key is required for correctness.
    AddGlobalCtor(Fn, 65535, COMDATKey);
    if (getTarget().getCXXABI().isMicrosoft() && COMDATKey) {
      // In the MS C++ ABI, the template static data member is emitted via a
      // linker directive, so mark the COMDAT key as used to keep it alive.
      addUsedGlobal(COMDATKey);
    }
  } else if (D->hasAttr<SelectAnyAttr>()) {
    // SelectAny globals will be comdat-folded. Put the initializer into a
    // COMDAT group associated with the global, so the initializers get folded
    // too.
    AddGlobalCtor(Fn, 65535, COMDATKey);
  } else {
    I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash.
    if (I == DelayedCXXInitPosition.end()) {
      CXXGlobalInits.push_back(Fn);
    } else if (I->second != ~0U) {
      // A slot was reserved for this initializer earlier; fill it in now.
      assert(I->second < CXXGlobalInits.size() &&
             CXXGlobalInits[I->second] == nullptr);
      CXXGlobalInits[I->second] = Fn;
    }
  }

  // Remember that we already emitted the initializer for this global.
  DelayedCXXInitPosition[D] = ~0U;
}

/// Hand the collected thread_local initializers to the C++ ABI layer and
/// reset the per-TU lists.
void CodeGenModule::EmitCXXThreadLocalInitFunc() {
  getCXXABI().EmitThreadLocalInitFuncs(
      *this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);

  CXXThreadLocalInits.clear();
  CXXThreadLocalInitVars.clear();
  CXXThreadLocals.clear();
}

/// Emit the translation unit's global initialization function(s): one
/// _GLOBAL__I_<prio> function per init_priority chunk, followed by the
/// _GLOBAL__sub_I_<filename> function for ordinary initializers.
void
CodeGenModule::EmitCXXGlobalInitFunc() {
  // Drop trailing null slots (initializers that were later emitted as
  // constants and never filled in).
  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
    CXXGlobalInits.pop_back();

  if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global initialization function.
  if (!PrioritizedCXXGlobalInits.empty()) {
    SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
    llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
                         PrioritizedCXXGlobalInits.end());
    // Iterate over "chunks" of ctors with same priority and emit each chunk
    // into separate function. Note - everything is sorted first by priority,
    // second - by lex order, so we emit ctor functions in proper order.
    for (SmallVectorImpl<GlobalInitData >::iterator
           I = PrioritizedCXXGlobalInits.begin(),
           E = PrioritizedCXXGlobalInits.end(); I != E; ) {
      SmallVectorImpl<GlobalInitData >::iterator
        PrioE = std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());

      LocalCXXGlobalInits.clear();
      unsigned Priority = I->first.priority;
      // Compute the function suffix from priority. Prepend with zeroes to make
      // sure the function names are also ordered as priorities.
      std::string PrioritySuffix = llvm::utostr(Priority);
      // Priority is always <= 65535 (enforced by sema).
      PrioritySuffix = std::string(6-PrioritySuffix.size(), '0')+PrioritySuffix;
      llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
          FTy, "_GLOBAL__I_" + PrioritySuffix, FI);

      for (; I < PrioE; ++I)
        LocalCXXGlobalInits.push_back(I->second);

      CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, LocalCXXGlobalInits);
      AddGlobalCtor(Fn, Priority);
    }
    PrioritizedCXXGlobalInits.clear();
  }

  // Include the filename in the symbol name. Including "sub_" matches gcc and
  // makes sure these symbols appear lexicographically behind the symbols with
  // priority emitted above.
  SmallString<128> FileName = llvm::sys::path::filename(getModule().getName());
  if (FileName.empty())
    FileName = "<null>";

  for (size_t i = 0; i < FileName.size(); ++i) {
    // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
    // to be the set of C preprocessing numbers.
    if (!isPreprocessingNumberBody(FileName[i]))
      FileName[i] = '_';
  }

  llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
      FTy, llvm::Twine("_GLOBAL__sub_I_", FileName), FI);

  CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
  AddGlobalCtor(Fn);

  // In OpenCL global init functions must be converted to kernels in order to
  // be able to launch them from the host.
  // FIXME: Some more work might be needed to handle destructors correctly.
  // Current initialization function makes use of function pointers callbacks.
  // We can't support function pointers especially between host and device.
  // However it seems global destruction has little meaning without any
  // dynamic resource allocation on the device and program scope variables are
  // destroyed by the runtime when program is released.
  if (getLangOpts().OpenCL) {
    GenOpenCLArgMetadata(Fn);
    Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
  }

  if (getLangOpts().HIP) {
    Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
    Fn->addFnAttr("device-init");
  }

  CXXGlobalInits.clear();
}

/// Emit _GLOBAL__D_a, the function that runs all registered global
/// destructors, and register it in llvm.global_dtors.
void CodeGenModule::EmitCXXGlobalDtorFunc() {
  if (CXXGlobalDtors.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);

  // Create our global destructor function.
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
  llvm::Function *Fn =
      CreateGlobalInitOrDestructFunction(FTy, "_GLOBAL__D_a", FI);

  CodeGenFunction(*this).GenerateCXXGlobalDtorsFunc(Fn, CXXGlobalDtors);
  AddGlobalDtor(Fn);
}

/// Emit the code necessary to initialize the given global variable.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
                                                       const VarDecl *D,
                                                       llvm::GlobalVariable *Addr,
                                                       bool PerformInit) {
  // Check if we need to emit debug info for variable initializer.
  if (D->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  CurEHLocation = D->getBeginLoc();

  StartFunction(GlobalDecl(D, DynamicInitKind::Initializer),
                getContext().VoidTy, Fn, getTypes().arrangeNullaryFunction(),
                FunctionArgList(), D->getLocation(),
                D->getInit()->getExprLoc());

  // Use guarded initialization if the global variable is weak. This
  // occurs for, e.g., instantiated static data members and
  // definitions explicitly marked weak.
  //
  // Also use guarded initialization for a variable with dynamic TLS and
  // unordered initialization. (If the initialization is ordered, the ABI
  // layer will guard the whole-TU initialization for us.)
  if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage() ||
      (D->getTLSKind() == VarDecl::TLS_Dynamic &&
       isTemplateInstantiation(D->getTemplateSpecializationKind()))) {
    EmitCXXGuardedInit(*D, Addr, PerformInit);
  } else {
    EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
  }

  FinishFunction();
}

/// Emit the body of a global initialization function that calls each
/// function in \p Decls in order, optionally protected by a run-once
/// \p Guard variable (used for TLS init functions).
void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
                                           ArrayRef<llvm::Function *> Decls,
                                           ConstantAddress Guard) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    llvm::BasicBlock *ExitBlock = nullptr;
    if (Guard.isValid()) {
      // If we have a guard variable, check whether we've already performed
      // these initializations. This happens for TLS initialization functions.
      llvm::Value *GuardVal = Builder.CreateLoad(Guard);
      llvm::Value *Uninit = Builder.CreateIsNull(GuardVal,
                                                 "guard.uninitialized");
      llvm::BasicBlock *InitBlock = createBasicBlock("init");
      ExitBlock = createBasicBlock("exit");
      EmitCXXGuardedInitBranch(Uninit, InitBlock, ExitBlock,
                               GuardKind::TlsGuard, nullptr);
      EmitBlock(InitBlock);
      // Mark as initialized before initializing anything else. If the
      // initializers use previously-initialized thread_local vars, that's
      // probably supposed to be OK, but the standard doesn't say.
      Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(),1), Guard);

      // The guard variable can't ever change again.
      EmitInvariantStart(
          Guard.getPointer(),
          CharUnits::fromQuantity(
              CGM.getDataLayout().getTypeAllocSize(GuardVal->getType())));
    }

    RunCleanupsScope Scope(*this);

    // When building in Objective-C++ ARC mode, create an autorelease pool
    // around the global initializers.
    if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
      llvm::Value *token = EmitObjCAutoreleasePoolPush();
      EmitObjCAutoreleasePoolCleanup(token);
    }

    for (unsigned i = 0, e = Decls.size(); i != e; ++i)
      if (Decls[i])
        EmitRuntimeCall(Decls[i]);

    Scope.ForceCleanup();

    if (ExitBlock) {
      Builder.CreateBr(ExitBlock);
      EmitBlock(ExitBlock);
    }
  }

  FinishFunction();
}

/// Emit the body of the global destructor function, invoking each recorded
/// (dtor, object) pair in reverse order of registration.
void CodeGenFunction::GenerateCXXGlobalDtorsFunc(
    llvm::Function *Fn,
    const std::vector<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
                                 llvm::Constant *>> &DtorsAndObjects) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    // Emit the dtors, in reverse order from construction.
    for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
      llvm::FunctionType *CalleeTy;
      llvm::Value *Callee;
      llvm::Constant *Arg;
      std::tie(CalleeTy, Callee, Arg) = DtorsAndObjects[e - i - 1];
      llvm::CallInst *CI = Builder.CreateCall(CalleeTy, Callee, Arg);
      // Make sure the call and the callee agree on calling convention.
      if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
        CI->setCallingConv(F->getCallingConv());
    }
  }

  FinishFunction();
}

/// generateDestroyHelper - Generates a helper function which, when
/// invoked, destroys the given object. The address of the object
/// should be in global memory.
758 llvm::Function *CodeGenFunction::generateDestroyHelper( 759 Address addr, QualType type, Destroyer *destroyer, 760 bool useEHCleanupForArray, const VarDecl *VD) { 761 FunctionArgList args; 762 ImplicitParamDecl Dst(getContext(), getContext().VoidPtrTy, 763 ImplicitParamDecl::Other); 764 args.push_back(&Dst); 765 766 const CGFunctionInfo &FI = 767 CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, args); 768 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI); 769 llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction( 770 FTy, "__cxx_global_array_dtor", FI, VD->getLocation()); 771 772 CurEHLocation = VD->getBeginLoc(); 773 774 StartFunction(VD, getContext().VoidTy, fn, FI, args); 775 776 emitDestroy(addr, type, destroyer, useEHCleanupForArray); 777 778 FinishFunction(); 779 780 return fn; 781 } 782