//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ declarations
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Path.h"

using namespace clang;
using namespace CodeGen;

/// Emit the initializer for a global variable of non-reference type,
/// dispatching on the evaluation kind (scalar / complex / aggregate) of
/// the variable's type.
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    // ObjC strong/weak globals must be stored through the ObjC runtime's
    // assignment entry points rather than with a plain store.
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(Init, AggValueSlot::forLValue(lv,AggValueSlot::IsDestructed,
                                          AggValueSlot::DoesNotNeedGCBarriers,
                                                  AggValueSlot::IsNotAliased));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            ConstantAddress addr) {
  CodeGenModule &CGM = CGF.CGM;

  // FIXME: __attribute__((cleanup)) ?

  QualType type = D.getType();
  QualType::DestructionKind dtorKind = type.isDestructedType();

  switch (dtorKind) {
  case QualType::DK_none:
    // Trivially-destructible: nothing to register.
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
    // We don't care about releasing objects during process teardown.
    assert(!D.getTLSKind() && "should have rejected this");
    return;
  }

  // The function to register with __cxa_atexit/atexit, and the argument it
  // should be invoked with.
  llvm::Constant *function;
  llvm::Constant *argument;

  // Special-case non-array C++ destructors, if they have the right signature.
  // Under some ABIs, destructors return this instead of void, and cannot be
  // passed directly to __cxa_atexit if the target does not allow this mismatch.
  const CXXRecordDecl *Record = type->getAsCXXRecordDecl();
  bool CanRegisterDestructor =
      Record && (!CGM.getCXXABI().HasThisReturn(
                     GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
                 CGM.getCXXABI().canCallMismatchedFunctionType());
  // If __cxa_atexit is disabled via a flag, a different helper function is
  // generated elsewhere which uses atexit instead, and it takes the destructor
  // directly.
  bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
  if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
    // Register the destructor itself; its argument is the object's address,
    // cast to the destructor's expected parameter type.
    assert(!Record->hasTrivialDestructor());
    CXXDestructorDecl *dtor = Record->getDestructor();

    function = CGM.getAddrOfCXXStructor(dtor, StructorType::Complete);
    argument = llvm::ConstantExpr::getBitCast(
        addr.getPointer(), CGF.getTypes().ConvertType(type)->getPointerTo());

  // Otherwise, the standard logic requires a helper function.
  } else {
    // The helper closes over the address, so the registered argument is a
    // null i8*.
    function = CodeGenFunction(CGM)
                   .generateDestroyHelper(addr, type, CGF.getDestroyer(dtorKind),
                                          CGF.needsEHCleanup(dtorKind), &D);
    argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
  }

  CGM.getCXXABI().registerGlobalDtor(CGF, D, function, argument);
}

/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
                              llvm::Constant *Addr) {
  // Do not emit the intrinsic if we're not optimizing.
  if (!CGF.CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // Grab the llvm.invariant.start intrinsic.
  llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
  // Overloaded address space type.
  llvm::Type *ObjectPtr[1] = {CGF.Int8PtrTy};
  llvm::Constant *InvariantStart = CGF.CGM.getIntrinsic(InvStartID, ObjectPtr);

  // Emit a call with the size in bytes of the object.
  CharUnits WidthChars = CGF.getContext().getTypeSizeInChars(D.getType());
  uint64_t Width = WidthChars.getQuantity();
  llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(CGF.Int64Ty, Width),
                           llvm::ConstantExpr::getBitCast(Addr, CGF.Int8PtrTy)};
  CGF.Builder.CreateCall(InvariantStart, Args);
}

/// Emit the dynamic initialization (and, when required, destructor
/// registration) for a single global variable. DeclPtr is the variable's
/// address; if PerformInit is false, only destruction/invariance handling
/// is emitted.
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::Constant *DeclPtr,
                                               bool PerformInit) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  // The address space of a static local variable (DeclPtr) may be different
  // from the address space of the "this" argument of the constructor. In that
  // case, we need an addrspacecast before calling the constructor.
  //
  //   struct StructWithCtor {
  //     __device__ StructWithCtor() {...}
  //   };
  //   __device__ void foo() {
  //     __shared__ StructWithCtor s;
  //     ...
  //   }
  //
  // For example, in the above CUDA code, the static local variable s has a
  // "shared" address space qualifier, but the constructor of StructWithCtor
  // expects "this" in the "generic" address space.
  unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(T);
  unsigned ActualAddrSpace = DeclPtr->getType()->getPointerAddressSpace();
  if (ActualAddrSpace != ExpectedAddrSpace) {
    llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(T);
    llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
    DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
  }

  ConstantAddress DeclAddr(DeclPtr, getContext().getDeclAlign(&D));

  if (!T->isReferenceType()) {
    if (getLangOpts().OpenMP && D.hasAttr<OMPThreadPrivateDeclAttr>())
      (void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
          &D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
          PerformInit, this);
    if (PerformInit)
      EmitDeclInit(*this, D, DeclAddr);
    // A constant object never needs destruction, so mark it invariant
    // instead of registering a destructor.
    if (CGM.isTypeConstant(D.getType(), true))
      EmitDeclInvariant(*this, D, DeclPtr);
    else
      EmitDeclDestroy(*this, D, DeclAddr);
    return;
  }

  // Reference-type globals: bind the reference and store the resulting
  // pointer value.
  assert(PerformInit && "cannot have constant initializer which needs "
         "destruction for reference");
  RValue RV = EmitReferenceBindingToExpr(Init);
  EmitStoreOfScalar(RV.getScalarVal(), DeclAddr, false, T);
}

/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
llvm::Constant *CodeGenFunction::createAtExitStub(const VarDecl &VD,
                                                  llvm::Constant *dtor,
                                                  llvm::Constant *addr) {
  // Get the destructor function type, void(*)(void).
  llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
  }

  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(ty, FnName.str(),
                                                              FI,
                                                              VD.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(&VD, CGM.getContext().VoidTy, fn, FI, FunctionArgList());

  llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);

  // Make sure the call and the callee agree on calling convention.
  if (llvm::Function *dtorFn =
          dyn_cast<llvm::Function>(dtor->stripPointerCasts()))
    call->setCallingConv(dtorFn->getCallingConv());

  CGF.FinishFunction();

  return fn;
}

/// Register a global destructor using the C atexit runtime function.
void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
                                                   llvm::Constant *dtor,
                                                   llvm::Constant *addr) {
  // Create a function which calls the destructor.
  llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);

  // extern "C" int atexit(void (*f)(void));
  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(IntTy, dtorStub->getType(), false);

  llvm::Constant *atexit =
      CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeList(),
                                /*Local=*/true);
  if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit))
    atexitFn->setDoesNotThrow();

  EmitNounwindRuntimeCall(atexit, dtorStub);
}

/// Emit the guarded (run-at-most-once) initialization of a global variable,
/// delegating the guard scheme to the C++ ABI.
void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
                                         llvm::GlobalVariable *DeclPtr,
                                         bool PerformInit) {
  // If we've been asked to forbid guard variables, emit an error now.
  // This diagnostic is hard-coded for Darwin's use case; we can find
  // better phrasing if someone else needs it.
  if (CGM.getCodeGenOpts().ForbidGuardVariables)
    CGM.Error(D.getLocation(),
              "this initialization requires a guard variable, which "
              "the kernel does not support");

  CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
}

/// Emit the conditional branch on a guard check, attaching branch-weight
/// metadata that marks the "needs init" path as cold where appropriate.
void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
                                               llvm::BasicBlock *InitBlock,
                                               llvm::BasicBlock *NoInitBlock,
                                               GuardKind Kind,
                                               const VarDecl *D) {
  assert((Kind == GuardKind::TlsGuard || D) && "no guarded variable");

  // A guess at how many times we will enter the initialization of a
  // variable, depending on the kind of variable.
  static const uint64_t InitsPerTLSVar = 1024;
  static const uint64_t InitsPerLocalVar = 1024 * 1024;

  llvm::MDNode *Weights;
  if (Kind == GuardKind::VariableGuard && !D->isLocalVarDecl()) {
    // For non-local variables, don't apply any weighting for now. Due to our
    // use of COMDATs, we expect there to be at most one initialization of the
    // variable per DSO, but we have no way to know how many DSOs will try to
    // initialize the variable.
    Weights = nullptr;
  } else {
    uint64_t NumInits;
    // FIXME: For the TLS case, collect and use profiling information to
    // determine a more accurate branch weight.
    if (Kind == GuardKind::TlsGuard || D->getTLSKind())
      NumInits = InitsPerTLSVar;
    else
      NumInits = InitsPerLocalVar;

    // The probability of us entering the initializer is
    // 1 / (total number of times we attempt to initialize the variable).
    llvm::MDBuilder MDHelper(CGM.getLLVMContext());
    Weights = MDHelper.createBranchWeights(1, NumInits - 1);
  }

  Builder.CreateCondBr(NeedsInit, InitBlock, NoInitBlock, Weights);
}

/// Create an internal-linkage function suitable for holding global
/// initialization or destruction code, applying the target's static-init
/// section and the module's sanitizer attributes.
llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
    llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
    SourceLocation Loc, bool TLS) {
  llvm::Function *Fn =
      llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
                             Name, &getModule());
  if (!getLangOpts().AppleKext && !TLS) {
    // Set the section if needed.
    if (const char *Section = getTarget().getStaticInitSectionSpecifier())
      Fn->setSection(Section);
  }

  SetInternalFunctionAttributes(nullptr, Fn, FI);

  Fn->setCallingConv(getRuntimeCC());

  if (!getLangOpts().Exceptions)
    Fn->setDoesNotThrow();

  // Propagate each enabled sanitizer onto the function unless its location
  // is blacklisted for that sanitizer.
  if (getLangOpts().Sanitize.has(SanitizerKind::Address) &&
      !isInSanitizerBlacklist(SanitizerKind::Address, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelAddress) &&
      !isInSanitizerBlacklist(SanitizerKind::KernelAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::HWAddress) &&
      !isInSanitizerBlacklist(SanitizerKind::HWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
      !isInSanitizerBlacklist(SanitizerKind::Thread, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);

  if (getLangOpts().Sanitize.has(SanitizerKind::Memory) &&
      !isInSanitizerBlacklist(SanitizerKind::Memory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::SafeStack) &&
      !isInSanitizerBlacklist(SanitizerKind::SafeStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SafeStack);

  return Fn;
}

/// Create a global pointer to a function that will initialize a global
/// variable. The user has requested that this pointer be emitted in a specific
/// section.
void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
                                          llvm::GlobalVariable *GV,
                                          llvm::Function *InitFunc,
                                          InitSegAttr *ISA) {
  llvm::GlobalVariable *PtrArray = new llvm::GlobalVariable(
      TheModule, InitFunc->getType(), /*isConstant=*/true,
      llvm::GlobalValue::PrivateLinkage, InitFunc, "__cxx_init_fn_ptr");
  PtrArray->setSection(ISA->getSection());
  addUsedGlobal(PtrArray);

  // If the GV is already in a comdat group, then we have to join it.
  if (llvm::Comdat *C = GV->getComdat())
    PtrArray->setComdat(C);
}

/// Create the dynamic-initializer function for a global variable and record
/// it in the appropriate bucket (thread-local, init_seg section, prioritized,
/// per-variable global_ctors entry, or the default ordered list).
void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
                                            llvm::GlobalVariable *Addr,
                                            bool PerformInit) {

  // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__,
  // __constant__ and __shared__ variables defined in namespace scope,
  // that are of class type, cannot have a non-empty constructor. All
  // the checks have been done in Sema by now. Whatever initializers
  // are allowed are empty and we just need to ignore them here.
  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
      (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
       D->hasAttr<CUDASharedAttr>()))
    return;

  // Check if we've already initialized this decl.
  // (~0U in DelayedCXXInitPosition marks an already-emitted initializer.)
  auto I = DelayedCXXInitPosition.find(D);
  if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getCXXABI().getMangleContext().mangleDynamicInitializer(D, Out);
  }

  // Create a variable initialization function.
  llvm::Function *Fn =
      CreateGlobalInitOrDestructFunction(FTy, FnName.str(),
                                         getTypes().arrangeNullaryFunction(),
                                         D->getLocation());

  auto *ISA = D->getAttr<InitSegAttr>();
  CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
                                                          PerformInit);

  llvm::GlobalVariable *COMDATKey =
      supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;

  if (D->getTLSKind()) {
    // FIXME: Should we support init_priority for thread_local?
    // FIXME: We only need to register one __cxa_thread_atexit function for the
    // entire TU.
    CXXThreadLocalInits.push_back(Fn);
    CXXThreadLocalInitVars.push_back(D);
  } else if (PerformInit && ISA) {
    EmitPointerToInitFunc(D, Addr, Fn, ISA);
  } else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
    OrderGlobalInits Key(IPA->getPriority(), PrioritizedCXXGlobalInits.size());
    PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
  } else if (isTemplateInstantiation(D->getTemplateSpecializationKind())) {
    // C++ [basic.start.init]p2:
    //   Definitions of explicitly specialized class template static data
    //   members have ordered initialization. Other class template static data
    //   members (i.e., implicitly or explicitly instantiated specializations)
    //   have unordered initialization.
    //
    // As a consequence, we can put them into their own llvm.global_ctors entry.
    //
    // If the global is externally visible, put the initializer into a COMDAT
    // group with the global being initialized. On most platforms, this is a
    // minor startup time optimization. In the MS C++ ABI, there are no guard
    // variables, so this COMDAT key is required for correctness.
    AddGlobalCtor(Fn, 65535, COMDATKey);
  } else if (D->hasAttr<SelectAnyAttr>()) {
    // SelectAny globals will be comdat-folded. Put the initializer into a
    // COMDAT group associated with the global, so the initializers get folded
    // too.
    AddGlobalCtor(Fn, 65535, COMDATKey);
  } else {
    I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash.
    if (I == DelayedCXXInitPosition.end()) {
      CXXGlobalInits.push_back(Fn);
    } else if (I->second != ~0U) {
      // A slot was reserved earlier to preserve declaration order; fill it.
      assert(I->second < CXXGlobalInits.size() &&
             CXXGlobalInits[I->second] == nullptr);
      CXXGlobalInits[I->second] = Fn;
    }
  }

  // Remember that we already emitted the initializer for this global.
  DelayedCXXInitPosition[D] = ~0U;
}

/// Hand the collected thread_local variables and their initializer functions
/// to the C++ ABI, then reset the per-module lists.
void CodeGenModule::EmitCXXThreadLocalInitFunc() {
  getCXXABI().EmitThreadLocalInitFuncs(
      *this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);

  CXXThreadLocalInits.clear();
  CXXThreadLocalInitVars.clear();
  CXXThreadLocals.clear();
}

/// Emit the module-level global initialization function(s): one function per
/// init_priority "chunk", plus a single _GLOBAL__sub_I_<file> function for
/// the remaining ordered initializers.
void
CodeGenModule::EmitCXXGlobalInitFunc() {
  // Drop trailing null slots (reserved but never filled).
  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
    CXXGlobalInits.pop_back();

  if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global initialization function.
  if (!PrioritizedCXXGlobalInits.empty()) {
    SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
    llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
                         PrioritizedCXXGlobalInits.end());
    // Iterate over "chunks" of ctors with same priority and emit each chunk
    // into separate function. Note - everything is sorted first by priority,
    // second - by lex order, so we emit ctor functions in proper order.
    for (SmallVectorImpl<GlobalInitData >::iterator
             I = PrioritizedCXXGlobalInits.begin(),
             E = PrioritizedCXXGlobalInits.end(); I != E; ) {
      SmallVectorImpl<GlobalInitData >::iterator
          PrioE = std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());

      LocalCXXGlobalInits.clear();
      unsigned Priority = I->first.priority;
      // Compute the function suffix from priority. Prepend with zeroes to make
      // sure the function names are also ordered as priorities.
      std::string PrioritySuffix = llvm::utostr(Priority);
      // Priority is always <= 65535 (enforced by sema).
      PrioritySuffix = std::string(6-PrioritySuffix.size(), '0')+PrioritySuffix;
      llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
          FTy, "_GLOBAL__I_" + PrioritySuffix, FI);

      for (; I < PrioE; ++I)
        LocalCXXGlobalInits.push_back(I->second);

      CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, LocalCXXGlobalInits);
      AddGlobalCtor(Fn, Priority);
    }
    PrioritizedCXXGlobalInits.clear();
  }

  // Include the filename in the symbol name. Including "sub_" matches gcc and
  // makes sure these symbols appear lexicographically behind the symbols with
  // priority emitted above.
  SmallString<128> FileName = llvm::sys::path::filename(getModule().getName());
  if (FileName.empty())
    FileName = "<null>";

  for (size_t i = 0; i < FileName.size(); ++i) {
    // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
    // to be the set of C preprocessing numbers.
    if (!isPreprocessingNumberBody(FileName[i]))
      FileName[i] = '_';
  }

  llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
      FTy, llvm::Twine("_GLOBAL__sub_I_", FileName), FI);

  CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
  AddGlobalCtor(Fn);

  CXXGlobalInits.clear();
}

/// Emit the module-level _GLOBAL__D_a function that runs all registered
/// global destructors, and register it in llvm.global_dtors.
void CodeGenModule::EmitCXXGlobalDtorFunc() {
  if (CXXGlobalDtors.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);

  // Create our global destructor function.
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
  llvm::Function *Fn =
      CreateGlobalInitOrDestructFunction(FTy, "_GLOBAL__D_a", FI);

  CodeGenFunction(*this).GenerateCXXGlobalDtorsFunc(Fn, CXXGlobalDtors);
  AddGlobalDtor(Fn);
}

/// Emit the code necessary to initialize the given global variable.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
                                                       const VarDecl *D,
                                                 llvm::GlobalVariable *Addr,
                                                       bool PerformInit) {
  // Check if we need to emit debug info for variable initializer.
  if (D->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  CurEHLocation = D->getLocStart();

  StartFunction(GlobalDecl(D), getContext().VoidTy, Fn,
                getTypes().arrangeNullaryFunction(),
                FunctionArgList(), D->getLocation(),
                D->getInit()->getExprLoc());

  // Use guarded initialization if the global variable is weak. This
  // occurs for, e.g., instantiated static data members and
  // definitions explicitly marked weak.
  if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage()) {
    EmitCXXGuardedInit(*D, Addr, PerformInit);
  } else {
    EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
  }

  FinishFunction();
}

/// Generate the body of a global-init function: optionally guard it with a
/// TLS init flag, then call each per-variable initializer in order.
void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
                                           ArrayRef<llvm::Function *> Decls,
                                           Address Guard) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    llvm::BasicBlock *ExitBlock = nullptr;
    if (Guard.isValid()) {
      // If we have a guard variable, check whether we've already performed
      // these initializations. This happens for TLS initialization functions.
      llvm::Value *GuardVal = Builder.CreateLoad(Guard);
      llvm::Value *Uninit = Builder.CreateIsNull(GuardVal,
                                                 "guard.uninitialized");
      llvm::BasicBlock *InitBlock = createBasicBlock("init");
      ExitBlock = createBasicBlock("exit");
      EmitCXXGuardedInitBranch(Uninit, InitBlock, ExitBlock,
                               GuardKind::TlsGuard, nullptr);
      EmitBlock(InitBlock);
      // Mark as initialized before initializing anything else. If the
      // initializers use previously-initialized thread_local vars, that's
      // probably supposed to be OK, but the standard doesn't say.
      Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(),1), Guard);
    }

    RunCleanupsScope Scope(*this);

    // When building in Objective-C++ ARC mode, create an autorelease pool
    // around the global initializers.
    if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
      llvm::Value *token = EmitObjCAutoreleasePoolPush();
      EmitObjCAutoreleasePoolCleanup(token);
    }

    // Null entries are reserved-but-unfilled slots; skip them.
    for (unsigned i = 0, e = Decls.size(); i != e; ++i)
      if (Decls[i])
        EmitRuntimeCall(Decls[i]);

    Scope.ForceCleanup();

    if (ExitBlock) {
      Builder.CreateBr(ExitBlock);
      EmitBlock(ExitBlock);
    }
  }

  FinishFunction();
}

/// Generate the body of the module's global-destructor function, invoking
/// each registered (destructor, object) pair.
void CodeGenFunction::GenerateCXXGlobalDtorsFunc(
    llvm::Function *Fn,
    const std::vector<std::pair<llvm::WeakTrackingVH, llvm::Constant *>>
        &DtorsAndObjects) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    // Emit the dtors, in reverse order from construction.
    for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
      llvm::Value *Callee = DtorsAndObjects[e - i - 1].first;
      llvm::CallInst *CI = Builder.CreateCall(Callee,
                                              DtorsAndObjects[e - i - 1].second);
      // Make sure the call and the callee agree on calling convention.
      if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
        CI->setCallingConv(F->getCallingConv());
    }
  }

  FinishFunction();
}

/// generateDestroyHelper - Generates a helper function which, when
/// invoked, destroys the given object. The address of the object
/// should be in global memory.
llvm::Function *CodeGenFunction::generateDestroyHelper(
    Address addr, QualType type, Destroyer *destroyer,
    bool useEHCleanupForArray, const VarDecl *VD) {
  FunctionArgList args;
  ImplicitParamDecl Dst(getContext(), getContext().VoidPtrTy,
                        ImplicitParamDecl::Other);
  args.push_back(&Dst);

  const CGFunctionInfo &FI =
    CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, args);
  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(
      FTy, "__cxx_global_array_dtor", FI, VD->getLocation());

  CurEHLocation = VD->getLocStart();

  StartFunction(VD, getContext().VoidTy, fn, FI, args);

  emitDestroy(addr, type, destroyer, useEHCleanupForArray);

  FinishFunction();

  return fn;
}