//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ declarations
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "clang/Basic/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Path.h"

using namespace clang;
using namespace CodeGen;

static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(
      (D.hasGlobalStorage() ||
       (D.hasLocalStorage() && CGF.getContext().getLangOpts().OpenCLCPlusPlus)) &&
      "VarDecl must have global or local (in the case of OpenCL) storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(Init,
                    AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased,
                                            AggValueSlot::DoesNotOverlap));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

/// Emit code to cause the destruction of the given variable with
/// static storage duration.
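///
/// Depending on the ABI and the destructor's signature, this either registers
/// the complete object destructor directly (roughly
/// __cxa_atexit(&T::~T, &var, &__dso_handle) on Itanium-family ABIs) or emits
/// a small helper that destroys the object and registers that helper instead;
/// see the two branches below.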
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            ConstantAddress Addr) {
  // Honor __attribute__((no_destroy)) and bail instead of attempting
  // to emit a reference to a possibly nonexistent destructor, which
  // in turn can cause a crash. This will result in a global constructor
  // that isn't balanced out by a destructor call as intended by the
  // attribute. This also checks for -fno-c++-static-destructors and
  // bails even if the attribute is not present.
  if (D.isNoDestroy(CGF.getContext()))
    return;

  CodeGenModule &CGM = CGF.CGM;

  // FIXME: __attribute__((cleanup)) ?

  QualType Type = D.getType();
  QualType::DestructionKind DtorKind = Type.isDestructedType();

  switch (DtorKind) {
  case QualType::DK_none:
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
  case QualType::DK_nontrivial_c_struct:
    // We don't care about releasing objects during process teardown.
    assert(!D.getTLSKind() && "should have rejected this");
    return;
  }

  llvm::Constant *Func;
  llvm::Constant *Argument;

  // Special-case non-array C++ destructors, if they have the right signature.
  // Under some ABIs, destructors return this instead of void, and cannot be
  // passed directly to __cxa_atexit if the target does not allow this
  // mismatch.
  const CXXRecordDecl *Record = Type->getAsCXXRecordDecl();
  bool CanRegisterDestructor =
      Record && (!CGM.getCXXABI().HasThisReturn(
                     GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
                 CGM.getCXXABI().canCallMismatchedFunctionType());
  // If __cxa_atexit is disabled via a flag, a different helper function is
  // generated elsewhere which uses atexit instead, and it takes the destructor
  // directly.
  bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
  if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
    assert(!Record->hasTrivialDestructor());
    CXXDestructorDecl *Dtor = Record->getDestructor();

    Func = CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete);
    Argument = llvm::ConstantExpr::getBitCast(
        Addr.getPointer(), CGF.getTypes().ConvertType(Type)->getPointerTo());

  // Otherwise, the standard logic requires a helper function.
  } else {
    Func = CodeGenFunction(CGM)
               .generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
                                      CGF.needsEHCleanup(DtorKind), &D);
    Argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
  }

  CGM.getCXXABI().registerGlobalDtor(CGF, D, Func, Argument);
}

/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
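///
/// This is done by emitting a call to the llvm.invariant.start intrinsic at
/// -O1 and above; the emitted IR looks roughly like
///   call {}* @llvm.invariant.start.p0i8(i64 <size-in-bytes>, i8* <addr>)
/// (see EmitInvariantStart below).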
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
                              llvm::Constant *Addr) {
  return CGF.EmitInvariantStart(
      Addr, CGF.getContext().getTypeSizeInChars(D.getType()));
}

void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
  // Do not emit the intrinsic if we're not optimizing.
  if (!CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // Grab the llvm.invariant.start intrinsic.
  llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
  // Overloaded address space type.
  llvm::Type *ObjectPtr[1] = {Int8PtrTy};
  llvm::Constant *InvariantStart = CGM.getIntrinsic(InvStartID, ObjectPtr);

  // Emit a call with the size in bytes of the object.
  uint64_t Width = Size.getQuantity();
  llvm::Value *Args[2] = {llvm::ConstantInt::getSigned(Int64Ty, Width),
                          llvm::ConstantExpr::getBitCast(Addr, Int8PtrTy)};
  Builder.CreateCall(InvariantStart, Args);
}

void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::Constant *DeclPtr,
                                               bool PerformInit) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  // The address space of a static local variable (DeclPtr) may be different
  // from the address space of the "this" argument of the constructor. In that
  // case, we need an addrspacecast before calling the constructor.
  //
  // struct StructWithCtor {
  //   __device__ StructWithCtor() {...}
  // };
  // __device__ void foo() {
  //   __shared__ StructWithCtor s;
  //   ...
  // }
  //
  // For example, in the above CUDA code, the static local variable s has a
  // "shared" address space qualifier, but the constructor of StructWithCtor
  // expects "this" in the "generic" address space.
  unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(T);
  unsigned ActualAddrSpace = DeclPtr->getType()->getPointerAddressSpace();
  if (ActualAddrSpace != ExpectedAddrSpace) {
    llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(T);
    llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
    DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
  }

  ConstantAddress DeclAddr(DeclPtr, getContext().getDeclAlign(&D));

  if (!T->isReferenceType()) {
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        D.hasAttr<OMPThreadPrivateDeclAttr>()) {
      (void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
          &D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
          PerformInit, this);
    }
    if (PerformInit)
      EmitDeclInit(*this, D, DeclAddr);
    if (CGM.isTypeConstant(D.getType(), true))
      EmitDeclInvariant(*this, D, DeclPtr);
    else
      EmitDeclDestroy(*this, D, DeclAddr);
    return;
  }

  assert(PerformInit && "cannot have constant initializer which needs "
         "destruction for reference");
  RValue RV = EmitReferenceBindingToExpr(Init);
  EmitStoreOfScalar(RV.getScalarVal(), DeclAddr, false, T);
}

/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
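///
/// The generated stub is roughly equivalent to
///   static void __dtor_stub() { dtor(addr); }
/// where the actual symbol name is produced by mangleDynamicAtExitDestructor
/// for the variable.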
llvm::Constant *CodeGenFunction::createAtExitStub(const VarDecl &VD,
                                                  llvm::Constant *dtor,
                                                  llvm::Constant *addr) {
  // Get the destructor function type, void(*)(void).
  llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
  }

  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(ty, FnName.str(),
                                                              FI,
                                                              VD.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(&VD, CGM.getContext().VoidTy, fn, FI, FunctionArgList());

  llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);

  // Make sure the call and the callee agree on calling convention.
  if (llvm::Function *dtorFn =
          dyn_cast<llvm::Function>(dtor->stripPointerCasts()))
    call->setCallingConv(dtorFn->getCallingConv());

  CGF.FinishFunction();

  return fn;
}

/// Register a global destructor using the C atexit runtime function.
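///
/// Conceptually this emits
///   atexit(stub);
/// where stub is the helper created by createAtExitStub above, so the
/// destructor runs with the right argument at program exit.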
void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
                                                   llvm::Constant *dtor,
                                                   llvm::Constant *addr) {
  // Create a function which calls the destructor.
  llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
  registerGlobalDtorWithAtExit(dtorStub);
}

void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
  // extern "C" int atexit(void (*f)(void));
  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(IntTy, dtorStub->getType(), false);

  llvm::Constant *atexit =
      CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeList(),
                                /*Local=*/true);
  if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit))
    atexitFn->setDoesNotThrow();

  EmitNounwindRuntimeCall(atexit, dtorStub);
}

void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
                                         llvm::GlobalVariable *DeclPtr,
                                         bool PerformInit) {
  // If we've been asked to forbid guard variables, emit an error now.
  // This diagnostic is hard-coded for Darwin's use case; we can find
  // better phrasing if someone else needs it.
  if (CGM.getCodeGenOpts().ForbidGuardVariables)
    CGM.Error(D.getLocation(),
              "this initialization requires a guard variable, which "
              "the kernel does not support");

  CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
}

void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
                                               llvm::BasicBlock *InitBlock,
                                               llvm::BasicBlock *NoInitBlock,
                                               GuardKind Kind,
                                               const VarDecl *D) {
  assert((Kind == GuardKind::TlsGuard || D) && "no guarded variable");

  // A guess at how many times we will enter the initialization of a
  // variable, depending on the kind of variable.
  static const uint64_t InitsPerTLSVar = 1024;
  static const uint64_t InitsPerLocalVar = 1024 * 1024;

  llvm::MDNode *Weights;
  if (Kind == GuardKind::VariableGuard && !D->isLocalVarDecl()) {
    // For non-local variables, don't apply any weighting for now. Due to our
    // use of COMDATs, we expect there to be at most one initialization of the
    // variable per DSO, but we have no way to know how many DSOs will try to
    // initialize the variable.
    Weights = nullptr;
  } else {
    uint64_t NumInits;
    // FIXME: For the TLS case, collect and use profiling information to
    // determine a more accurate branch weight.
    if (Kind == GuardKind::TlsGuard || D->getTLSKind())
      NumInits = InitsPerTLSVar;
    else
      NumInits = InitsPerLocalVar;

    // The probability of us entering the initializer is
    // 1 / (total number of times we attempt to initialize the variable).
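    // For example, with NumInits == 1024 the branch weights below are
    // (1, 1023), i.e. the "needs init" edge is taken roughly once in 1024
    // executions.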
    llvm::MDBuilder MDHelper(CGM.getLLVMContext());
    Weights = MDHelper.createBranchWeights(1, NumInits - 1);
  }

  Builder.CreateCondBr(NeedsInit, InitBlock, NoInitBlock, Weights);
}

llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
    llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
    SourceLocation Loc, bool TLS) {
  llvm::Function *Fn =
      llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
                             Name, &getModule());
  if (!getLangOpts().AppleKext && !TLS) {
    // Set the section if needed.
    if (const char *Section = getTarget().getStaticInitSectionSpecifier())
      Fn->setSection(Section);
  }

  SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  Fn->setCallingConv(getRuntimeCC());

  if (!getLangOpts().Exceptions)
    Fn->setDoesNotThrow();

  if (getLangOpts().Sanitize.has(SanitizerKind::Address) &&
      !isInSanitizerBlacklist(SanitizerKind::Address, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelAddress) &&
      !isInSanitizerBlacklist(SanitizerKind::KernelAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::HWAddress) &&
      !isInSanitizerBlacklist(SanitizerKind::HWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelHWAddress) &&
      !isInSanitizerBlacklist(SanitizerKind::KernelHWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
      !isInSanitizerBlacklist(SanitizerKind::Thread, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);

  if (getLangOpts().Sanitize.has(SanitizerKind::Memory) &&
      !isInSanitizerBlacklist(SanitizerKind::Memory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelMemory) &&
      !isInSanitizerBlacklist(SanitizerKind::KernelMemory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::SafeStack) &&
      !isInSanitizerBlacklist(SanitizerKind::SafeStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SafeStack);

  if (getLangOpts().Sanitize.has(SanitizerKind::ShadowCallStack) &&
      !isInSanitizerBlacklist(SanitizerKind::ShadowCallStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  auto RASignKind = getCodeGenOpts().getSignReturnAddress();
  if (RASignKind != CodeGenOptions::SignReturnAddressScope::None) {
    Fn->addFnAttr("sign-return-address",
                  RASignKind == CodeGenOptions::SignReturnAddressScope::All
                      ? "all"
                      : "non-leaf");
    auto RASignKey = getCodeGenOpts().getSignReturnAddressKey();
    Fn->addFnAttr("sign-return-address-key",
                  RASignKey == CodeGenOptions::SignReturnAddressKeyValue::AKey
                      ? "a_key"
                      : "b_key");
  }

  if (getCodeGenOpts().BranchTargetEnforcement)
    Fn->addFnAttr("branch-target-enforcement");

  return Fn;
}

/// Create a global pointer to a function that will initialize a global
/// variable. The user has requested that this pointer be emitted in a specific
/// section.
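///
/// This is used for MSVC-style #pragma init_seg: the pointer is placed in the
/// section named by the attribute (for example a ".CRT$XC*" section), so the
/// CRT walks it during startup.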
void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
                                          llvm::GlobalVariable *GV,
                                          llvm::Function *InitFunc,
                                          InitSegAttr *ISA) {
  llvm::GlobalVariable *PtrArray = new llvm::GlobalVariable(
      TheModule, InitFunc->getType(), /*isConstant=*/true,
      llvm::GlobalValue::PrivateLinkage, InitFunc, "__cxx_init_fn_ptr");
  PtrArray->setSection(ISA->getSection());
  addUsedGlobal(PtrArray);

  // If the GV is already in a comdat group, then we have to join it.
  if (llvm::Comdat *C = GV->getComdat())
    PtrArray->setComdat(C);
}

void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
                                            llvm::GlobalVariable *Addr,
                                            bool PerformInit) {

  // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__,
  // __constant__ and __shared__ variables defined in namespace scope,
  // that are of class type, cannot have a non-empty constructor. All
  // the checks have been done in Sema by now. Whatever initializers
  // are allowed are empty and we just need to ignore them here.
  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
      (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
       D->hasAttr<CUDASharedAttr>()))
    return;

  if (getLangOpts().OpenMP &&
      getOpenMPRuntime().emitDeclareTargetVarDefinition(D, Addr, PerformInit))
    return;

  // Check if we've already initialized this decl.
  auto I = DelayedCXXInitPosition.find(D);
  if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getCXXABI().getMangleContext().mangleDynamicInitializer(D, Out);
  }

  // Create a variable initialization function.
  llvm::Function *Fn =
      CreateGlobalInitOrDestructFunction(FTy, FnName.str(),
                                         getTypes().arrangeNullaryFunction(),
                                         D->getLocation());

  auto *ISA = D->getAttr<InitSegAttr>();
  CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
                                                          PerformInit);

  llvm::GlobalVariable *COMDATKey =
      supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;

  if (D->getTLSKind()) {
    // FIXME: Should we support init_priority for thread_local?
    // FIXME: We only need to register one __cxa_thread_atexit function for the
    // entire TU.
    CXXThreadLocalInits.push_back(Fn);
    CXXThreadLocalInitVars.push_back(D);
  } else if (PerformInit && ISA) {
    EmitPointerToInitFunc(D, Addr, Fn, ISA);
  } else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
    OrderGlobalInits Key(IPA->getPriority(), PrioritizedCXXGlobalInits.size());
    PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
  } else if (isTemplateInstantiation(D->getTemplateSpecializationKind())) {
    // C++ [basic.start.init]p2:
    //   Definitions of explicitly specialized class template static data
    //   members have ordered initialization. Other class template static data
    //   members (i.e., implicitly or explicitly instantiated specializations)
    //   have unordered initialization.
    //
    // As a consequence, we can put them into their own llvm.global_ctors entry.
    //
    // If the global is externally visible, put the initializer into a COMDAT
    // group with the global being initialized. On most platforms, this is a
    // minor startup time optimization. In the MS C++ ABI, there are no guard
    // variables, so this COMDAT key is required for correctness.
    AddGlobalCtor(Fn, 65535, COMDATKey);
  } else if (D->hasAttr<SelectAnyAttr>()) {
    // SelectAny globals will be comdat-folded. Put the initializer into a
    // COMDAT group associated with the global, so the initializers get folded
    // too.
    AddGlobalCtor(Fn, 65535, COMDATKey);
  } else {
    I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash.
    if (I == DelayedCXXInitPosition.end()) {
      CXXGlobalInits.push_back(Fn);
    } else if (I->second != ~0U) {
      assert(I->second < CXXGlobalInits.size() &&
             CXXGlobalInits[I->second] == nullptr);
      CXXGlobalInits[I->second] = Fn;
    }
  }

  // Remember that we already emitted the initializer for this global.
  DelayedCXXInitPosition[D] = ~0U;
}

void CodeGenModule::EmitCXXThreadLocalInitFunc() {
  getCXXABI().EmitThreadLocalInitFuncs(
      *this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);

  CXXThreadLocalInits.clear();
  CXXThreadLocalInitVars.clear();
  CXXThreadLocals.clear();
}

void
CodeGenModule::EmitCXXGlobalInitFunc() {
  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
    CXXGlobalInits.pop_back();

  if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global initialization function.
  if (!PrioritizedCXXGlobalInits.empty()) {
    SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
    llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
                         PrioritizedCXXGlobalInits.end());
    // Iterate over "chunks" of ctors with the same priority and emit each
    // chunk into a separate function. Note: everything is sorted first by
    // priority, second by lexical order, so we emit the ctor functions in the
    // proper order.
    for (SmallVectorImpl<GlobalInitData >::iterator
             I = PrioritizedCXXGlobalInits.begin(),
             E = PrioritizedCXXGlobalInits.end(); I != E; ) {
      SmallVectorImpl<GlobalInitData >::iterator
          PrioE = std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());

      LocalCXXGlobalInits.clear();
      unsigned Priority = I->first.priority;
      // Compute the function suffix from priority. Prepend with zeroes to make
      // sure the function names are also ordered as priorities.
      std::string PrioritySuffix = llvm::utostr(Priority);
      // Priority is always <= 65535 (enforced by sema).
      PrioritySuffix = std::string(6-PrioritySuffix.size(), '0')+PrioritySuffix;
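      // For example, an init_priority of 200 yields the suffix "000200" and
      // thus a function named _GLOBAL__I_000200.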
      llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
          FTy, "_GLOBAL__I_" + PrioritySuffix, FI);

      for (; I < PrioE; ++I)
        LocalCXXGlobalInits.push_back(I->second);

      CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, LocalCXXGlobalInits);
      AddGlobalCtor(Fn, Priority);
    }
    PrioritizedCXXGlobalInits.clear();
  }

  // Include the filename in the symbol name. Including "sub_" matches gcc and
  // makes sure these symbols appear lexicographically behind the symbols with
  // priority emitted above.
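  // For example, a module named "foo bar.cpp" produces an initializer called
  // "_GLOBAL__sub_I_foo_bar.cpp" after the character cleanup below.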
  SmallString<128> FileName = llvm::sys::path::filename(getModule().getName());
  if (FileName.empty())
    FileName = "<null>";

  for (size_t i = 0; i < FileName.size(); ++i) {
    // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
    // to be the set of C preprocessing numbers.
    if (!isPreprocessingNumberBody(FileName[i]))
      FileName[i] = '_';
  }

  llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
      FTy, llvm::Twine("_GLOBAL__sub_I_", FileName), FI);

  CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
  AddGlobalCtor(Fn);

  CXXGlobalInits.clear();
}

void CodeGenModule::EmitCXXGlobalDtorFunc() {
  if (CXXGlobalDtors.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);

  // Create our global destructor function.
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
  llvm::Function *Fn =
      CreateGlobalInitOrDestructFunction(FTy, "_GLOBAL__D_a", FI);

  CodeGenFunction(*this).GenerateCXXGlobalDtorsFunc(Fn, CXXGlobalDtors);
  AddGlobalDtor(Fn);
}

/// Emit the code necessary to initialize the given global variable.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
                                                       const VarDecl *D,
                                                 llvm::GlobalVariable *Addr,
                                                       bool PerformInit) {
  // Check if we need to emit debug info for variable initializer.
  if (D->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  CurEHLocation = D->getBeginLoc();

  StartFunction(GlobalDecl(D), getContext().VoidTy, Fn,
                getTypes().arrangeNullaryFunction(),
                FunctionArgList(), D->getLocation(),
                D->getInit()->getExprLoc());

  // Use guarded initialization if the global variable is weak. This
  // occurs for, e.g., instantiated static data members and
  // definitions explicitly marked weak.
  if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage()) {
    EmitCXXGuardedInit(*D, Addr, PerformInit);
  } else {
    EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
  }

  FinishFunction();
}

void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
                                           ArrayRef<llvm::Function *> Decls,
                                           ConstantAddress Guard) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    llvm::BasicBlock *ExitBlock = nullptr;
    if (Guard.isValid()) {
      // If we have a guard variable, check whether we've already performed
      // these initializations. This happens for TLS initialization functions.
      llvm::Value *GuardVal = Builder.CreateLoad(Guard);
      llvm::Value *Uninit = Builder.CreateIsNull(GuardVal,
                                                 "guard.uninitialized");
      llvm::BasicBlock *InitBlock = createBasicBlock("init");
      ExitBlock = createBasicBlock("exit");
      EmitCXXGuardedInitBranch(Uninit, InitBlock, ExitBlock,
                               GuardKind::TlsGuard, nullptr);
      EmitBlock(InitBlock);
      // Mark as initialized before initializing anything else. If the
      // initializers use previously-initialized thread_local vars, that's
      // probably supposed to be OK, but the standard doesn't say.
      Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(), 1), Guard);

      // The guard variable can't ever change again.
      EmitInvariantStart(
          Guard.getPointer(),
          CharUnits::fromQuantity(
              CGM.getDataLayout().getTypeAllocSize(GuardVal->getType())));
    }

    RunCleanupsScope Scope(*this);

    // When building in Objective-C++ ARC mode, create an autorelease pool
    // around the global initializers.
    if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
      llvm::Value *token = EmitObjCAutoreleasePoolPush();
      EmitObjCAutoreleasePoolCleanup(token);
    }

    for (unsigned i = 0, e = Decls.size(); i != e; ++i)
      if (Decls[i])
        EmitRuntimeCall(Decls[i]);

    Scope.ForceCleanup();

    if (ExitBlock) {
      Builder.CreateBr(ExitBlock);
      EmitBlock(ExitBlock);
    }
  }

  FinishFunction();
}

void CodeGenFunction::GenerateCXXGlobalDtorsFunc(
    llvm::Function *Fn,
    const std::vector<std::pair<llvm::WeakTrackingVH, llvm::Constant *>>
        &DtorsAndObjects) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    // Emit the dtors, in reverse order from construction.
    for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
      llvm::Value *Callee = DtorsAndObjects[e - i - 1].first;
      llvm::CallInst *CI = Builder.CreateCall(Callee,
                                              DtorsAndObjects[e - i - 1].second);
      // Make sure the call and the callee agree on calling convention.
      if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
        CI->setCallingConv(F->getCallingConv());
    }
  }

  FinishFunction();
}

/// generateDestroyHelper - Generates a helper function which, when
/// invoked, destroys the given object. The address of the object
/// should be in global memory.
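///
/// The emitted helper is roughly
///   static void __cxx_global_array_dtor(void *) { /* run the destroyer */ }
/// and is suitable for registering with __cxa_atexit/atexit with a null
/// argument, as done in EmitDeclDestroy above.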
llvm::Function *CodeGenFunction::generateDestroyHelper(
    Address addr, QualType type, Destroyer *destroyer,
    bool useEHCleanupForArray, const VarDecl *VD) {
  FunctionArgList args;
  ImplicitParamDecl Dst(getContext(), getContext().VoidPtrTy,
                        ImplicitParamDecl::Other);
  args.push_back(&Dst);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, args);
  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(
      FTy, "__cxx_global_array_dtor", FI, VD->getLocation());

  CurEHLocation = VD->getBeginLoc();

  StartFunction(VD, getContext().VoidTy, fn, FI, args);

  emitDestroy(addr, type, destroyer, useEHCleanupForArray);

  FinishFunction();

  return fn;
}