//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ declarations
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Path.h"

using namespace clang;
using namespace CodeGen;

static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(
      (D.hasGlobalStorage() ||
       (D.hasLocalStorage() && CGF.getContext().getLangOpts().OpenCLCPlusPlus)) &&
      "VarDecl must have global or local (in the case of OpenCL) storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(Init,
                    AggValueSlot::forLValue(lv, CGF, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased,
                                            AggValueSlot::DoesNotOverlap));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            ConstantAddress Addr) {
  // Honor __attribute__((no_destroy)) and bail instead of attempting
  // to emit a reference to a possibly nonexistent destructor, which
  // in turn can cause a crash. This will result in a global constructor
  // that isn't balanced out by a destructor call as intended by the
  // attribute. This also checks for -fno-c++-static-destructors and
  // bails even if the attribute is not present.
  QualType::DestructionKind DtorKind = D.needsDestruction(CGF.getContext());

  // FIXME: __attribute__((cleanup)) ?

  switch (DtorKind) {
  case QualType::DK_none:
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
  case QualType::DK_nontrivial_c_struct:
    // We don't care about releasing objects during process teardown.
    assert(!D.getTLSKind() && "should have rejected this");
    return;
  }

  llvm::FunctionCallee Func;
  llvm::Constant *Argument;

  CodeGenModule &CGM = CGF.CGM;
  QualType Type = D.getType();

  // Special-case non-array C++ destructors, if they have the right signature.
  // Under some ABIs, destructors return this instead of void, and cannot be
  // passed directly to __cxa_atexit if the target does not allow this
  // mismatch.
  const CXXRecordDecl *Record = Type->getAsCXXRecordDecl();
  bool CanRegisterDestructor =
      Record && (!CGM.getCXXABI().HasThisReturn(
                     GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
                 CGM.getCXXABI().canCallMismatchedFunctionType());
  // If __cxa_atexit is disabled via a flag, a different helper function is
  // generated elsewhere which uses atexit instead, and it takes the destructor
  // directly.
  bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
  if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
    assert(!Record->hasTrivialDestructor());
    CXXDestructorDecl *Dtor = Record->getDestructor();

    Func = CGM.getAddrAndTypeOfCXXStructor(GlobalDecl(Dtor, Dtor_Complete));
    if (CGF.getContext().getLangOpts().OpenCL) {
      auto DestAS =
          CGM.getTargetCodeGenInfo().getAddrSpaceOfCxaAtexitPtrParam();
      auto DestTy = CGF.getTypes().ConvertType(Type)->getPointerTo(
          CGM.getContext().getTargetAddressSpace(DestAS));
      auto SrcAS = D.getType().getQualifiers().getAddressSpace();
      if (DestAS == SrcAS)
        Argument = llvm::ConstantExpr::getBitCast(Addr.getPointer(), DestTy);
      else
        // FIXME: On addr space mismatch we are passing NULL. The generation
        // of the global destructor function should be adjusted accordingly.
        Argument = llvm::ConstantPointerNull::get(DestTy);
    } else {
      Argument = llvm::ConstantExpr::getBitCast(
          Addr.getPointer(), CGF.getTypes().ConvertType(Type)->getPointerTo());
    }
    // Otherwise, the standard logic requires a helper function.
  } else {
    Addr = Addr.getElementBitCast(CGF.ConvertTypeForMem(Type));
    Func = CodeGenFunction(CGM)
               .generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
                                      CGF.needsEHCleanup(DtorKind), &D);
    Argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
  }

  CGM.getCXXABI().registerGlobalDtor(CGF, D, Func, Argument);
}

/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
                              llvm::Constant *Addr) {
  return CGF.EmitInvariantStart(
      Addr, CGF.getContext().getTypeSizeInChars(D.getType()));
}

void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
  // Do not emit the intrinsic if we're not optimizing.
  if (!CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // Grab the llvm.invariant.start intrinsic.
  llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
  // Overloaded address space type.
  llvm::Type *ObjectPtr[1] = {Int8PtrTy};
  llvm::Function *InvariantStart = CGM.getIntrinsic(InvStartID, ObjectPtr);

  // Emit a call with the size in bytes of the object.
  uint64_t Width = Size.getQuantity();
  llvm::Value *Args[2] = {llvm::ConstantInt::getSigned(Int64Ty, Width),
                          llvm::ConstantExpr::getBitCast(Addr, Int8PtrTy)};
  Builder.CreateCall(InvariantStart, Args);
}

void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::GlobalVariable *GV,
                                               bool PerformInit) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  // The address space of a static local variable (DeclPtr) may be different
  // from the address space of the "this" argument of the constructor. In that
  // case, we need an addrspacecast before calling the constructor.
  //
  //   struct StructWithCtor {
  //     __device__ StructWithCtor() {...}
  //   };
  //   __device__ void foo() {
  //     __shared__ StructWithCtor s;
  //     ...
  //   }
  //
  // For example, in the above CUDA code, the static local variable s has a
  // "shared" address space qualifier, but the constructor of StructWithCtor
  // expects "this" in the "generic" address space.
  unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(T);
  unsigned ActualAddrSpace = GV->getAddressSpace();
  llvm::Constant *DeclPtr = GV;
  if (ActualAddrSpace != ExpectedAddrSpace) {
    llvm::PointerType *PTy = llvm::PointerType::getWithSamePointeeType(
        GV->getType(), ExpectedAddrSpace);
    DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
  }

  ConstantAddress DeclAddr(
      DeclPtr, GV->getValueType(), getContext().getDeclAlign(&D));

  if (!T->isReferenceType()) {
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        D.hasAttr<OMPThreadPrivateDeclAttr>()) {
      (void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
          &D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
          PerformInit, this);
    }
    if (PerformInit)
      EmitDeclInit(*this, D, DeclAddr);
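    // A variable whose type is constant once constructed has nothing to
    // destroy; mark its storage invariant instead of registering a destructor.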
    if (CGM.isTypeConstant(D.getType(), true))
      EmitDeclInvariant(*this, D, DeclPtr);
    else
      EmitDeclDestroy(*this, D, DeclAddr);
    return;
  }

  assert(PerformInit && "cannot have constant initializer which needs "
                        "destruction for reference");
  RValue RV = EmitReferenceBindingToExpr(Init);
  EmitStoreOfScalar(RV.getScalarVal(), DeclAddr, false, T);
}

/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
                                                  llvm::FunctionCallee dtor,
                                                  llvm::Constant *addr) {
  // Get the destructor function type, void(*)(void).
  llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
  }

  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
      ty, FnName.str(), FI, VD.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(&VD, DynamicInitKind::AtExit),
                    CGM.getContext().VoidTy, fn, FI, FunctionArgList(),
                    VD.getLocation(), VD.getInit()->getExprLoc());
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(CGF);

  llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);

  // Make sure the call and the callee agree on calling convention.
  if (auto *dtorFn = dyn_cast<llvm::Function>(
          dtor.getCallee()->stripPointerCastsAndAliases()))
    call->setCallingConv(dtorFn->getCallingConv());

  CGF.FinishFunction();

  return fn;
}

/// Create a stub function, suitable for being passed to __pt_atexit_np,
/// which passes the given address to the given destructor function.
llvm::Function *CodeGenFunction::createTLSAtExitStub(
    const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr,
    llvm::FunctionCallee &AtExit) {
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&D, Out);
  }

  const CGFunctionInfo &FI = CGM.getTypes().arrangeLLVMFunctionInfo(
      getContext().IntTy, /*instanceMethod=*/false, /*chainCall=*/false,
      {getContext().IntTy}, FunctionType::ExtInfo(), {}, RequiredArgs::All);

  // Get the stub function type, int(*)(int,...).
  llvm::FunctionType *StubTy =
      llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy}, true);

  llvm::Function *DtorStub = CGM.CreateGlobalInitOrCleanUpFunction(
      StubTy, FnName.str(), FI, D.getLocation());

  CodeGenFunction CGF(CGM);

  FunctionArgList Args;
  ImplicitParamDecl IPD(CGM.getContext(), CGM.getContext().IntTy,
                        ImplicitParamDecl::Other);
  Args.push_back(&IPD);
  QualType ResTy = CGM.getContext().IntTy;

  CGF.StartFunction(GlobalDecl(&D, DynamicInitKind::AtExit), ResTy, DtorStub,
                    FI, Args, D.getLocation(), D.getInit()->getExprLoc());

  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(CGF);

  llvm::CallInst *call = CGF.Builder.CreateCall(Dtor, Addr);

  // Make sure the call and the callee agree on calling convention.
  if (auto *DtorFn = dyn_cast<llvm::Function>(
          Dtor.getCallee()->stripPointerCastsAndAliases()))
    call->setCallingConv(DtorFn->getCallingConv());

  // Return 0 from the stub function.
  CGF.Builder.CreateStore(llvm::Constant::getNullValue(CGM.IntTy),
                          CGF.ReturnValue);

  CGF.FinishFunction();

  return DtorStub;
}

/// Register a global destructor using the C atexit runtime function.
void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
                                                   llvm::FunctionCallee dtor,
                                                   llvm::Constant *addr) {
  // Create a function which calls the destructor.
  llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
  registerGlobalDtorWithAtExit(dtorStub);
}

void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
  // extern "C" int atexit(void (*f)(void));
  assert(dtorStub->getType() ==
             llvm::PointerType::get(
                 llvm::FunctionType::get(CGM.VoidTy, false),
                 dtorStub->getType()->getPointerAddressSpace()) &&
         "Argument to atexit has a wrong type.");

  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(IntTy, dtorStub->getType(), false);

  llvm::FunctionCallee atexit =
      CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeList(),
                                /*Local=*/true);
  if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit.getCallee()))
    atexitFn->setDoesNotThrow();

  EmitNounwindRuntimeCall(atexit, dtorStub);
}

llvm::Value *
CodeGenFunction::unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub) {
  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // it is removed from the list of functions that are called at normal program
  // termination and unatexit returns a value of 0; otherwise, a non-zero value
  // is returned.
  //
  // extern "C" int unatexit(void (*f)(void));
  assert(dtorStub->getType() ==
             llvm::PointerType::get(
                 llvm::FunctionType::get(CGM.VoidTy, false),
                 dtorStub->getType()->getPointerAddressSpace()) &&
         "Argument to unatexit has a wrong type.");

  llvm::FunctionType *unatexitTy =
      llvm::FunctionType::get(IntTy, {dtorStub->getType()}, /*isVarArg=*/false);

  llvm::FunctionCallee unatexit =
      CGM.CreateRuntimeFunction(unatexitTy, "unatexit", llvm::AttributeList());

  cast<llvm::Function>(unatexit.getCallee())->setDoesNotThrow();

  return EmitNounwindRuntimeCall(unatexit, dtorStub);
}

void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
                                         llvm::GlobalVariable *DeclPtr,
                                         bool PerformInit) {
  // If we've been asked to forbid guard variables, emit an error now.
  // This diagnostic is hard-coded for Darwin's use case; we can find
  // better phrasing if someone else needs it.
  if (CGM.getCodeGenOpts().ForbidGuardVariables)
    CGM.Error(D.getLocation(),
              "this initialization requires a guard variable, which "
              "the kernel does not support");

  CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
}

void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
                                               llvm::BasicBlock *InitBlock,
                                               llvm::BasicBlock *NoInitBlock,
                                               GuardKind Kind,
                                               const VarDecl *D) {
  assert((Kind == GuardKind::TlsGuard || D) && "no guarded variable");

  // A guess at how many times we will enter the initialization of a
  // variable, depending on the kind of variable.
  static const uint64_t InitsPerTLSVar = 1024;
  static const uint64_t InitsPerLocalVar = 1024 * 1024;

  llvm::MDNode *Weights;
  if (Kind == GuardKind::VariableGuard && !D->isLocalVarDecl()) {
    // For non-local variables, don't apply any weighting for now. Due to our
    // use of COMDATs, we expect there to be at most one initialization of the
    // variable per DSO, but we have no way to know how many DSOs will try to
    // initialize the variable.
    Weights = nullptr;
  } else {
    uint64_t NumInits;
    // FIXME: For the TLS case, collect and use profiling information to
    // determine a more accurate branch weight.
    if (Kind == GuardKind::TlsGuard || D->getTLSKind())
      NumInits = InitsPerTLSVar;
    else
      NumInits = InitsPerLocalVar;

    // The probability of us entering the initializer is
    // 1 / (total number of times we attempt to initialize the variable).
    llvm::MDBuilder MDHelper(CGM.getLLVMContext());
    Weights = MDHelper.createBranchWeights(1, NumInits - 1);
  }

  Builder.CreateCondBr(NeedsInit, InitBlock, NoInitBlock, Weights);
}

llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
    llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
    SourceLocation Loc, bool TLS, llvm::GlobalVariable::LinkageTypes Linkage) {
  llvm::Function *Fn = llvm::Function::Create(FTy, Linkage, Name, &getModule());

  if (!getLangOpts().AppleKext && !TLS) {
    // Set the section if needed.
    if (const char *Section = getTarget().getStaticInitSectionSpecifier())
      Fn->setSection(Section);
  }

  if (Linkage == llvm::GlobalVariable::InternalLinkage)
    SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  Fn->setCallingConv(getRuntimeCC());

  if (!getLangOpts().Exceptions)
    Fn->setDoesNotThrow();

  if (getLangOpts().Sanitize.has(SanitizerKind::Address) &&
      !isInNoSanitizeList(SanitizerKind::Address, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelAddress) &&
      !isInNoSanitizeList(SanitizerKind::KernelAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::HWAddress) &&
      !isInNoSanitizeList(SanitizerKind::HWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelHWAddress) &&
      !isInNoSanitizeList(SanitizerKind::KernelHWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::MemtagStack) &&
      !isInNoSanitizeList(SanitizerKind::MemtagStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);

  if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
      !isInNoSanitizeList(SanitizerKind::Thread, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);

  if (getLangOpts().Sanitize.has(SanitizerKind::Memory) &&
      !isInNoSanitizeList(SanitizerKind::Memory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelMemory) &&
      !isInNoSanitizeList(SanitizerKind::KernelMemory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::SafeStack) &&
      !isInNoSanitizeList(SanitizerKind::SafeStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SafeStack);

  if (getLangOpts().Sanitize.has(SanitizerKind::ShadowCallStack) &&
      !isInNoSanitizeList(SanitizerKind::ShadowCallStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  return Fn;
}

/// Create a global pointer to a function that will initialize a global
/// variable. The user has requested that this pointer be emitted in a specific
/// section.
void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
                                          llvm::GlobalVariable *GV,
                                          llvm::Function *InitFunc,
                                          InitSegAttr *ISA) {
  llvm::GlobalVariable *PtrArray = new llvm::GlobalVariable(
      TheModule, InitFunc->getType(), /*isConstant=*/true,
      llvm::GlobalValue::PrivateLinkage, InitFunc, "__cxx_init_fn_ptr");
  PtrArray->setSection(ISA->getSection());
  addUsedGlobal(PtrArray);

  // If the GV is already in a comdat group, then we have to join it.
  if (llvm::Comdat *C = GV->getComdat())
    PtrArray->setComdat(C);
}

void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
                                            llvm::GlobalVariable *Addr,
                                            bool PerformInit) {

  // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__,
  // __constant__ and __shared__ variables defined in namespace scope,
  // that are of class type, cannot have a non-empty constructor. All
  // the checks have been done in Sema by now. Whatever initializers
  // are allowed are empty and we just need to ignore them here.
  if (getLangOpts().CUDAIsDevice && !getLangOpts().GPUAllowDeviceInit &&
      (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
       D->hasAttr<CUDASharedAttr>()))
    return;

  if (getLangOpts().OpenMP &&
      getOpenMPRuntime().emitDeclareTargetVarDefinition(D, Addr, PerformInit))
    return;

  // Check if we've already initialized this decl.
  auto I = DelayedCXXInitPosition.find(D);
  if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getCXXABI().getMangleContext().mangleDynamicInitializer(D, Out);
  }

  // Create a variable initialization function.
  llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
      FTy, FnName.str(), getTypes().arrangeNullaryFunction(), D->getLocation());

  auto *ISA = D->getAttr<InitSegAttr>();
  CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
                                                          PerformInit);

  llvm::GlobalVariable *COMDATKey =
      supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;

  if (D->getTLSKind()) {
    // FIXME: Should we support init_priority for thread_local?
    // FIXME: We only need to register one __cxa_thread_atexit function for the
    // entire TU.
    CXXThreadLocalInits.push_back(Fn);
    CXXThreadLocalInitVars.push_back(D);
  } else if (PerformInit && ISA) {
    EmitPointerToInitFunc(D, Addr, Fn, ISA);
  } else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
    OrderGlobalInitsOrStermFinalizers Key(IPA->getPriority(),
                                          PrioritizedCXXGlobalInits.size());
    PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
  } else if (isTemplateInstantiation(D->getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(D) == GVA_DiscardableODR ||
             D->hasAttr<SelectAnyAttr>()) {
    // C++ [basic.start.init]p2:
    //   Definitions of explicitly specialized class template static data
    //   members have ordered initialization. Other class template static data
    //   members (i.e., implicitly or explicitly instantiated specializations)
    //   have unordered initialization.
    //
    // As a consequence, we can put them into their own llvm.global_ctors entry.
    //
    // If the global is externally visible, put the initializer into a COMDAT
    // group with the global being initialized. On most platforms, this is a
    // minor startup time optimization. In the MS C++ ABI, there are no guard
    // variables, so this COMDAT key is required for correctness.
    //
    // SelectAny globals will be comdat-folded. Put the initializer into a
    // COMDAT group associated with the global, so the initializers get folded
    // too.

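    // 65535 is the default priority for llvm.global_ctors entries (lower
    // priorities run first), so these unordered initializers run after any
    // explicitly prioritized ones registered above.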
    AddGlobalCtor(Fn, 65535, COMDATKey);
    if (COMDATKey && (getTriple().isOSBinFormatELF() ||
                      getTarget().getCXXABI().isMicrosoft())) {
      // When COMDAT is used on ELF or in the MS C++ ABI, the key must be in
      // llvm.used to prevent linker GC.
      addUsedGlobal(COMDATKey);
    }

    // If we used a COMDAT key for the global ctor, the init function can be
    // discarded if the global ctor entry is discarded.
    // FIXME: Do we need to restrict this to ELF and Wasm?
    llvm::Comdat *C = Addr->getComdat();
    if (COMDATKey && C &&
        (getTarget().getTriple().isOSBinFormatELF() ||
         getTarget().getTriple().isOSBinFormatWasm())) {
      Fn->setComdat(C);
    }
  } else {
    I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash.
    if (I == DelayedCXXInitPosition.end()) {
      CXXGlobalInits.push_back(Fn);
    } else if (I->second != ~0U) {
      assert(I->second < CXXGlobalInits.size() &&
             CXXGlobalInits[I->second] == nullptr);
      CXXGlobalInits[I->second] = Fn;
    }
  }

  // Remember that we already emitted the initializer for this global.
  DelayedCXXInitPosition[D] = ~0U;
}

void CodeGenModule::EmitCXXThreadLocalInitFunc() {
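  // Let the C++ ABI emit the actual thread_local initialization machinery
  // (e.g. __tls_init and the thread wrappers in the Itanium ABI), then drop
  // the per-TU bookkeeping lists.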
  getCXXABI().EmitThreadLocalInitFuncs(
      *this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);

  CXXThreadLocalInits.clear();
  CXXThreadLocalInitVars.clear();
  CXXThreadLocals.clear();
}

/* Build the initializer for a C++20 module:
   This is arranged to be run only once, regardless of how many times the
   module might be included transitively. This is arranged by using a control
   variable.

   First we call any initializers for imported modules.
   We then call initializers for the Global Module Fragment (if present).
   We then call initializers for the current module.
   We then call initializers for the Private Module Fragment (if present).
*/

void CodeGenModule::EmitCXXModuleInitFunc(Module *Primary) {
  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
    CXXGlobalInits.pop_back();

  // We create the function, even if it is empty, since an importer of this
  // module will refer to it unconditionally (for the current implementation
  // there is no way for the importer to know that an importee does not need
  // an initializer to be run).

  // Module initializers for imported modules are emitted first.
  // Collect the modules that we import.
  SmallVector<Module *> AllImports;
  // Ones that we export.
  for (auto I : Primary->Exports)
    AllImports.push_back(I.getPointer());
  // Ones that we only import.
  for (Module *M : Primary->Imports)
    AllImports.push_back(M);

  SmallVector<llvm::Function *, 8> ModuleInits;
  for (Module *M : AllImports) {
    // No Itanium initializer in header-like modules.
    if (M->isHeaderLikeModule())
      continue; // TODO: warn of mixed use of module map modules and C++20?
    llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
    SmallString<256> FnName;
    {
      llvm::raw_svector_ostream Out(FnName);
      cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
          .mangleModuleInitializer(M, Out);
    }
    assert(!GetGlobalValue(FnName.str()) &&
           "We should only have one use of the initializer call");
    llvm::Function *Fn = llvm::Function::Create(
        FTy, llvm::Function::ExternalLinkage, FnName.str(), &getModule());
    ModuleInits.push_back(Fn);
  }
  AllImports.clear();

  // Add any initializers with specified priority; this uses the same approach
  // as EmitCXXGlobalInitFunc().
  if (!PrioritizedCXXGlobalInits.empty()) {
    SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
    llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
                         PrioritizedCXXGlobalInits.end());
    for (SmallVectorImpl<GlobalInitData>::iterator
             I = PrioritizedCXXGlobalInits.begin(),
             E = PrioritizedCXXGlobalInits.end();
         I != E;) {
      SmallVectorImpl<GlobalInitData>::iterator PrioE =
          std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());

      for (; I < PrioE; ++I)
        ModuleInits.push_back(I->second);
    }
    PrioritizedCXXGlobalInits.clear();
  }

  // Now append the ones without specified priority.
  for (auto F : CXXGlobalInits)
    ModuleInits.push_back(F);
  CXXGlobalInits.clear();

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // We now build the initializer for this module, which has a mangled name
  // as per the Itanium ABI. The action of the initializer is guarded so that
  // each init is run just once (even though a module might be imported
  // multiple times via nested use).
  llvm::Function *Fn;
  llvm::GlobalVariable *Guard = nullptr;
  {
    SmallString<256> InitFnName;
    llvm::raw_svector_ostream Out(InitFnName);
    cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
        .mangleModuleInitializer(Primary, Out);
    Fn = CreateGlobalInitOrCleanUpFunction(
        FTy, llvm::Twine(InitFnName), FI, SourceLocation(), false,
        llvm::GlobalVariable::ExternalLinkage);

    Guard = new llvm::GlobalVariable(getModule(), Int8Ty, /*isConstant=*/false,
                                     llvm::GlobalVariable::InternalLinkage,
                                     llvm::ConstantInt::get(Int8Ty, 0),
                                     InitFnName.str() + "__in_chrg");
  }
  CharUnits GuardAlign = CharUnits::One();
  Guard->setAlignment(GuardAlign.getAsAlign());

  CodeGenFunction(*this).GenerateCXXGlobalInitFunc(
      Fn, ModuleInits, ConstantAddress(Guard, Int8Ty, GuardAlign));
  // We allow for the case that a module object is added to a linked binary
  // without a specific call to the initializer. This also ensures that
  // implementation partition initializers are called when the partition
  // is not imported as an interface.
  AddGlobalCtor(Fn);

  // See the comment in EmitCXXGlobalInitFunc about OpenCL global init
  // functions.
  if (getLangOpts().OpenCL) {
    GenKernelArgMetadata(Fn);
    Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
  }

  assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
         getLangOpts().GPUAllowDeviceInit);
  if (getLangOpts().HIP && getLangOpts().CUDAIsDevice) {
    Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
    Fn->addFnAttr("device-init");
  }

  ModuleInits.clear();
}

static SmallString<128> getTransformedFileName(llvm::Module &M) {
  SmallString<128> FileName = llvm::sys::path::filename(M.getName());

  if (FileName.empty())
    FileName = "<null>";

  for (size_t i = 0; i < FileName.size(); ++i) {
    // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
    // to be the set of C preprocessing numbers.
    if (!isPreprocessingNumberBody(FileName[i]))
      FileName[i] = '_';
  }

  return FileName;
}

static std::string getPrioritySuffix(unsigned int Priority) {
  assert(Priority <= 65535 && "Priority should always be <= 65535.");

  // Compute the function suffix from priority. Prepend with zeroes to make
  // sure the function names are also ordered as priorities.
  std::string PrioritySuffix = llvm::utostr(Priority);
  PrioritySuffix = std::string(6 - PrioritySuffix.size(), '0') + PrioritySuffix;

  return PrioritySuffix;
}

void
CodeGenModule::EmitCXXGlobalInitFunc() {
  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
    CXXGlobalInits.pop_back();

  // When we import C++20 modules, we must run their initializers first.
  SmallVector<llvm::Function *, 8> ModuleInits;
  if (CXX20ModuleInits)
    for (Module *M : ImportedModules) {
      // No Itanium initializer in header-like modules.
      if (M->isHeaderLikeModule())
        continue;
      llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
      SmallString<256> FnName;
      {
        llvm::raw_svector_ostream Out(FnName);
        cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
            .mangleModuleInitializer(M, Out);
      }
      assert(!GetGlobalValue(FnName.str()) &&
             "We should only have one use of the initializer call");
      llvm::Function *Fn = llvm::Function::Create(
          FTy, llvm::Function::ExternalLinkage, FnName.str(), &getModule());
      ModuleInits.push_back(Fn);
    }

  if (ModuleInits.empty() && CXXGlobalInits.empty() &&
      PrioritizedCXXGlobalInits.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global prioritized initialization function.
  if (!PrioritizedCXXGlobalInits.empty()) {
    SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
    llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
                         PrioritizedCXXGlobalInits.end());
    // Iterate over "chunks" of ctors with the same priority and emit each
    // chunk into a separate function. Note - everything is sorted first by
    // priority, second by lex order, so we emit ctor functions in the proper
    // order.
    for (SmallVectorImpl<GlobalInitData>::iterator
             I = PrioritizedCXXGlobalInits.begin(),
             E = PrioritizedCXXGlobalInits.end();
         I != E;) {
      SmallVectorImpl<GlobalInitData>::iterator PrioE =
          std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());

      LocalCXXGlobalInits.clear();

      unsigned int Priority = I->first.priority;
      llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
          FTy, "_GLOBAL__I_" + getPrioritySuffix(Priority), FI);

      // Prepend the module inits to the highest priority set.
      if (!ModuleInits.empty()) {
        for (auto F : ModuleInits)
          LocalCXXGlobalInits.push_back(F);
        ModuleInits.clear();
      }

      for (; I < PrioE; ++I)
        LocalCXXGlobalInits.push_back(I->second);

      CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, LocalCXXGlobalInits);
      AddGlobalCtor(Fn, Priority);
    }
    PrioritizedCXXGlobalInits.clear();
  }

  if (getCXXABI().useSinitAndSterm() && ModuleInits.empty() &&
      CXXGlobalInits.empty())
    return;

  for (auto F : CXXGlobalInits)
    ModuleInits.push_back(F);
  CXXGlobalInits.clear();

  // Include the filename in the symbol name. Including "sub_" matches gcc
  // and makes sure these symbols appear lexicographically behind the symbols
  // with priority emitted above.
  llvm::Function *Fn;
  if (CXX20ModuleInits && getContext().getModuleForCodeGen()) {
    SmallString<256> InitFnName;
    llvm::raw_svector_ostream Out(InitFnName);
    cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
        .mangleModuleInitializer(getContext().getModuleForCodeGen(), Out);
    Fn = CreateGlobalInitOrCleanUpFunction(
        FTy, llvm::Twine(InitFnName), FI, SourceLocation(), false,
        llvm::GlobalVariable::ExternalLinkage);
  } else
    Fn = CreateGlobalInitOrCleanUpFunction(
        FTy,
        llvm::Twine("_GLOBAL__sub_I_", getTransformedFileName(getModule())),
        FI);

  CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, ModuleInits);
  AddGlobalCtor(Fn);

  // In OpenCL, global init functions must be converted to kernels in order to
  // be able to launch them from the host.
  // FIXME: Some more work might be needed to handle destructors correctly.
  // The current initialization function makes use of function pointer
  // callbacks, and we can't support function pointers, especially between
  // host and device. However, it seems global destruction has little meaning
  // without any dynamic resource allocation on the device, and program-scope
  // variables are destroyed by the runtime when the program is released.
  if (getLangOpts().OpenCL) {
    GenKernelArgMetadata(Fn);
    Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
  }

  assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
         getLangOpts().GPUAllowDeviceInit);
  if (getLangOpts().HIP && getLangOpts().CUDAIsDevice) {
    Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
    Fn->addFnAttr("device-init");
  }

  ModuleInits.clear();
}

void CodeGenModule::EmitCXXGlobalCleanUpFunc() {
  if (CXXGlobalDtorsOrStermFinalizers.empty() &&
      PrioritizedCXXStermFinalizers.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global prioritized cleanup function.
  if (!PrioritizedCXXStermFinalizers.empty()) {
    SmallVector<CXXGlobalDtorsOrStermFinalizer_t, 8> LocalCXXStermFinalizers;
    llvm::array_pod_sort(PrioritizedCXXStermFinalizers.begin(),
                         PrioritizedCXXStermFinalizers.end());
    // Iterate over "chunks" of dtors with the same priority and emit each
    // chunk into a separate function. Note - everything is sorted first by
    // priority, second by lex order, so we emit dtor functions in the proper
    // order.
    for (SmallVectorImpl<StermFinalizerData>::iterator
             I = PrioritizedCXXStermFinalizers.begin(),
             E = PrioritizedCXXStermFinalizers.end();
         I != E;) {
      SmallVectorImpl<StermFinalizerData>::iterator PrioE =
          std::upper_bound(I + 1, E, *I, StermFinalizerPriorityCmp());

      LocalCXXStermFinalizers.clear();

      unsigned int Priority = I->first.priority;
      llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
          FTy, "_GLOBAL__a_" + getPrioritySuffix(Priority), FI);

      for (; I < PrioE; ++I) {
        llvm::FunctionCallee DtorFn = I->second;
        LocalCXXStermFinalizers.emplace_back(DtorFn.getFunctionType(),
                                             DtorFn.getCallee(), nullptr);
      }

      CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
          Fn, LocalCXXStermFinalizers);
      AddGlobalDtor(Fn, Priority);
    }
    PrioritizedCXXStermFinalizers.clear();
  }

  if (CXXGlobalDtorsOrStermFinalizers.empty())
    return;

  // Create our global cleanup function.
  llvm::Function *Fn =
      CreateGlobalInitOrCleanUpFunction(FTy, "_GLOBAL__D_a", FI);

  CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
      Fn, CXXGlobalDtorsOrStermFinalizers);
  AddGlobalDtor(Fn);
  CXXGlobalDtorsOrStermFinalizers.clear();
}

/// Emit the code necessary to initialize the given global variable.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
                                                       const VarDecl *D,
                                                       llvm::GlobalVariable *Addr,
                                                       bool PerformInit) {
  // Check if we need to emit debug info for variable initializer.
  if (D->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  CurEHLocation = D->getBeginLoc();

  StartFunction(GlobalDecl(D, DynamicInitKind::Initializer),
                getContext().VoidTy, Fn, getTypes().arrangeNullaryFunction(),
                FunctionArgList());
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  // Use guarded initialization if the global variable is weak. This
  // occurs for, e.g., instantiated static data members and
  // definitions explicitly marked weak.
  //
  // Also use guarded initialization for a variable with dynamic TLS and
  // unordered initialization. (If the initialization is ordered, the ABI
  // layer will guard the whole-TU initialization for us.)
  if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage() ||
      (D->getTLSKind() == VarDecl::TLS_Dynamic &&
       isTemplateInstantiation(D->getTemplateSpecializationKind()))) {
    EmitCXXGuardedInit(*D, Addr, PerformInit);
  } else {
    EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
  }

  FinishFunction();
}

void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
                                           ArrayRef<llvm::Function *> Decls,
                                           ConstantAddress Guard) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    llvm::BasicBlock *ExitBlock = nullptr;
    if (Guard.isValid()) {
      // If we have a guard variable, check whether we've already performed
      // these initializations. This happens for TLS initialization functions.
      llvm::Value *GuardVal = Builder.CreateLoad(Guard);
      llvm::Value *Uninit =
          Builder.CreateIsNull(GuardVal, "guard.uninitialized");
      llvm::BasicBlock *InitBlock = createBasicBlock("init");
      ExitBlock = createBasicBlock("exit");
      EmitCXXGuardedInitBranch(Uninit, InitBlock, ExitBlock,
                               GuardKind::TlsGuard, nullptr);
      EmitBlock(InitBlock);
      // Mark as initialized before initializing anything else. If the
      // initializers use previously-initialized thread_local vars, that's
      // probably supposed to be OK, but the standard doesn't say.
      Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(), 1),
                          Guard);

      // The guard variable can't ever change again.
      EmitInvariantStart(
          Guard.getPointer(),
          CharUnits::fromQuantity(
              CGM.getDataLayout().getTypeAllocSize(GuardVal->getType())));
    }

    RunCleanupsScope Scope(*this);

    // When building in Objective-C++ ARC mode, create an autorelease pool
    // around the global initializers.
    if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
      llvm::Value *token = EmitObjCAutoreleasePoolPush();
      EmitObjCAutoreleasePoolCleanup(token);
    }

    for (unsigned i = 0, e = Decls.size(); i != e; ++i)
      if (Decls[i])
        EmitRuntimeCall(Decls[i]);

    Scope.ForceCleanup();

    if (ExitBlock) {
      Builder.CreateBr(ExitBlock);
      EmitBlock(ExitBlock);
    }
  }

  FinishFunction();
}

void CodeGenFunction::GenerateCXXGlobalCleanUpFunc(
    llvm::Function *Fn,
    ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
                        llvm::Constant *>>
        DtorsOrStermFinalizers) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    // Emit the cleanups, in reverse order from construction.
    for (unsigned i = 0, e = DtorsOrStermFinalizers.size(); i != e; ++i) {
      llvm::FunctionType *CalleeTy;
      llvm::Value *Callee;
      llvm::Constant *Arg;
      std::tie(CalleeTy, Callee, Arg) = DtorsOrStermFinalizers[e - i - 1];

      llvm::CallInst *CI = nullptr;
      if (Arg == nullptr) {
        assert(
            CGM.getCXXABI().useSinitAndSterm() &&
            "Arg could not be nullptr unless using sinit and sterm functions.");
        CI = Builder.CreateCall(CalleeTy, Callee);
      } else
        CI = Builder.CreateCall(CalleeTy, Callee, Arg);

      // Make sure the call and the callee agree on calling convention.
      if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
        CI->setCallingConv(F->getCallingConv());
    }
  }

  FinishFunction();
}

/// generateDestroyHelper - Generates a helper function which, when
/// invoked, destroys the given object. The address of the object
/// should be in global memory.
llvm::Function *CodeGenFunction::generateDestroyHelper(
    Address addr, QualType type, Destroyer *destroyer,
    bool useEHCleanupForArray, const VarDecl *VD) {
  FunctionArgList args;
  ImplicitParamDecl Dst(getContext(), getContext().VoidPtrTy,
                        ImplicitParamDecl::Other);
  args.push_back(&Dst);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, args);
  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, "__cxx_global_array_dtor", FI, VD->getLocation());

  CurEHLocation = VD->getBeginLoc();

  StartFunction(GlobalDecl(VD, DynamicInitKind::GlobalArrayDestructor),
                getContext().VoidTy, fn, FI, args);
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  emitDestroy(addr, type, destroyer, useEHCleanupForArray);

  FinishFunction();

  return fn;
}