//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"

using namespace clang;
using namespace CodeGen;

void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  case Decl::BuiltinTemplate:
  case Decl::TranslationUnit:
  case Decl::ExternCContext:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::VarTemplateSpecialization:
  case Decl::VarTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXDeductionGuide:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::MSProperty:
  case Decl::IndirectField:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::VarTemplate:
  case Decl::FunctionTemplate:
  case Decl::TypeAliasTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::PragmaComment:
  case Decl::PragmaDetectMismatch:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::Export:
  case Decl::ObjCPropertyImpl:
  case Decl::FileScopeAsm:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
  case Decl::Captured:
  case Decl::ClassScopeFunctionSpecialization:
  case Decl::UsingShadow:
  case Decl::ConstructorUsingShadow:
  case Decl::ObjCTypeParam:
  case Decl::Binding:
    llvm_unreachable("Declaration should not be in declstmts!");
  case Decl::Function:     // void X();
  case Decl::Record:       // struct/union/class X;
  case Decl::Enum:         // enum X;
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::CXXRecord:    // struct/union/class X; [C++]
  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
  case Decl::Label:        // __label__ x;
  case Decl::Import:
  case Decl::OMPThreadPrivate:
  case Decl::OMPCapturedExpr:
  case Decl::OMPRequires:
  case Decl::Empty:
    // None of these decls require codegen support.
    return;

  case Decl::NamespaceAlias:
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
    return;
  case Decl::Using: // using X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDecl(cast<UsingDecl>(D));
    return;
  case Decl::UsingPack:
    for (auto *Using : cast<UsingPackDecl>(D).expansions())
      EmitDecl(*Using);
    return;
  case Decl::UsingDirective: // using namespace X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
    return;
  case Decl::Var:
  case Decl::Decomposition: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    EmitVarDecl(VD);
    if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
      for (auto *B : DD->bindings())
        if (auto *HD = B->getHoldingVar())
          EmitVarDecl(*HD);
    return;
  }

  case Decl::OMPDeclareReduction:
    return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);

  case Decl::Typedef:     // typedef int X;
  case Decl::TypeAlias: { // using X = int; [C++0x]
    const TypedefNameDecl &TD = cast<TypedefNameDecl>(D);
    QualType Ty = TD.getUnderlyingType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  }
}

/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
  if (D.hasExternalStorage())
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;

  // Some function-scope variables do not have static storage but still need
  // to be emitted like static variables, e.g. a function-scope variable in
  // the constant address space in OpenCL.
  if (D.getStorageDuration() != SD_Automatic) {
    // Static sampler variables are translated to function calls.
    if (D.getType()->isSamplerT())
      return;

    llvm::GlobalValue::LinkageTypes Linkage =
        CGM.getLLVMLinkageVarDefinition(&D, /*isConstant=*/false);

    // FIXME: We need to force the emission/use of a guard variable for
    // some variables even if we can constant-evaluate them because
    // we can't guarantee every translation unit will constant-evaluate them.

    return EmitStaticVarDecl(D, Linkage);
  }

  if (D.getType().getAddressSpace() == LangAS::opencl_local)
    return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);

  assert(D.hasLocalStorage());
  return EmitAutoVarDecl(D);
}
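
// For illustration, the dispatch above in source terms (hypothetical C/C++
// locals, not part of this file's logic):
//   void f() {
//     extern int a;      // external storage: emitted lazily on first use
//     static int b = 0;  // non-automatic duration: EmitStaticVarDecl
//     int c = 0;         // automatic storage: EmitAutoVarDecl
//   }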

static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
  if (CGM.getLangOpts().CPlusPlus)
    return CGM.getMangledName(&D).str();

  // If this isn't C++, we don't need a mangled name, just a pretty one.
  assert(!D.isExternallyVisible() && "name shouldn't matter");
  std::string ContextName;
  const DeclContext *DC = D.getDeclContext();
  if (auto *CD = dyn_cast<CapturedDecl>(DC))
    DC = cast<DeclContext>(CD->getNonClosureContext());
  if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    ContextName = CGM.getMangledName(FD);
  else if (const auto *BD = dyn_cast<BlockDecl>(DC))
    ContextName = CGM.getBlockMangledName(GlobalDecl(), BD);
  else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
    ContextName = OMD->getSelector().getAsString();
  else
    llvm_unreachable("Unknown context for static var decl");

  ContextName += "." + D.getNameAsString();
  return ContextName;
}
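
// e.g. a non-C++ static local 'count' inside function 'tick' gets the pretty
// name "tick.count"; inside an ObjC method the selector is used instead,
// giving something like "setFrame:.count".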

llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
    const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
  // We can't assume a static var decl is emitted exactly once before it is
  // referenced: it is possible to reference it before emitting the function
  // that contains it, and it is possible to emit the containing function
  // multiple times.
  if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
    return ExistingGV;

  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  // Use the label if the variable is renamed with the asm-label extension.
  std::string Name;
  if (D.hasAttr<AsmLabelAttr>())
    Name = getMangledName(&D);
  else
    Name = getStaticDeclName(*this, D);

  llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
  LangAS AS = GetGlobalVarAddressSpace(&D);
  unsigned TargetAS = getContext().getTargetAddressSpace(AS);

  // OpenCL variables in local address space and CUDA shared
  // variables cannot have an initializer.
  llvm::Constant *Init = nullptr;
  if (Ty.getAddressSpace() == LangAS::opencl_local ||
      D.hasAttr<CUDASharedAttr>())
    Init = llvm::UndefValue::get(LTy);
  else
    Init = EmitNullConstant(Ty);

  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
      getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
      nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
  GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());

  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));

  if (D.getTLSKind())
    setTLSMode(GV, D);

  setGVProperties(GV, &D);

  // Make sure the result is of the correct type.
  LangAS ExpectedAS = Ty.getAddressSpace();
  llvm::Constant *Addr = GV;
  if (AS != ExpectedAS) {
    Addr = getTargetCodeGenInfo().performAddrSpaceCast(
        *this, GV, AS, ExpectedAS,
        LTy->getPointerTo(getContext().getTargetAddressSpace(ExpectedAS)));
  }

  setStaticLocalDeclAddress(&D, Addr);

  // Ensure that the static local gets initialized by making sure the parent
  // function gets emitted eventually.
  const Decl *DC = cast<Decl>(D.getDeclContext());

  // We can't name blocks or captured statements directly, so try to emit their
  // parents.
  if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
    DC = DC->getNonClosureContext();
    // FIXME: Ensure that global blocks get emitted.
    if (!DC)
      return Addr;
  }

  GlobalDecl GD;
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
    GD = GlobalDecl(CD, Ctor_Base);
  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
    GD = GlobalDecl(DD, Dtor_Base);
  else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    GD = GlobalDecl(FD);
  else {
    // Don't do anything for Obj-C method decls or global closures. We should
    // never defer them.
    assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
  }
  if (GD.getDecl()) {
    // Disable emission of the parent function for the OpenMP device codegen.
    CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
    (void)GetAddrOfGlobal(GD);
  }

  return Addr;
}

/// hasNontrivialDestruction - Determine whether a type's destruction is
/// non-trivial. If so, and the variable uses static initialization, we must
/// register its destructor to run on exit.
static bool hasNontrivialDestruction(QualType T) {
  CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
  return RD && !RD->hasTrivialDestructor();
}
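
// e.g. given "struct S { ~S(); };", a function-local "static S s;" has
// nontrivial destruction, so even a constant-initialized 's' still needs a
// guarded pass through the declaration to register its destructor for exit.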

/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  ConstantEmitter emitter(*this);
  llvm::Constant *Init = emitter.tryEmitForInitializer(D);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getLangOpts().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (HaveInsertPoint()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
    }
    return GV;
  }

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer. (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
  if (GV->getType()->getElementType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
                                  OldGV->isConstant(),
                                  OldGV->getLinkage(), Init, "",
                                  /*InsertBefore*/ OldGV,
                                  OldGV->getThreadLocalMode(),
                           CGM.getContext().getTargetAddressSpace(D.getType()));
    GV->setVisibility(OldGV->getVisibility());
    GV->setDSOLocal(OldGV->isDSOLocal());
    GV->setComdat(OldGV->getComdat());

    // Steal the name of the old global
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global
    llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtrForOldDecl);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  GV->setConstant(CGM.isTypeConstant(D.getType(), true));
  GV->setInitializer(Init);

  emitter.finalize(GV);

  if (hasNontrivialDestruction(D.getType()) && HaveInsertPoint()) {
    // We have a constant initializer, but a nontrivial destructor. We still
    // need to perform a guarded "initialization" in order to register the
    // destructor.
    EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
  }

  return GV;
}
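
// Hypothetical example of the type-rewrite path above: a union initialized
// via a non-first member,
//   union U { int i; float f; };
//   static union U u = { .f = 1.0f };
// first gets a null initializer of the union's representative IR type; the
// real initializer then has a different IR type (containing a float), so the
// global is recreated with that type and RAUW'd over the old one.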

void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                        llvm::GlobalValue::LinkageTypes Linkage) {
  // Check to see if we already have a global variable for this
  // declaration. This can happen when double-emitting function
  // bodies, e.g. with complete and base constructors.
  llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
  CharUnits alignment = getContext().getDeclAlign(&D);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  setAddrOfLocalVar(&D, Address(addr, alignment));

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVariablyModifiedType(D.getType());

  // Save the type in case adding the initializer forces a type change.
  llvm::Type *expectedType = addr->getType();

  llvm::GlobalVariable *var =
      cast<llvm::GlobalVariable>(addr->stripPointerCasts());

  // CUDA's local and local static __shared__ variables should not
  // have any non-empty initializers. This is ensured by Sema.
  // Whatever initializer such a variable may have when it gets here is
  // a no-op and should not be emitted.
  bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
                         D.hasAttr<CUDASharedAttr>();
  // If this value has an initializer, emit it.
  if (D.getInit() && !isCudaSharedVar)
    var = AddInitializerToStaticVarDecl(D, var);

  var->setAlignment(alignment.getQuantity());

  if (D.hasAttr<AnnotateAttr>())
    CGM.AddGlobalAnnotations(&D, var);

  if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
    var->addAttribute("bss-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
    var->addAttribute("data-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
    var->addAttribute("rodata-section", SA->getName());

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    var->setSection(SA->getName());

  if (D.hasAttr<UsedAttr>())
    CGM.addUsedGlobal(var);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV uses of this constant will be invalid.
  llvm::Constant *castedAddr =
      llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
  if (var != castedAddr)
    LocalDeclMap.find(&D)->second = Address(castedAddr, alignment);
  CGM.setStaticLocalDeclAddress(&D, castedAddr);

  CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI &&
      CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(var, &D);
  }
}
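
// For illustration, the section-pragma handling above (hedged sketch; names
// are hypothetical):
//   #pragma clang section bss=".my_bss" data=".my_data"
//   void f() {
//     static int zeroed;      // global gets "bss-section"=".my_bss"
//     static int inited = 42; // global gets "data-section"=".my_data"
//   }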

namespace {
struct DestroyObject final : EHScopeStack::Cleanup {
  DestroyObject(Address addr, QualType type,
                CodeGenFunction::Destroyer *destroyer,
                bool useEHCleanupForArray)
      : addr(addr), type(type), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

  Address addr;
  QualType type;
  CodeGenFunction::Destroyer *destroyer;
  bool useEHCleanupForArray;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Don't use an EH cleanup recursively from an EH cleanup.
    bool useEHCleanupForArray =
        flags.isForNormalCleanup() && this->useEHCleanupForArray;

    CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
  }
};

template <class Derived>
struct DestroyNRVOVariable : EHScopeStack::Cleanup {
  DestroyNRVOVariable(Address addr, llvm::Value *NRVOFlag)
      : NRVOFlag(NRVOFlag), Loc(addr) {}

  llvm::Value *NRVOFlag;
  Address Loc;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Along the exceptions path we always execute the dtor.
    bool NRVO = flags.isForNormalCleanup() && NRVOFlag;

    llvm::BasicBlock *SkipDtorBB = nullptr;
    if (NRVO) {
      // If we exited via NRVO, we skip the destructor call.
      llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
      SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
      llvm::Value *DidNRVO =
          CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
      CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
      CGF.EmitBlock(RunDtorBB);
    }

    static_cast<Derived *>(this)->emitDestructorCall(CGF);

    if (NRVO) CGF.EmitBlock(SkipDtorBB);
  }

  virtual ~DestroyNRVOVariable() = default;
};
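
// Rough shape of the IR the NRVO-guarded cleanup emits (illustrative only):
//   %nrvo.val = load i1, i1* %nrvo
//   br i1 %nrvo.val, label %nrvo.skipdtor, label %nrvo.unused
// nrvo.unused:
//   ; destructor call
//   br label %nrvo.skipdtor
// nrvo.skipdtor: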

struct DestroyNRVOVariableCXX final
    : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
  DestroyNRVOVariableCXX(Address addr, const CXXDestructorDecl *Dtor,
                         llvm::Value *NRVOFlag)
      : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, NRVOFlag),
        Dtor(Dtor) {}

  const CXXDestructorDecl *Dtor;

  void emitDestructorCall(CodeGenFunction &CGF) {
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false, Loc);
  }
};

struct DestroyNRVOVariableC final
    : DestroyNRVOVariable<DestroyNRVOVariableC> {
  DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
      : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, NRVOFlag), Ty(Ty) {}

  QualType Ty;

  void emitDestructorCall(CodeGenFunction &CGF) {
    CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
  }
};

struct CallStackRestore final : EHScopeStack::Cleanup {
  Address Stack;
  CallStackRestore(Address Stack) : Stack(Stack) {}
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    llvm::Value *V = CGF.Builder.CreateLoad(Stack);
    llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
    CGF.Builder.CreateCall(F, V);
  }
};

struct ExtendGCLifetime final : EHScopeStack::Cleanup {
  const VarDecl &Var;
  ExtendGCLifetime(const VarDecl *var) : Var(*var) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Compute the address of the local variable, in case it's a
    // byref or something.
    DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                    Var.getType(), VK_LValue, SourceLocation());
    llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
                                              SourceLocation());
    CGF.EmitExtendGCLifetime(value);
  }
};

struct CallCleanupFunction final : EHScopeStack::Cleanup {
  llvm::Constant *CleanupFn;
  const CGFunctionInfo &FnInfo;
  const VarDecl &Var;

  CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                      const VarDecl *Var)
      : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                    Var.getType(), VK_LValue, SourceLocation());
    // Compute the address of the local variable, in case it's a byref
    // or something.
    llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer();

    // In some cases, the type of the function argument will be different from
    // the type of the pointer. An example of this is
    //   void f(void* arg);
    //   __attribute__((cleanup(f))) void *g;
    //
    // To fix this we insert a bitcast here.
    QualType ArgTy = FnInfo.arg_begin()->type;
    llvm::Value *Arg =
        CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

    CallArgList Args;
    Args.add(RValue::get(Arg),
             CGF.getContext().getPointerType(Var.getType()));
    auto Callee = CGCallee::forDirect(CleanupFn);
    CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
  }
};
} // end anonymous namespace

/// EmitAutoVarWithLifetime - Does the setup required for an automatic
/// variable with lifetime.
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
                                    Address addr,
                                    Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    // nothing to do
    break;

  case Qualifiers::OCL_Strong: {
    CodeGenFunction::Destroyer *destroyer =
        (var.hasAttr<ObjCPreciseLifetimeAttr>()
             ? CodeGenFunction::destroyARCStrongPrecise
             : CodeGenFunction::destroyARCStrongImprecise);

    CleanupKind cleanupKind = CGF.getARCCleanupKind();
    CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
                    cleanupKind & EHCleanup);
    break;
  }
  case Qualifiers::OCL_Autoreleasing:
    // nothing to do
    break;

  case Qualifiers::OCL_Weak:
    // __weak objects always get EH cleanups; otherwise, exceptions
    // could cause really nasty crashes instead of mere leaks.
    CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
                    CodeGenFunction::destroyARCWeak,
                    /*useEHCleanup*/ true);
    break;
  }
}
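
// e.g. "__weak id w = obj;" always gets a NormalAndEHCleanup that destroys
// the weak reference on scope exit, while "__strong id s = obj;" gets an ARC
// release cleanup whose precision depends on objc_precise_lifetime.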

static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
  if (const Expr *e = dyn_cast<Expr>(s)) {
    // Skip the most common kinds of expressions that make
    // hierarchy-walking expensive.
    s = e = e->IgnoreParenCasts();

    if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
      return (ref->getDecl() == &var);
    if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
      const BlockDecl *block = be->getBlockDecl();
      for (const auto &I : block->captures()) {
        if (I.getVariable() == &var)
          return true;
      }
    }
  }

  for (const Stmt *SubStmt : s->children())
    // SubStmt might be null; as in missing decl or conditional of an if-stmt.
    if (SubStmt && isAccessedBy(var, SubStmt))
      return true;

  return false;
}
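
// e.g. in "__strong id x = ^{ use(x); };" the block capture makes
// isAccessedBy(x, init) return true, so EmitScalarInit below zero-initializes
// 'x' before running the initializer to keep the capture well-defined.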

static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
  if (!decl) return false;
  if (!isa<VarDecl>(decl)) return false;
  const VarDecl *var = cast<VarDecl>(decl);
  return isAccessedBy(*var, e);
}

static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
                                   const LValue &destLV, const Expr *init) {
  bool needsCast = false;

  while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
    switch (castExpr->getCastKind()) {
    // Look through casts that don't require representation changes.
    case CK_NoOp:
    case CK_BitCast:
    case CK_BlockPointerToObjCPointerCast:
      needsCast = true;
      break;

    // If we find an l-value to r-value cast from a __weak variable,
    // emit this operation as a copy or move.
    case CK_LValueToRValue: {
      const Expr *srcExpr = castExpr->getSubExpr();
      if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
        return false;

      // Emit the source l-value.
      LValue srcLV = CGF.EmitLValue(srcExpr);

      // Handle a formal type change to avoid asserting.
      auto srcAddr = srcLV.getAddress();
      if (needsCast) {
        srcAddr = CGF.Builder.CreateElementBitCast(srcAddr,
                                          destLV.getAddress().getElementType());
      }

      // If it was an l-value, use objc_copyWeak.
      if (srcExpr->getValueKind() == VK_LValue) {
        CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr);
      } else {
        assert(srcExpr->getValueKind() == VK_XValue);
        CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr);
      }
      return true;
    }

    // Stop at anything else.
    default:
      return false;
    }

    init = castExpr->getSubExpr();
  }
  return false;
}
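
// For illustration: given "__weak id w; __weak id y = w;" the initializer is
// an l-value-to-r-value cast of a __weak l-value, so the init lowers to
// objc_copyWeak(&y, &w); an xvalue source, as in "__weak id y = std::move(w);"
// in ObjC++, lowers to objc_moveWeak instead.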

static void drillIntoBlockVariable(CodeGenFunction &CGF,
                                   LValue &lvalue,
                                   const VarDecl *var) {
  lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
}

void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
                                           SourceLocation Loc) {
  if (!SanOpts.has(SanitizerKind::NullabilityAssign))
    return;

  auto Nullability = LHS.getType()->getNullability(getContext());
  if (!Nullability || *Nullability != NullabilityKind::NonNull)
    return;

  // If the left-hand side must be nonnull, check that the right-hand side of
  // the assignment actually is nonnull.
  SanitizerScope SanScope(this);
  llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
      llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
      llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
  EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
            SanitizerHandler::TypeMismatch, StaticData, RHS);
}
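
// e.g. under -fsanitize=nullability-assign, the store in
//   int *_Nonnull p;
//   p = q;
// is preceded by a runtime check that 'q' is non-null, reported through the
// TypeMismatch handler.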

void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
  if (!lifetime) {
    llvm::Value *value = EmitScalarExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitNullabilityCheck(lvalue, value, init->getExprLoc());
    EmitStoreThroughLValue(RValue::get(value), lvalue, true);
    return;
  }

  if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
    init = DIE->getExpr();

  // If we're emitting a value with lifetime, we have to do the
  // initialization *before* we leave the cleanup scopes.
  if (const FullExpr *fe = dyn_cast<FullExpr>(init)) {
    enterFullExpression(fe);
    init = fe->getSubExpr();
  }
  CodeGenFunction::RunCleanupsScope Scope(*this);

  // We have to maintain the illusion that the variable is
  // zero-initialized. If the variable might be accessed in its
  // initializer, zero-initialize before running the initializer, then
  // actually perform the initialization with an assign.
  bool accessedByInit = false;
  if (lifetime != Qualifiers::OCL_ExplicitNone)
    accessedByInit = (capturedByInit || isAccessedBy(D, init));
  if (accessedByInit) {
    LValue tempLV = lvalue;
    // Drill down to the __block object if necessary.
    if (capturedByInit) {
      // We can use a simple GEP for this because it can't have been
      // moved yet.
      tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
                                              cast<VarDecl>(D),
                                              /*follow*/ false));
    }

    auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
    llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());

    // If __weak, we want to use a barrier under certain conditions.
    if (lifetime == Qualifiers::OCL_Weak)
      EmitARCInitWeak(tempLV.getAddress(), zero);

    // Otherwise just do a simple store.
    else
      EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
  }

  // Emit the initializer.
  llvm::Value *value = nullptr;

  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_Strong: {
    if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
      value = EmitARCRetainScalarExpr(init);
      break;
    }
    // If D is pseudo-strong, treat it like __unsafe_unretained here. This
    // means we omit the retain, which causes non-autoreleased return values
    // to be released immediately.
    LLVM_FALLTHROUGH;
  }

  case Qualifiers::OCL_ExplicitNone:
    value = EmitARCUnsafeUnretainedScalarExpr(init);
    break;

  case Qualifiers::OCL_Weak: {
    // If it's not accessed by the initializer, try to emit the
    // initialization with a copy or move.
    if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
      return;
    }

    // No way to optimize a producing initializer into this. It's not
    // worth optimizing for, because the value will immediately
    // disappear in the common case.
    value = EmitScalarExpr(init);

    if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    if (accessedByInit)
      EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
    else
      EmitARCInitWeak(lvalue.getAddress(), value);
    return;
  }

  case Qualifiers::OCL_Autoreleasing:
    value = EmitARCRetainAutoreleaseScalarExpr(init);
    break;
  }

  if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));

  EmitNullabilityCheck(lvalue, value, init->getExprLoc());

  // If the variable might have been accessed by its initializer, we
  // might have to initialize with a barrier. We have to do this for
  // both __weak and __strong, but __weak got filtered out above.
  if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
    EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
    EmitARCRelease(oldValue, ARCImpreciseLifetime);
    return;
  }

  EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}

/// Decide whether we can emit the non-zero parts of the specified initializer
/// with equal or fewer than NumStores scalar stores.
static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
                                               unsigned &NumStores) {
  // Zero and Undef never require any extra stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return true;
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init))
    return Init->isNullValue() || NumStores--;

  // See if we can emit each element.
  if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
    for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
      llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  // Anything else is hard and scary.
  return false;
}
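
// e.g. "int a[100] = {1};" is a single non-zero element in a sea of zeros:
// one scalar store fits well within the budget, so a bzero of the whole
// array plus one store beats a memcpy from a 400-byte constant global.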

/// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
/// the scalar stores that would be required.
static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
                                        llvm::Constant *Init, Address Loc,
                                        bool isVolatile, CGBuilderTy &Builder) {
  assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
         "called emitStoresForInitAfterBZero for zero or undef value.");

  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init)) {
    Builder.CreateStore(Init, Loc, isVolatile);
    return;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);

      // If necessary, get a pointer to the element and emit it.
      if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
        emitStoresForInitAfterBZero(
            CGM, Elt,
            Builder.CreateConstInBoundsGEP2_32(Loc, 0, i, CGM.getDataLayout()),
            isVolatile, Builder);
    }
    return;
  }

  assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
         "Unknown value type!");

  for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
    llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));

    // If necessary, get a pointer to the element and emit it.
    if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
      emitStoresForInitAfterBZero(
          CGM, Elt,
          Builder.CreateConstInBoundsGEP2_32(Loc, 0, i, CGM.getDataLayout()),
          isVolatile, Builder);
  }
}

/// Decide whether we should use bzero plus some stores to initialize a local
/// variable instead of using a memcpy from a constant global. It is beneficial
/// to use bzero if the global is all zeros, or mostly zeros and large.
static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
                                                 uint64_t GlobalSize) {
  // If a global is all zeros, always use a bzero.
  if (isa<llvm::ConstantAggregateZero>(Init)) return true;

  // If a non-zero global is <= 32 bytes, always use a memcpy. If it is larger,
  // use bzero plus stores if that requires 6 or fewer scalar stores.
  // TODO: Should the budget depend on the size? Avoiding a large global
  // warrants plopping in more stores.
  unsigned StoreBudget = 6;
  uint64_t SizeLimit = 32;

  return GlobalSize > SizeLimit &&
         canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
}

/// Decide whether we should use memset to initialize a local variable instead
/// of using a memcpy from a constant global. Assumes we've already decided
/// not to use bzero.
/// FIXME We could be more clever, as we are for bzero above, and generate
/// memset followed by stores. It's unclear that's worth the effort.
static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
                                                uint64_t GlobalSize) {
  uint64_t SizeLimit = 32;
  if (GlobalSize <= SizeLimit)
    return nullptr;
  return llvm::isBytewiseValue(Init);
}
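
// e.g. a 400-byte constant whose bytes are all 0xAA is a bytewise value, so
// llvm::isBytewiseValue returns the i8 0xAA and the init lowers to a single
// memset of that byte instead of a memcpy from a global.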

static llvm::Constant *patternFor(CodeGenModule &CGM, llvm::Type *Ty) {
  // The following value is a guaranteed unmappable pointer value and has a
  // repeated byte-pattern which makes it easier to synthesize. We use it for
  // pointers as well as integers so that aggregates are likely to be
  // initialized with this repeated value.
  constexpr uint64_t LargeValue = 0xAAAAAAAAAAAAAAAAull;
  // For 32-bit platforms it's a bit trickier because, across systems, only the
  // zero page can reasonably be expected to be unmapped, and even then we need
  // a very low address. We use a smaller value, and that value sadly doesn't
  // have a repeated byte-pattern. We don't use it for integers.
  constexpr uint32_t SmallValue = 0x000000AA;
  // Floating-point values are initialized as NaNs because they propagate.
  // Using a repeated byte pattern means that it will be easier to initialize
  // all-floating-point aggregates and arrays with memset. Further, aggregates
  // which mix integral and a few floats might also initialize with memset
  // followed by a handful of stores for the floats. Using fairly unique NaNs
  // also means they'll be easier to distinguish in a crash.
  constexpr bool NegativeNaN = true;
  constexpr uint64_t NaNPayload = 0xFFFFFFFFFFFFFFFFull;
  if (Ty->isIntOrIntVectorTy()) {
    unsigned BitWidth = cast<llvm::IntegerType>(
                            Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
                            ->getBitWidth();
    if (BitWidth <= 64)
      return llvm::ConstantInt::get(Ty, LargeValue);
    return llvm::ConstantInt::get(
        Ty, llvm::APInt::getSplat(BitWidth, llvm::APInt(64, LargeValue)));
  }
  if (Ty->isPtrOrPtrVectorTy()) {
    auto *PtrTy = cast<llvm::PointerType>(
        Ty->isVectorTy() ? Ty->getVectorElementType() : Ty);
    unsigned PtrWidth = CGM.getContext().getTargetInfo().getPointerWidth(
        PtrTy->getAddressSpace());
    llvm::Type *IntTy = llvm::IntegerType::get(CGM.getLLVMContext(), PtrWidth);
    uint64_t IntValue;
    switch (PtrWidth) {
    default:
      llvm_unreachable("pattern initialization of unsupported pointer width");
    case 64:
      IntValue = LargeValue;
      break;
    case 32:
      IntValue = SmallValue;
      break;
    }
    auto *Int = llvm::ConstantInt::get(IntTy, IntValue);
    return llvm::ConstantExpr::getIntToPtr(Int, PtrTy);
  }
  if (Ty->isFPOrFPVectorTy()) {
    unsigned BitWidth = llvm::APFloat::semanticsSizeInBits(
        (Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
            ->getFltSemantics());
    llvm::APInt Payload(64, NaNPayload);
    if (BitWidth >= 64)
      Payload = llvm::APInt::getSplat(BitWidth, Payload);
    return llvm::ConstantFP::getQNaN(Ty, NegativeNaN, &Payload);
  }
  if (Ty->isArrayTy()) {
    // Note: this doesn't touch tail padding (at the end of an object, before
    // the next array object). It is instead handled by replaceUndef.
    auto *ArrTy = cast<llvm::ArrayType>(Ty);
    llvm::SmallVector<llvm::Constant *, 8> Element(
        ArrTy->getNumElements(), patternFor(CGM, ArrTy->getElementType()));
    return llvm::ConstantArray::get(ArrTy, Element);
  }

  // Note: this doesn't touch struct padding. It will initialize as much union
  // padding as is required for the largest type in the union. Padding is
  // instead handled by replaceUndef. Stores to structs with volatile members
  // don't have a volatile qualifier when initialized according to C++. This is
  // fine because stack-based volatiles don't really have volatile semantics
  // anyways, and the initialization shouldn't be observable.
  auto *StructTy = cast<llvm::StructType>(Ty);
  llvm::SmallVector<llvm::Constant *, 8> Struct(StructTy->getNumElements());
  for (unsigned El = 0; El != Struct.size(); ++El)
    Struct[El] = patternFor(CGM, StructTy->getElementType(El));
  return llvm::ConstantStruct::get(StructTy, Struct);
}
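
// Illustrative results of patternFor, assuming a 64-bit target:
//   i32    -> 0xAAAAAAAA (the 64-bit constant truncated to the bit width)
//   i8*    -> inttoptr(0xAAAAAAAAAAAAAAAA), a guaranteed-unmappable address
//   double -> a negative quiet NaN with an all-ones payload
//   arrays/structs -> the element patterns applied recursively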

static Address createUnnamedGlobalFrom(CodeGenModule &CGM, const VarDecl &D,
                                       CGBuilderTy &Builder,
                                       llvm::Constant *Constant,
                                       CharUnits Align) {
  auto FunctionName = [&](const DeclContext *DC) -> std::string {
    if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
      if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
        return CC->getNameAsString();
      if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
        return CD->getNameAsString();
      return CGM.getMangledName(FD);
    } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
      return OM->getNameAsString();
    } else if (isa<BlockDecl>(DC)) {
      return "<block>";
    } else if (isa<CapturedDecl>(DC)) {
      return "<captured>";
    } else {
      llvm_unreachable("expected a function or method");
    }
  };

  auto *Ty = Constant->getType();
  bool isConstant = true;
  llvm::GlobalVariable *InsertBefore = nullptr;
  unsigned AS = CGM.getContext().getTargetAddressSpace(
      CGM.getStringLiteralAddressSpace());
  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
      CGM.getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
      Constant,
      "__const." + FunctionName(D.getParentFunctionOrMethod()) + "." +
          D.getName(),
      InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
  GV->setAlignment(Align.getQuantity());
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  Address SrcPtr = Address(GV, Align);
  llvm::Type *BP = llvm::PointerType::getInt8PtrTy(CGM.getLLVMContext(), AS);
  if (SrcPtr.getType() != BP)
    SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
  return SrcPtr;
}

static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder,
                                  llvm::Constant *constant) {
  auto *Ty = constant->getType();
  bool isScalar = Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy() ||
                  Ty->isFPOrFPVectorTy();
  if (isScalar) {
    Builder.CreateStore(constant, Loc, isVolatile);
    return;
  }

  auto *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
  auto *IntPtrTy = CGM.getDataLayout().getIntPtrType(CGM.getLLVMContext());

  // If the initializer is all or mostly the same, codegen with bzero / memset
  // then do a few stores afterward.
  uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
  auto *SizeVal = llvm::ConstantInt::get(IntPtrTy, ConstantSize);
  if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
    Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
                         isVolatile);

    bool valueAlreadyCorrect =
        constant->isNullValue() || isa<llvm::UndefValue>(constant);
    if (!valueAlreadyCorrect) {
      Loc = Builder.CreateBitCast(Loc, Ty->getPointerTo(Loc.getAddressSpace()));
      emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder);
    }
    return;
  }

  llvm::Value *Pattern = shouldUseMemSetToInitialize(constant, ConstantSize);
  if (Pattern) {
    uint64_t Value = 0x00;
    if (!isa<llvm::UndefValue>(Pattern)) {
      const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
      assert(AP.getBitWidth() <= 8);
      Value = AP.getLimitedValue();
    }
    Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, Value), SizeVal,
                         isVolatile);
    return;
  }

  Builder.CreateMemCpy(
      Loc,
      createUnnamedGlobalFrom(CGM, D, Builder, constant, Loc.getAlignment()),
      SizeVal, isVolatile);
}

static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant = llvm::Constant::getNullValue(ElTy);
  emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
}

static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
                                     Address Loc, bool isVolatile,
                                     CGBuilderTy &Builder) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant = patternFor(CGM, ElTy);
  assert(!isa<llvm::UndefValue>(constant));
  emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
}

static bool containsUndef(llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return true;
  if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
    for (llvm::Use &Op : constant->operands())
      if (containsUndef(cast<llvm::Constant>(Op)))
        return true;
  return false;
}

static llvm::Constant *replaceUndef(llvm::Constant *constant) {
  // FIXME: when doing pattern initialization, replace undef with 0xAA instead.
  // FIXME: also replace padding between values by creating a new struct type
  // which has no padding.
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return llvm::Constant::getNullValue(Ty);
  if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
    return constant;
  if (!containsUndef(constant))
    return constant;
  llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
  for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
    auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
    Values[Op] = replaceUndef(OpValue);
  }
  if (Ty->isStructTy())
    return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
  if (Ty->isArrayTy())
    return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
  assert(Ty->isVectorTy());
  return llvm::ConstantVector::get(Values);
}
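
// e.g. a partially-initialized aggregate whose IR constant is
// "{ i32 undef, i32 1 }" is rewritten to "{ i32 0, i32 1 }" so that no undef
// bytes escape into the emitted stores.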

/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
  AutoVarEmission emission = EmitAutoVarAlloca(D);
  EmitAutoVarInit(emission);
  EmitAutoVarCleanups(emission);
}

/// Emit a lifetime.begin marker if some criteria are satisfied.
/// \return a pointer to the temporary size Value if a marker was emitted, null
/// otherwise
llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
                                                llvm::Value *Addr) {
  if (!ShouldEmitLifetimeMarkers)
    return nullptr;

  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
  llvm::Value *SizeV = llvm::ConstantInt::get(Int64Ty, Size);
  Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
  C->setDoesNotThrow();
  return SizeV;
}

void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
  Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
  C->setDoesNotThrow();
}
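
// IR sketch of the marker pair these helpers emit around a 4-byte local:
//   %0 = bitcast i32* %x to i8*
//   call void @llvm.lifetime.start.p0i8(i64 4, i8* %0)
//   ; ...uses of %x...
//   call void @llvm.lifetime.end.p0i8(i64 4, i8* %0)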

void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
    CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
  // For each dimension, store its QualType and the corresponding
  // size-expression Value.
  SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
  SmallVector<IdentifierInfo *, 4> VLAExprNames;

  // Break down the array into individual dimensions.
  QualType Type1D = D.getType();
  while (getContext().getAsVariableArrayType(Type1D)) {
    auto VlaSize = getVLAElements1D(Type1D);
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
    else {
      // Generate a locally unique name for the size expression.
      Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
      SmallString<12> Buffer;
      StringRef NameRef = Name.toStringRef(Buffer);
      auto &Ident = getContext().Idents.getOwn(NameRef);
      VLAExprNames.push_back(&Ident);
      auto SizeExprAddr =
          CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
      Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
      Dimensions.emplace_back(SizeExprAddr.getPointer(),
                              Type1D.getUnqualifiedType());
    }
    Type1D = VlaSize.Type;
  }

  if (!EmitDebugInfo)
    return;

  // Register each dimension's size-expression with a DILocalVariable,
  // so that it can be used by CGDebugInfo when instantiating a DISubrange
  // to describe this array.
  unsigned NameIdx = 0;
  for (auto &VlaSize : Dimensions) {
    llvm::Metadata *MD;
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      MD = llvm::ConstantAsMetadata::get(C);
    else {
      // Create an artificial VarDecl to generate debug info for.
      IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
      auto VlaExprTy = VlaSize.NumElts->getType()->getPointerElementType();
      auto QT = getContext().getIntTypeForBitwidth(
          VlaExprTy->getScalarSizeInBits(), false);
      auto *ArtificialDecl = VarDecl::Create(
          getContext(), const_cast<DeclContext *>(D.getDeclContext()),
          D.getLocation(), D.getLocation(), NameIdent, QT,
          getContext().CreateTypeSourceInfo(QT), SC_Auto);
      ArtificialDecl->setImplicit();

      MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
                                         Builder);
    }
    assert(MD && "No Size expression debug node created");
    DI->registerVLASizeExpression(VlaSize.Type, MD);
  }
}
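
// e.g. for "void f(int n, int m) { int a[n][m]; }" the two non-constant
// bounds are stashed in allocas named __vla_expr0 and __vla_expr1; artificial
// DILocalVariables for them let each DISubrange reference the runtime extent
// of its dimension.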
1294
1295 /// EmitAutoVarAlloca - Emit the alloca and debug information for a
1296 /// local variable. Does not emit initialization or destruction.
1297 CodeGenFunction::AutoVarEmission
EmitAutoVarAlloca(const VarDecl & D)1298 CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
1299 QualType Ty = D.getType();
1300 assert(
1301 Ty.getAddressSpace() == LangAS::Default ||
1302 (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));
1303
1304 AutoVarEmission emission(D);
1305
1306 bool isEscapingByRef = D.isEscapingByref();
1307 emission.IsEscapingByRef = isEscapingByRef;
1308
1309 CharUnits alignment = getContext().getDeclAlign(&D);
1310
1311 // If the type is variably-modified, emit all the VLA sizes for it.
1312 if (Ty->isVariablyModifiedType())
1313 EmitVariablyModifiedType(Ty);
1314
1315 auto *DI = getDebugInfo();
1316 bool EmitDebugInfo = DI && CGM.getCodeGenOpts().getDebugInfo() >=
1317 codegenoptions::LimitedDebugInfo;
1318
1319 Address address = Address::invalid();
1320 Address AllocaAddr = Address::invalid();
1321 if (Ty->isConstantSizeType()) {
1322 bool NRVO = getLangOpts().ElideConstructors &&
1323 D.isNRVOVariable();
1324
1325 // If this value is an array or struct with a statically determinable
1326 // constant initializer, there are optimizations we can do.
1327 //
1328 // TODO: We should constant-evaluate the initializer of any variable,
1329 // as long as it is initialized by a constant expression. Currently,
1330 // isConstantInitializer produces wrong answers for structs with
1331 // reference or bitfield members, and a few other cases, and checking
1332 // for POD-ness protects us from some of these.
1333 if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
1334 (D.isConstexpr() ||
1335 ((Ty.isPODType(getContext()) ||
1336 getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
1337 D.getInit()->isConstantInitializer(getContext(), false)))) {
1338
1339 // If the variable's a const type, and it's neither an NRVO
1340 // candidate nor a __block variable and has no mutable members,
1341 // emit it as a global instead.
1342 // Exception is if a variable is located in non-constant address space
1343 // in OpenCL.
1344 if ((!getLangOpts().OpenCL ||
1345 Ty.getAddressSpace() == LangAS::opencl_constant) &&
1346 (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
1347 !isEscapingByRef && CGM.isTypeConstant(Ty, true))) {
1348 EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
1349
1350 // Signal this condition to later callbacks.
1351 emission.Addr = Address::invalid();
1352 assert(emission.wasEmittedAsGlobal());
1353 return emission;
1354 }
1355
1356 // Otherwise, tell the initialization code that we're in this case.
1357 emission.IsConstantAggregate = true;
1358 }
1359
1360 // A normal fixed sized variable becomes an alloca in the entry block,
1361 // unless:
1362 // - it's an NRVO variable.
1363 // - we are compiling OpenMP and it's an OpenMP local variable.
1364
1365 Address OpenMPLocalAddr =
1366 getLangOpts().OpenMP
1367 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
1368 : Address::invalid();
1369 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
1370 address = OpenMPLocalAddr;
1371 } else if (NRVO) {
1372 // The named return value optimization: allocate this variable in the
1373 // return slot, so that we can elide the copy when returning this
1374 // variable (C++0x [class.copy]p34).
1375 address = ReturnValue;
1376
1377 if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
1378 const auto *RD = RecordTy->getDecl();
1379 const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
1380 if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
1381 RD->isNonTrivialToPrimitiveDestroy()) {
1382 // Create a flag that is used to indicate when the NRVO was applied
1383 // to this variable. Set it to zero to indicate that NRVO was not
1384 // applied.
1385 llvm::Value *Zero = Builder.getFalse();
1386 Address NRVOFlag =
1387 CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
1388 EnsureInsertPoint();
1389 Builder.CreateStore(Zero, NRVOFlag);
1390
1391 // Record the NRVO flag for this variable.
1392 NRVOFlags[&D] = NRVOFlag.getPointer();
1393 emission.NRVOFlag = NRVOFlag.getPointer();
        }
      }
    } else {
      CharUnits allocaAlignment;
      llvm::Type *allocaTy;
      if (isEscapingByRef) {
        auto &byrefInfo = getBlockByrefInfo(&D);
        allocaTy = byrefInfo.Type;
        allocaAlignment = byrefInfo.ByrefAlignment;
      } else {
        allocaTy = ConvertTypeForMem(Ty);
        allocaAlignment = alignment;
      }

      // Create the alloca. Note that we set the name separately from
      // building the instruction so that it's there even in no-asserts
      // builds.
      address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName(),
                                 /*ArraySize=*/nullptr, &AllocaAddr);

      // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
      // the catch parameter starts in the catchpad instruction, and we can't
      // insert code in those basic blocks.
      bool IsMSCatchParam =
          D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();

      // Emit a lifetime intrinsic if meaningful. There's little point in
      // doing this without a valid insertion point, since the marker would
      // be emitted into unreachable code anyway.
      if (HaveInsertPoint() && !IsMSCatchParam) {
        // If there's a jump into the lifetime of this variable, its lifetime
        // gets broken up into several regions in IR, which requires more work
        // to handle correctly. For now, just omit the intrinsics; this is a
        // rare case, and it's better to just be conservatively correct.
        // PR28267.
        //
        // We have to do this in all language modes if there's a jump past the
        // declaration. We also have to do it in C if there's a jump to an
        // earlier point in the current block because non-VLA lifetimes begin as
        // soon as the containing block is entered, not when its variables
        // actually come into scope; suppressing the lifetime annotations
        // completely in this case is unnecessarily pessimistic, but again, this
        // is rare.
        if (!Bypasses.IsBypassed(&D) &&
            !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
          uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
          emission.SizeForLifetimeMarkers =
              EmitLifetimeStart(size, AllocaAddr.getPointer());
        }
      } else {
        assert(!emission.useLifetimeMarkers());
      }
    }
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      Address Stack =
          CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");

      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);
      Builder.CreateStore(V, Stack);
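      // The IR emitted here looks roughly like:
      //   %saved_stack = alloca i8*
      //   %sp = call i8* @llvm.stacksave()
      //   store i8* %sp, i8** %saved_stack
      // with a matching @llvm.stackrestore in the cleanup pushed below.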

      DidCallStackSave = true;

      // Push a cleanup block and restore the stack there.
      // FIXME: in general circumstances, this should be an EH cleanup.
      pushStackRestore(NormalCleanup, Stack);
    }

    auto VlaSize = getVLASize(Ty);
    llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);

    // Allocate memory for the array.
    address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
                               &AllocaAddr);

    // If we have debug info enabled, properly describe the VLA dimensions for
    // this type by registering the vla size expression for each of the
    // dimensions.
    EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
  }

  setAddrOfLocalVar(&D, address);
  emission.Addr = address;
  emission.AllocaAddr = AllocaAddr;

  // Emit debug info for local var declaration.
  if (EmitDebugInfo && HaveInsertPoint()) {
    DI->setLocation(D.getLocation());
    (void)DI->EmitDeclareOfAutoVariable(&D, address.getPointer(), Builder);
  }

  if (D.hasAttr<AnnotateAttr>())
    EmitVarAnnotations(&D, address.getPointer());

  // Make sure we call @llvm.lifetime.end.
  if (emission.useLifetimeMarkers())
    EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                         emission.getOriginalAllocatedAddress(),
                                         emission.getSizeForLifetimeMarkers());

  return emission;
}

static bool isCapturedBy(const VarDecl &, const Expr *);

/// Determines whether the given __block variable is potentially
/// captured by the given statement.
static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
  if (const Expr *E = dyn_cast<Expr>(S))
    return isCapturedBy(Var, E);
  for (const Stmt *SubStmt : S->children())
    if (isCapturedBy(Var, SubStmt))
      return true;
  return false;
}

/// Determines whether the given __block variable is potentially
/// captured by the given expression.
static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
  // Skip the most common kinds of expressions that make
  // hierarchy-walking expensive.
  E = E->IgnoreParenCasts();

  if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
    const BlockDecl *Block = BE->getBlockDecl();
    for (const auto &I : Block->captures()) {
      if (I.getVariable() == &Var)
        return true;
    }

    // No need to walk into the subexpressions.
    return false;
  }

  if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
    const CompoundStmt *CS = SE->getSubStmt();
    for (const auto *BI : CS->body())
      if (const auto *BIE = dyn_cast<Expr>(BI)) {
        if (isCapturedBy(Var, BIE))
          return true;
      }
      else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
        // Special-case declarations: check their initializers for captures.
        for (const auto *I : DS->decls()) {
          if (const auto *VD = dyn_cast<VarDecl>(I)) {
            const Expr *Init = VD->getInit();
            if (Init && isCapturedBy(Var, Init))
              return true;
          }
        }
      }
      else
        // FIXME: Conservatively assume that any other statement causes
        // capturing. Later, provide code to poke into statements for
        // capture analysis.
        return true;
    return false;
  }

  for (const Stmt *SubStmt : E->children())
    if (isCapturedBy(Var, SubStmt))
      return true;

  return false;
}

/// Determine whether the given initializer is trivial in the sense
/// that it requires no code to be generated.
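/// For example, "X x;" is trivial when X's default constructor is trivial
/// and no zero-initialization is required.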
bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
  if (!Init)
    return true;

  if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
    if (CXXConstructorDecl *Constructor = Construct->getConstructor())
      if (Constructor->isTrivial() &&
          Constructor->isDefaultConstructor() &&
          !Construct->requiresZeroInitialization())
        return true;

  return false;
}

void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  const VarDecl &D = *emission.Variable;
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
  QualType type = D.getType();

  bool isVolatile = type.isVolatileQualified();

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!Init || !ContainsLabel(Init)) return;
    EnsureInsertPoint();
  }

  // Initialize the structure of a __block variable.
  if (emission.IsEscapingByRef)
    emitByrefStructureInit(emission);

  // Initialize the variable here if it doesn't have an initializer and it is
  // a C struct that is non-trivial to initialize or an array containing such
  // a struct.
  if (!Init &&
      type.isNonTrivialToPrimitiveDefaultInitialize() ==
          QualType::PDIK_Struct) {
    LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
    if (emission.IsEscapingByRef)
      drillIntoBlockVariable(*this, Dst, &D);
    defaultInitNonTrivialCStructVar(Dst);
    return;
  }

  // Check whether this is a byref variable that's potentially
  // captured and moved by its own initializer. If so, we'll need to
  // emit the initializer first, then copy into the variable.
  bool capturedByInit =
      Init && emission.IsEscapingByRef && isCapturedBy(D, Init);

  bool locIsByrefHeader = !capturedByInit;
  const Address Loc =
      locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;

  // Note: constexpr already initializes everything correctly.
  LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
      (D.isConstexpr()
           ? LangOptions::TrivialAutoVarInitKind::Uninitialized
           : (D.getAttr<UninitializedAttr>()
                  ? LangOptions::TrivialAutoVarInitKind::Uninitialized
                  : getContext().getLangOpts().getTrivialAutoVarInit()));
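  // (This is the mode selected by -ftrivial-auto-var-init=, unless the
  // variable opts out via __attribute__((uninitialized)).)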

  auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
    if (trivialAutoVarInit ==
        LangOptions::TrivialAutoVarInitKind::Uninitialized)
      return;

    // Only initialize a __block's storage: we always initialize the header.
    if (emission.IsEscapingByRef && !locIsByrefHeader)
      Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);

    CharUnits Size = getContext().getTypeSizeInChars(type);
    if (!Size.isZero()) {
      switch (trivialAutoVarInit) {
      case LangOptions::TrivialAutoVarInitKind::Uninitialized:
        llvm_unreachable("Uninitialized handled above");
      case LangOptions::TrivialAutoVarInitKind::Zero:
        emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
        break;
      case LangOptions::TrivialAutoVarInitKind::Pattern:
        emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
        break;
      }
      return;
    }

    // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
    // them, so emit a memcpy with the VLA size to initialize each element.
    // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
    // will catch that code, but there exists code which generates zero-sized
    // VLAs. Be nice and initialize whatever they requested.
    const VariableArrayType *VlaType =
        dyn_cast_or_null<VariableArrayType>(getContext().getAsArrayType(type));
    if (!VlaType)
      return;
    auto VlaSize = getVLASize(VlaType);
    auto SizeVal = VlaSize.NumElts;
    CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
    switch (trivialAutoVarInit) {
    case LangOptions::TrivialAutoVarInitKind::Uninitialized:
      llvm_unreachable("Uninitialized handled above");

    case LangOptions::TrivialAutoVarInitKind::Zero:
      if (!EltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
      Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
                           isVolatile);
      break;

    case LangOptions::TrivialAutoVarInitKind::Pattern: {
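      // The emitted control flow has this shape:
      //   vla-setup.loop: compute the byte size and begin/end pointers
      //   vla-init.loop:  memcpy the pattern into one element and advance
      //   vla-init.cont:  resume normal emission
      // with an up-front guard that branches straight to the continuation
      // for a zero-sized VLA.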
      llvm::Type *ElTy = Loc.getElementType();
      llvm::Constant *Constant = patternFor(CGM, ElTy);
      CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
      llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
      llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
      llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
      llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
          SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
          "vla.iszerosized");
      Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
      EmitBlock(SetupBB);
      if (!EltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
      llvm::Value *BaseSizeInChars =
          llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
      Address Begin = Builder.CreateElementBitCast(Loc, Int8Ty, "vla.begin");
      llvm::Value *End =
          Builder.CreateInBoundsGEP(Begin.getPointer(), SizeVal, "vla.end");
      llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
      EmitBlock(LoopBB);
      llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
      Cur->addIncoming(Begin.getPointer(), OriginBB);
      CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
      Builder.CreateMemCpy(
          Address(Cur, CurAlign),
          createUnnamedGlobalFrom(CGM, D, Builder, Constant, ConstantAlign),
          BaseSizeInChars, isVolatile);
      llvm::Value *Next =
          Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
      llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
      Builder.CreateCondBr(Done, ContBB, LoopBB);
      Cur->addIncoming(Next, LoopBB);
      EmitBlock(ContBB);
    } break;
    }
  };

  if (isTrivialInitializer(Init)) {
    initializeWhatIsTechnicallyUninitialized(Loc);
    return;
  }

  llvm::Constant *constant = nullptr;
  if (emission.IsConstantAggregate || D.isConstexpr()) {
    assert(!capturedByInit && "constant init contains a capturing block?");
    constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
    if (constant && trivialAutoVarInit !=
                        LangOptions::TrivialAutoVarInitKind::Uninitialized)
      constant = replaceUndef(constant);
  }

  if (!constant) {
    initializeWhatIsTechnicallyUninitialized(Loc);
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitExprAsInit(Init, &D, lv, capturedByInit);
  }

  if (!emission.IsConstantAggregate) {
    // For simple scalar/complex initialization, store the value directly.
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitStoreThroughLValue(RValue::get(constant), lv, true);
  }

  llvm::Type *BP = CGM.Int8Ty->getPointerTo(Loc.getAddressSpace());
  emitStoresForConstant(
      CGM, D, (Loc.getType() == BP) ? Loc : Builder.CreateBitCast(Loc, BP),
      isVolatile, Builder, constant);
}

/// Emit an expression as an initializer for an object (variable, field, etc.)
/// at the given location. The expression is not necessarily the normal
/// initializer for the object, and the address is not necessarily
/// its normal location.
///
/// \param init the initializing expression
/// \param D the object to act as if we're initializing
/// \param lvalue the lvalue to initialize; its address is a pointer
///   to the LLVM mapping of the object's type
/// \param capturedByInit true if \p D is a __block variable
///   whose address is potentially changed by the initializer
void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  QualType type = D->getType();

  if (type->isReferenceType()) {
    RValue rvalue = EmitReferenceBindingToExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreThroughLValue(rvalue, lvalue, true);
    return;
  }
  switch (getEvaluationKind(type)) {
  case TEK_Scalar:
    EmitScalarInit(init, D, lvalue, capturedByInit);
    return;
  case TEK_Complex: {
    ComplexPairTy complex = EmitComplexExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreOfComplex(complex, lvalue, /*init*/ true);
    return;
  }
  case TEK_Aggregate:
    if (type->isAtomicType()) {
      EmitAtomicInit(const_cast<Expr*>(init), lvalue);
    } else {
      AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
      if (isa<VarDecl>(D))
        Overlap = AggValueSlot::DoesNotOverlap;
      else if (auto *FD = dyn_cast<FieldDecl>(D))
        Overlap = overlapForFieldInit(FD);
      // TODO: how can we delay here if D is captured by its initializer?
      EmitAggExpr(init, AggValueSlot::forLValue(lvalue,
                                                AggValueSlot::IsDestructed,
                                                AggValueSlot::DoesNotNeedGCBarriers,
                                                AggValueSlot::IsNotAliased,
                                                Overlap));
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

/// Enter a destroy cleanup for the given local variable.
void CodeGenFunction::emitAutoVarTypeCleanup(
    const CodeGenFunction::AutoVarEmission &emission,
    QualType::DestructionKind dtorKind) {
  assert(dtorKind != QualType::DK_none);

  // Note that for __block variables, we want to destroy the
  // original stack object, not the possibly forwarded object.
  Address addr = emission.getObjectAddress(*this);

  const VarDecl *var = emission.Variable;
  QualType type = var->getType();

  CleanupKind cleanupKind = NormalAndEHCleanup;
  CodeGenFunction::Destroyer *destroyer = nullptr;

  switch (dtorKind) {
  case QualType::DK_none:
    llvm_unreachable("no cleanup for trivially-destructible variable");

  case QualType::DK_cxx_destructor:
    // If there's an NRVO flag on the emission, we need a different
    // cleanup.
    if (emission.NRVOFlag) {
      assert(!type->isArrayType());
      CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
      EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, dtor,
                                                  emission.NRVOFlag);
      return;
    }
    break;

  case QualType::DK_objc_strong_lifetime:
    // Suppress cleanups for pseudo-strong variables.
    if (var->isARCPseudoStrong()) return;

    // Otherwise, consider whether to use an EH cleanup or not.
    cleanupKind = getARCCleanupKind();

    // Use the imprecise destroyer by default.
    if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
      destroyer = CodeGenFunction::destroyARCStrongImprecise;
    break;

  case QualType::DK_objc_weak_lifetime:
    break;

  case QualType::DK_nontrivial_c_struct:
    destroyer = CodeGenFunction::destroyNonTrivialCStruct;
    if (emission.NRVOFlag) {
      assert(!type->isArrayType());
      EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
                                                emission.NRVOFlag, type);
      return;
    }
    break;
  }

  // If we haven't chosen a more specific destroyer, use the default.
  if (!destroyer) destroyer = getDestroyer(dtorKind);

  // Use an EH cleanup in array destructors iff the destructor itself
  // is being pushed as an EH cleanup.
  bool useEHCleanup = (cleanupKind & EHCleanup);
  EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
                                     useEHCleanup);
}

void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  // If we don't have an insertion point, we're done. Sema prevents
  // us from jumping into any of these scopes anyway.
  if (!HaveInsertPoint()) return;

  const VarDecl &D = *emission.Variable;

  // Check the type for a cleanup.
  if (QualType::DestructionKind dtorKind = D.getType().isDestructedType())
    emitAutoVarTypeCleanup(emission, dtorKind);

  // In GC mode, honor objc_precise_lifetime.
  if (getLangOpts().getGC() != LangOptions::NonGC &&
      D.hasAttr<ObjCPreciseLifetimeAttr>()) {
    EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
  }

  // Handle the cleanup attribute.
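  // For example, "void fn(int *); int x __attribute__((cleanup(fn)));"
  // arranges for fn(&x) to be called when x goes out of scope.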
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant *F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
    EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
  }

  // If this is a block variable, call _Block_object_destroy
  // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
  // mode.
  if (emission.IsEscapingByRef &&
      CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
    BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
    if (emission.Variable->getType().isObjCGCWeak())
      Flags |= BLOCK_FIELD_IS_WEAK;
    enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
                      /*LoadBlockVarAddr*/ false,
                      cxxDestructorCanThrow(emission.Variable->getType()));
  }
}

CodeGenFunction::Destroyer *
CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
  switch (kind) {
  case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
  case QualType::DK_cxx_destructor:
    return destroyCXXObject;
  case QualType::DK_objc_strong_lifetime:
    return destroyARCStrongPrecise;
  case QualType::DK_objc_weak_lifetime:
    return destroyARCWeak;
  case QualType::DK_nontrivial_c_struct:
    return destroyNonTrivialCStruct;
  }
  llvm_unreachable("Unknown DestructionKind");
}

/// pushEHDestroy - Push the standard destructor for the given type as
/// an EH-only cleanup.
void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
                                    Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");
  assert(needsEHCleanup(dtorKind));

  pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
}

/// pushDestroy - Push the standard destructor for the given type as
/// at least a normal cleanup.
void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
                                  Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");

  CleanupKind cleanupKind = getCleanupKind(dtorKind);
  pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
              cleanupKind & EHCleanup);
}

void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
                                  QualType type, Destroyer *destroyer,
                                  bool useEHCleanupForArray) {
  pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
                                     destroyer, useEHCleanupForArray);
}

void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
  EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
}

void CodeGenFunction::pushLifetimeExtendedDestroy(
    CleanupKind cleanupKind, Address addr, QualType type,
    Destroyer *destroyer, bool useEHCleanupForArray) {
  // Push an EH-only cleanup for the object now.
  // FIXME: When popping normal cleanups, we need to keep this EH cleanup
  // around in case a temporary's destructor throws an exception.
  if (cleanupKind & EHCleanup)
    EHStack.pushCleanup<DestroyObject>(
        static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
        destroyer, useEHCleanupForArray);

  // Remember that we need to push a full cleanup for the object at the
  // end of the full-expression.
  pushCleanupAfterFullExpr<DestroyObject>(
      cleanupKind, addr, type, destroyer, useEHCleanupForArray);
}

/// emitDestroy - Immediately perform the destruction of the given
/// object.
///
/// \param addr - the address of the object; a type*
/// \param type - the type of the object; if an array type, all
///   objects are destroyed in reverse order
/// \param destroyer - the function to call to destroy individual
///   elements
/// \param useEHCleanupForArray - whether an EH cleanup should be
///   used when destroying array elements, in case one of the
///   destructions throws an exception
void CodeGenFunction::emitDestroy(Address addr, QualType type,
                                  Destroyer *destroyer,
                                  bool useEHCleanupForArray) {
  const ArrayType *arrayType = getContext().getAsArrayType(type);
  if (!arrayType)
    return destroyer(*this, addr, type);

  llvm::Value *length = emitArrayLength(arrayType, type, addr);

  CharUnits elementAlign =
      addr.getAlignment()
          .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));

  // Normally we have to check whether the array is zero-length.
  bool checkZeroLength = true;

  // But if the array length is constant, we can suppress that.
  if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
    // ...and if it's constant zero, we can just skip the entire thing.
    if (constLength->isZero()) return;
    checkZeroLength = false;
  }

  llvm::Value *begin = addr.getPointer();
  llvm::Value *end = Builder.CreateInBoundsGEP(begin, length);
  emitArrayDestroy(begin, end, type, elementAlign, destroyer,
                   checkZeroLength, useEHCleanupForArray);
}

/// emitArrayDestroy - Destroys all the elements of the given array,
/// beginning from last to first. The array cannot be zero-length.
///
/// \param begin - a type* denoting the first element of the array
/// \param end - a type* denoting one past the end of the array
/// \param elementType - the element type of the array
/// \param destroyer - the function to call to destroy elements
/// \param useEHCleanup - whether to push an EH cleanup to destroy
///   the remaining elements in case the destruction of a single
///   element throws
void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
                                       llvm::Value *end,
                                       QualType elementType,
                                       CharUnits elementAlign,
                                       Destroyer *destroyer,
                                       bool checkZeroLength,
                                       bool useEHCleanup) {
  assert(!elementType->isArrayType());

  // The basic structure here is a do-while loop, because we don't
  // need to check for the zero-element case.
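  // The emitted IR walks backward from "end" toward "begin":
  //   arraydestroy.body: phi %elementPast; %element = %elementPast - 1;
  //                      destroy %element; loop until %element == %begin
  //   arraydestroy.done: continue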
  llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
  llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");

  if (checkZeroLength) {
    llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
                                                "arraydestroy.isempty");
    Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
  }

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  EmitBlock(bodyBB);
  llvm::PHINode *elementPast =
      Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
  elementPast->addIncoming(end, entryBB);

  // Shift the address back by one element.
  llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
  llvm::Value *element = Builder.CreateInBoundsGEP(elementPast, negativeOne,
                                                   "arraydestroy.element");

  if (useEHCleanup)
    pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
                                   destroyer);

  // Perform the actual destruction there.
  destroyer(*this, Address(element, elementAlign), elementType);

  if (useEHCleanup)
    PopCleanupBlock();

  // Check whether we've reached the end.
  llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
  Builder.CreateCondBr(done, doneBB, bodyBB);
  elementPast->addIncoming(element, Builder.GetInsertBlock());

  // Done.
  EmitBlock(doneBB);
}

/// Perform partial array destruction as if in an EH cleanup. Unlike
/// emitArrayDestroy, the element type here may still be an array type.
static void emitPartialArrayDestroy(CodeGenFunction &CGF,
                                    llvm::Value *begin, llvm::Value *end,
                                    QualType type, CharUnits elementAlign,
                                    CodeGenFunction::Destroyer *destroyer) {
  // If the element type is itself an array, drill down.
  unsigned arrayDepth = 0;
  while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
    // VLAs don't require a GEP index to walk into.
    if (!isa<VariableArrayType>(arrayType))
      arrayDepth++;
    type = arrayType->getElementType();
  }
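  // For example, for "T a[3][4]" arrayDepth ends up as 2, and the GEPs
  // below use indices [0, 0, 0] to step down to the first T element.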

  if (arrayDepth) {
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);

    SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
    begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices, "pad.arraybegin");
    end = CGF.Builder.CreateInBoundsGEP(end, gepIndices, "pad.arrayend");
  }

  // Destroy the array. We don't ever need an EH cleanup because we
  // assume that we're in an EH cleanup ourselves, so a throwing
  // destructor causes an immediate terminate.
  CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
                       /*checkZeroLength*/ true, /*useEHCleanup*/ false);
}

namespace {
  /// RegularPartialArrayDestroy - a cleanup which performs a partial
  /// array destroy where the end pointer is regularly determined and
  /// does not need to be loaded from a local.
  class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
    llvm::Value *ArrayBegin;
    llvm::Value *ArrayEnd;
    QualType ElementType;
    CodeGenFunction::Destroyer *Destroyer;
    CharUnits ElementAlign;
  public:
    RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
                               QualType elementType, CharUnits elementAlign,
                               CodeGenFunction::Destroyer *destroyer)
      : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
        ElementType(elementType), Destroyer(destroyer),
        ElementAlign(elementAlign) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
                              ElementType, ElementAlign, Destroyer);
    }
  };

  /// IrregularPartialArrayDestroy - a cleanup which performs a
  /// partial array destroy where the end pointer is irregularly
  /// determined and must be loaded from a local.
  class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
    llvm::Value *ArrayBegin;
    Address ArrayEndPointer;
    QualType ElementType;
    CodeGenFunction::Destroyer *Destroyer;
    CharUnits ElementAlign;
  public:
    IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
                                 Address arrayEndPointer,
                                 QualType elementType,
                                 CharUnits elementAlign,
                                 CodeGenFunction::Destroyer *destroyer)
      : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
        ElementType(elementType), Destroyer(destroyer),
        ElementAlign(elementAlign) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
      emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
                              ElementType, ElementAlign, Destroyer);
    }
  };
} // end anonymous namespace

/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array. The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
///   possibly still an array type
void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                       Address arrayEndPointer,
                                                       QualType elementType,
                                                       CharUnits elementAlign,
                                                       Destroyer *destroyer) {
  pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
                                                    arrayBegin, arrayEndPointer,
                                                    elementType, elementAlign,
                                                    destroyer);
}

/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array. The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
///   possibly still an array type
void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                     llvm::Value *arrayEnd,
                                                     QualType elementType,
                                                     CharUnits elementAlign,
                                                     Destroyer *destroyer) {
  pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
                                                  arrayBegin, arrayEnd,
                                                  elementType, elementAlign,
                                                  destroyer);
}

/// Lazily declare the @llvm.lifetime.start intrinsic.
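/// The intrinsic is overloaded on the pointer type; with the default alloca
/// address space it is declared roughly as
/// "void @llvm.lifetime.start.p0i8(i64 <size>, i8* nocapture <ptr>)".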
llvm::Constant *CodeGenModule::getLLVMLifetimeStartFn() {
  if (LifetimeStartFn)
    return LifetimeStartFn;
  LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
    llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
  return LifetimeStartFn;
}

/// Lazily declare the @llvm.lifetime.end intrinsic.
llvm::Constant *CodeGenModule::getLLVMLifetimeEndFn() {
  if (LifetimeEndFn)
    return LifetimeEndFn;
  LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
    llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
  return LifetimeEndFn;
}

namespace {
  /// A cleanup to perform a release of an object at the end of a
  /// function. This is used to balance out the incoming +1 of a
  /// ns_consumed argument when we can't reasonably do that just by
  /// not doing the initial retain for a __block argument.
  struct ConsumeARCParameter final : EHScopeStack::Cleanup {
    ConsumeARCParameter(llvm::Value *param,
                        ARCPreciseLifetime_t precise)
      : Param(param), Precise(precise) {}

    llvm::Value *Param;
    ARCPreciseLifetime_t Precise;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitARCRelease(Param, Precise);
    }
  };
} // end anonymous namespace

/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
                                   unsigned ArgNo) {
  // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
  assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
         "Invalid argument to EmitParmDecl");

  Arg.getAnyValue()->setName(D.getName());

  QualType Ty = D.getType();

  // Use better IR generation for certain implicit parameters.
  if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
    // The only implicit argument a block has is its literal.
    // This may be passed as an inalloca'ed value on Windows x86.
    if (BlockInfo) {
      llvm::Value *V = Arg.isIndirect()
                           ? Builder.CreateLoad(Arg.getIndirectAddress())
                           : Arg.getDirectValue();
      setBlockContextParameter(IPD, ArgNo, V);
      return;
    }
  }

  Address DeclPtr = Address::invalid();
  bool DoStore = false;
  bool IsScalar = hasScalarEvaluationKind(Ty);
  // If we already have a pointer to the argument, reuse the input pointer.
  if (Arg.isIndirect()) {
    DeclPtr = Arg.getIndirectAddress();
    // If we have a prettier pointer type at this point, bitcast to that.
    unsigned AS = DeclPtr.getType()->getAddressSpace();
    llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
    if (DeclPtr.getType() != IRTy)
      DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());
    // Indirect argument is in alloca address space, which may be different
    // from the default address space.
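    // (For example, on AMDGPU allocas live in the private address space,
    // AS 5, while the default language address space maps to AS 0.)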
    auto AllocaAS = CGM.getASTAllocaAddressSpace();
    auto *V = DeclPtr.getPointer();
    auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
    auto DestLangAS =
        getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
    if (SrcLangAS != DestLangAS) {
      assert(getContext().getTargetAddressSpace(SrcLangAS) ==
             CGM.getDataLayout().getAllocaAddrSpace());
      auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
      auto *T = V->getType()->getPointerElementType()->getPointerTo(DestAS);
      DeclPtr = Address(getTargetHooks().performAddrSpaceCast(
                            *this, V, SrcLangAS, DestLangAS, T, true),
                        DeclPtr.getAlignment());
    }

    // Push a destructor cleanup for this parameter if the ABI requires it.
    // Don't push a cleanup in a thunk for a method that will also emit a
    // cleanup.
    if (hasAggregateEvaluationKind(Ty) && !CurFuncIsThunk &&
        Ty->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
      if (QualType::DestructionKind DtorKind = Ty.isDestructedType()) {
        assert((DtorKind == QualType::DK_cxx_destructor ||
                DtorKind == QualType::DK_nontrivial_c_struct) &&
               "unexpected destructor type");
        pushDestroy(DtorKind, DeclPtr, Ty);
        CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] =
            EHStack.stable_begin();
      }
    }
  } else {
    // Check if the parameter address is controlled by OpenMP runtime.
    Address OpenMPLocalAddr =
        getLangOpts().OpenMP
            ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
            : Address::invalid();
    if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
      DeclPtr = OpenMPLocalAddr;
    } else {
      // Otherwise, create a temporary to hold the value.
      DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
                              D.getName() + ".addr");
    }
    DoStore = true;
  }

  llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);

  LValue lv = MakeAddrLValue(DeclPtr, Ty);
  if (IsScalar) {
    Qualifiers qs = Ty.getQualifiers();
    if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
      // We honor __attribute__((ns_consumed)) for types with lifetime.
      // For __strong, it's handled by just skipping the initial retain;
      // otherwise we have to balance out the initial +1 with an extra
      // cleanup to do the release at the end of the function.
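      // For example, a parameter declared "__attribute__((ns_consumed)) id x"
      // arrives at +1, and the callee is responsible for releasing it.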
      bool isConsumed = D.hasAttr<NSConsumedAttr>();

      // If a parameter is pseudo-strong then we can omit the implicit retain.
      if (D.isARCPseudoStrong()) {
        assert(lt == Qualifiers::OCL_Strong &&
               "pseudo-strong variable isn't strong?");
        assert(qs.hasConst() && "pseudo-strong variable should be const!");
        lt = Qualifiers::OCL_ExplicitNone;
      }

      // Load objects passed indirectly.
      if (Arg.isIndirect() && !ArgVal)
        ArgVal = Builder.CreateLoad(DeclPtr);

      if (lt == Qualifiers::OCL_Strong) {
        if (!isConsumed) {
          if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
            // use objc_storeStrong(&dest, value) for retaining the
            // object. But first, store a null into 'dest' because
            // objc_storeStrong attempts to release its old value.
            llvm::Value *Null = CGM.EmitNullConstant(D.getType());
            EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
            EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
            DoStore = false;
          }
          else
            // Don't use objc_retainBlock for block pointers, because we
            // don't want to Block_copy something just because we got it
            // as a parameter.
            ArgVal = EmitARCRetainNonBlock(ArgVal);
        }
      } else {
        // Push the cleanup for a consumed parameter.
        if (isConsumed) {
          ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
                                            ? ARCPreciseLifetime : ARCImpreciseLifetime);
          EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
                                                   precise);
        }

        if (lt == Qualifiers::OCL_Weak) {
          EmitARCInitWeak(DeclPtr, ArgVal);
          DoStore = false; // The weak init is a store, no need to do two.
        }
      }

      // Enter the cleanup scope.
      EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
    }
  }

  // Store the initial value into the alloca.
  if (DoStore)
    EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);

  setAddrOfLocalVar(&D, DeclPtr);

  // Emit debug info for param declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().getDebugInfo() >=
        codegenoptions::LimitedDebugInfo) {
      DI->EmitDeclareOfArgVariable(&D, DeclPtr.getPointer(), ArgNo, Builder);
    }
  }

  if (D.hasAttr<AnnotateAttr>())
    EmitVarAnnotations(&D, DeclPtr.getPointer());

  // We can only check return value nullability if all arguments to the
  // function satisfy their nullability preconditions. This makes it necessary
  // to emit null checks for args in the function body itself.
  if (requiresReturnValueNullabilityCheck()) {
    auto Nullability = Ty->getNullability(getContext());
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      SanitizerScope SanScope(this);
      RetValNullabilityPrecondition =
          Builder.CreateAnd(RetValNullabilityPrecondition,
                            Builder.CreateIsNotNull(Arg.getAnyValue()));
    }
  }
}

void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
                                            CodeGenFunction *CGF) {
  if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
    return;
  getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
}

void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
  getOpenMPRuntime().checkArchForUnifiedAddressing(*this, D);
}