1 //===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to emit Decl nodes as LLVM code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGBlocks.h"
14 #include "CGCXXABI.h"
15 #include "CGCleanup.h"
16 #include "CGDebugInfo.h"
17 #include "CGOpenCLRuntime.h"
18 #include "CGOpenMPRuntime.h"
19 #include "CodeGenFunction.h"
20 #include "CodeGenModule.h"
21 #include "ConstantEmitter.h"
22 #include "PatternInit.h"
23 #include "TargetInfo.h"
24 #include "clang/AST/ASTContext.h"
25 #include "clang/AST/Attr.h"
26 #include "clang/AST/CharUnits.h"
27 #include "clang/AST/Decl.h"
28 #include "clang/AST/DeclObjC.h"
29 #include "clang/AST/DeclOpenMP.h"
30 #include "clang/Basic/CodeGenOptions.h"
31 #include "clang/Basic/SourceManager.h"
32 #include "clang/Basic/TargetInfo.h"
33 #include "clang/CodeGen/CGFunctionInfo.h"
34 #include "clang/Sema/Sema.h"
35 #include "llvm/Analysis/ValueTracking.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/IR/GlobalVariable.h"
38 #include "llvm/IR/Intrinsics.h"
39 #include "llvm/IR/Type.h"
40 
41 using namespace clang;
42 using namespace CodeGen;
43 
44 static_assert(clang::Sema::MaximumAlignment <= llvm::Value::MaximumAlignment,
45               "Clang max alignment greater than what LLVM supports?");
46 
47 void CodeGenFunction::EmitDecl(const Decl &D) {
48   switch (D.getKind()) {
49   case Decl::BuiltinTemplate:
50   case Decl::TranslationUnit:
51   case Decl::ExternCContext:
52   case Decl::Namespace:
53   case Decl::UnresolvedUsingTypename:
54   case Decl::ClassTemplateSpecialization:
55   case Decl::ClassTemplatePartialSpecialization:
56   case Decl::VarTemplateSpecialization:
57   case Decl::VarTemplatePartialSpecialization:
58   case Decl::TemplateTypeParm:
59   case Decl::UnresolvedUsingValue:
60   case Decl::NonTypeTemplateParm:
61   case Decl::CXXDeductionGuide:
62   case Decl::CXXMethod:
63   case Decl::CXXConstructor:
64   case Decl::CXXDestructor:
65   case Decl::CXXConversion:
66   case Decl::Field:
67   case Decl::MSProperty:
68   case Decl::IndirectField:
69   case Decl::ObjCIvar:
70   case Decl::ObjCAtDefsField:
71   case Decl::ParmVar:
72   case Decl::ImplicitParam:
73   case Decl::ClassTemplate:
74   case Decl::VarTemplate:
75   case Decl::FunctionTemplate:
76   case Decl::TypeAliasTemplate:
77   case Decl::TemplateTemplateParm:
78   case Decl::ObjCMethod:
79   case Decl::ObjCCategory:
80   case Decl::ObjCProtocol:
81   case Decl::ObjCInterface:
82   case Decl::ObjCCategoryImpl:
83   case Decl::ObjCImplementation:
84   case Decl::ObjCProperty:
85   case Decl::ObjCCompatibleAlias:
86   case Decl::PragmaComment:
87   case Decl::PragmaDetectMismatch:
88   case Decl::AccessSpec:
89   case Decl::LinkageSpec:
90   case Decl::Export:
91   case Decl::ObjCPropertyImpl:
92   case Decl::FileScopeAsm:
93   case Decl::Friend:
94   case Decl::FriendTemplate:
95   case Decl::Block:
96   case Decl::Captured:
97   case Decl::ClassScopeFunctionSpecialization:
98   case Decl::UsingShadow:
99   case Decl::ConstructorUsingShadow:
100   case Decl::ObjCTypeParam:
101   case Decl::Binding:
102   case Decl::UnresolvedUsingIfExists:
103     llvm_unreachable("Declaration should not be in declstmts!");
104   case Decl::Record:    // struct/union/class X;
105   case Decl::CXXRecord: // struct/union/class X; [C++]
106     if (CGDebugInfo *DI = getDebugInfo()) {
107       DI->recordDeclarationLexicalScope(D);
108       if (cast<RecordDecl>(D).getDefinition())
109         DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(&D)));
110     }
111     return;
112   case Decl::Enum:      // enum X;
113     if (CGDebugInfo *DI = getDebugInfo()) {
114       DI->recordDeclarationLexicalScope(D);
115       if (cast<EnumDecl>(D).getDefinition())
116         DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(&D)));
117     }
118     return;
119   case Decl::EnumConstant: // enum ? { X = ? }
120   case Decl::Function:     // void X();
121   case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
122   case Decl::Label:        // __label__ x;
123   case Decl::Import:
124   case Decl::MSGuid:    // __declspec(uuid("..."))
125   case Decl::TemplateParamObject:
126   case Decl::OMPThreadPrivate:
127   case Decl::OMPAllocate:
128   case Decl::OMPCapturedExpr:
129   case Decl::OMPRequires:
130   case Decl::Empty:
131   case Decl::Concept:
132   case Decl::LifetimeExtendedTemporary:
133   case Decl::RequiresExprBody:
134     // None of these decls require codegen support.
135     return;
136 
137   case Decl::NamespaceAlias:
138     if (CGDebugInfo *DI = getDebugInfo())
139       DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
140     return;
141   case Decl::Using:          // using X; [C++]
142     if (CGDebugInfo *DI = getDebugInfo())
143       DI->EmitUsingDecl(cast<UsingDecl>(D));
144     return;
145   case Decl::UsingEnum: // using enum X; [C++]
146     if (CGDebugInfo *DI = getDebugInfo())
147       DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(D));
148     return;
149   case Decl::UsingPack:
150     for (auto *Using : cast<UsingPackDecl>(D).expansions())
151       EmitDecl(*Using);
152     return;
153   case Decl::UsingDirective: // using namespace X; [C++]
154     if (CGDebugInfo *DI = getDebugInfo())
155       DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
156     return;
157   case Decl::Var:
158   case Decl::Decomposition: {
159     const VarDecl &VD = cast<VarDecl>(D);
160     assert(VD.isLocalVarDecl() &&
161            "Should not see file-scope variables inside a function!");
162     EmitVarDecl(VD);
163     if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
164       for (auto *B : DD->bindings())
165         if (auto *HD = B->getHoldingVar())
166           EmitVarDecl(*HD);
167     return;
168   }
169 
170   case Decl::OMPDeclareReduction:
171     return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);
172 
173   case Decl::OMPDeclareMapper:
174     return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);
175 
176   case Decl::Typedef:      // typedef int X;
177   case Decl::TypeAlias: {  // using X = int; [C++0x]
178     QualType Ty = cast<TypedefNameDecl>(D).getUnderlyingType();
179     if (CGDebugInfo *DI = getDebugInfo()) {
180       DI->recordDeclarationLexicalScope(D);
181       DI->EmitAndRetainType(Ty);
182     }
183     if (Ty->isVariablyModifiedType())
184       EmitVariablyModifiedType(Ty);
185     return;
186   }
187   }
188 }
189 
190 /// EmitVarDecl - This method handles emission of any variable declaration
191 /// inside a function, including static vars etc.
192 void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
193   if (D.hasExternalStorage())
194     // Don't emit it now, allow it to be emitted lazily on its first use.
195     return;
196 
197   // Some function-scope variable does not have static storage but still
198   // needs to be emitted like a static variable, e.g. a function-scope
199   // variable in constant address space in OpenCL.
200   if (D.getStorageDuration() != SD_Automatic) {
201     // Static sampler variables translated to function calls.
202     if (D.getType()->isSamplerT())
203       return;
204 
205     llvm::GlobalValue::LinkageTypes Linkage =
206         CGM.getLLVMLinkageVarDefinition(&D, /*IsConstant=*/false);
207 
208     // FIXME: We need to force the emission/use of a guard variable for
209     // some variables even if we can constant-evaluate them because
210     // we can't guarantee every translation unit will constant-evaluate them.
211 
212     return EmitStaticVarDecl(D, Linkage);
213   }
214 
215   if (D.getType().getAddressSpace() == LangAS::opencl_local)
216     return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);
217 
218   assert(D.hasLocalStorage());
219   return EmitAutoVarDecl(D);
220 }
221 
222 static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
223   if (CGM.getLangOpts().CPlusPlus)
224     return CGM.getMangledName(&D).str();
225 
226   // If this isn't C++, we don't need a mangled name, just a pretty one.
227   assert(!D.isExternallyVisible() && "name shouldn't matter");
228   std::string ContextName;
229   const DeclContext *DC = D.getDeclContext();
230   if (auto *CD = dyn_cast<CapturedDecl>(DC))
231     DC = cast<DeclContext>(CD->getNonClosureContext());
232   if (const auto *FD = dyn_cast<FunctionDecl>(DC))
233     ContextName = std::string(CGM.getMangledName(FD));
234   else if (const auto *BD = dyn_cast<BlockDecl>(DC))
235     ContextName = std::string(CGM.getBlockMangledName(GlobalDecl(), BD));
236   else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
237     ContextName = OMD->getSelector().getAsString();
238   else
239     llvm_unreachable("Unknown context for static var decl");
240 
241   ContextName += "." + D.getNameAsString();
242   return ContextName;
243 }
244 
245 llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
246     const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
247   // In general, we don't always emit static var decls once before we reference
248   // them. It is possible to reference them before emitting the function that
249   // contains them, and it is possible to emit the containing function multiple
250   // times.
251   if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
252     return ExistingGV;
253 
254   QualType Ty = D.getType();
255   assert(Ty->isConstantSizeType() && "VLAs can't be static");
256 
257   // Use the label if the variable is renamed with the asm-label extension.
258   std::string Name;
259   if (D.hasAttr<AsmLabelAttr>())
260     Name = std::string(getMangledName(&D));
261   else
262     Name = getStaticDeclName(*this, D);
263 
264   llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
265   LangAS AS = GetGlobalVarAddressSpace(&D);
266   unsigned TargetAS = getContext().getTargetAddressSpace(AS);
267 
268   // OpenCL variables in local address space and CUDA shared
269   // variables cannot have an initializer.
270   llvm::Constant *Init = nullptr;
271   if (Ty.getAddressSpace() == LangAS::opencl_local ||
272       D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
273     Init = llvm::UndefValue::get(LTy);
274   else
275     Init = EmitNullConstant(Ty);
276 
277   llvm::GlobalVariable *GV = new llvm::GlobalVariable(
278       getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
279       nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
280   GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign());
281 
282   if (supportsCOMDAT() && GV->isWeakForLinker())
283     GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
284 
285   if (D.getTLSKind())
286     setTLSMode(GV, D);
287 
288   setGVProperties(GV, &D);
289 
290   // Make sure the result is of the correct type.
291   LangAS ExpectedAS = Ty.getAddressSpace();
292   llvm::Constant *Addr = GV;
293   if (AS != ExpectedAS) {
294     Addr = getTargetCodeGenInfo().performAddrSpaceCast(
295         *this, GV, AS, ExpectedAS,
296         LTy->getPointerTo(getContext().getTargetAddressSpace(ExpectedAS)));
297   }
298 
299   setStaticLocalDeclAddress(&D, Addr);
300 
301   // Ensure that the static local gets initialized by making sure the parent
302   // function gets emitted eventually.
303   const Decl *DC = cast<Decl>(D.getDeclContext());
304 
305   // We can't name blocks or captured statements directly, so try to emit their
306   // parents.
307   if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
308     DC = DC->getNonClosureContext();
309     // FIXME: Ensure that global blocks get emitted.
310     if (!DC)
311       return Addr;
312   }
313 
314   GlobalDecl GD;
315   if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
316     GD = GlobalDecl(CD, Ctor_Base);
317   else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
318     GD = GlobalDecl(DD, Dtor_Base);
319   else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
320     GD = GlobalDecl(FD);
321   else {
322     // Don't do anything for Obj-C method decls or global closures. We should
323     // never defer them.
324     assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
325   }
326   if (GD.getDecl()) {
327     // Disable emission of the parent function for the OpenMP device codegen.
328     CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
329     (void)GetAddrOfGlobal(GD);
330   }
331 
332   return Addr;
333 }
334 
335 /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
336 /// global variable that has already been created for it.  If the initializer
337 /// has a different type than GV does, this may free GV and return a different
338 /// one.  Otherwise it just returns GV.
339 llvm::GlobalVariable *
340 CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
341                                                llvm::GlobalVariable *GV) {
342   ConstantEmitter emitter(*this);
343   llvm::Constant *Init = emitter.tryEmitForInitializer(D);
344 
345   // If constant emission failed, then this should be a C++ static
346   // initializer.
347   if (!Init) {
348     if (!getLangOpts().CPlusPlus)
349       CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
350     else if (HaveInsertPoint()) {
351       // Since we have a static initializer, this global variable can't
352       // be constant.
353       GV->setConstant(false);
354 
355       EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
356     }
357     return GV;
358   }
359 
360   // The initializer may differ in type from the global. Rewrite
361   // the global to match the initializer.  (We have to do this
362   // because some types, like unions, can't be completely represented
363   // in the LLVM type system.)
364   if (GV->getValueType() != Init->getType()) {
365     llvm::GlobalVariable *OldGV = GV;
366 
367     GV = new llvm::GlobalVariable(
368         CGM.getModule(), Init->getType(), OldGV->isConstant(),
369         OldGV->getLinkage(), Init, "",
370         /*InsertBefore*/ OldGV, OldGV->getThreadLocalMode(),
371         OldGV->getType()->getPointerAddressSpace());
372     GV->setVisibility(OldGV->getVisibility());
373     GV->setDSOLocal(OldGV->isDSOLocal());
374     GV->setComdat(OldGV->getComdat());
375 
376     // Steal the name of the old global
377     GV->takeName(OldGV);
378 
379     // Replace all uses of the old global with the new global
380     llvm::Constant *NewPtrForOldDecl =
381     llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
382     OldGV->replaceAllUsesWith(NewPtrForOldDecl);
383 
384     // Erase the old global, since it is no longer used.
385     OldGV->eraseFromParent();
386   }
387 
388   GV->setConstant(CGM.isTypeConstant(D.getType(), true));
389   GV->setInitializer(Init);
390 
391   emitter.finalize(GV);
392 
393   if (D.needsDestruction(getContext()) == QualType::DK_cxx_destructor &&
394       HaveInsertPoint()) {
395     // We have a constant initializer, but a nontrivial destructor. We still
396     // need to perform a guarded "initialization" in order to register the
397     // destructor.
398     EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
399   }
400 
401   return GV;
402 }
403 
404 void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
405                                       llvm::GlobalValue::LinkageTypes Linkage) {
406   // Check to see if we already have a global variable for this
407   // declaration.  This can happen when double-emitting function
408   // bodies, e.g. with complete and base constructors.
409   llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
410   CharUnits alignment = getContext().getDeclAlign(&D);
411 
412   // Store into LocalDeclMap before generating initializer to handle
413   // circular references.
414   setAddrOfLocalVar(&D, Address(addr, alignment));
415 
416   // We can't have a VLA here, but we can have a pointer to a VLA,
417   // even though that doesn't really make any sense.
418   // Make sure to evaluate VLA bounds now so that we have them for later.
419   if (D.getType()->isVariablyModifiedType())
420     EmitVariablyModifiedType(D.getType());
421 
422   // Save the type in case adding the initializer forces a type change.
423   llvm::Type *expectedType = addr->getType();
424 
425   llvm::GlobalVariable *var =
426     cast<llvm::GlobalVariable>(addr->stripPointerCasts());
427 
428   // CUDA's local and local static __shared__ variables should not
429   // have any non-empty initializers. This is ensured by Sema.
430   // Whatever initializer such variable may have when it gets here is
431   // a no-op and should not be emitted.
432   bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
433                          D.hasAttr<CUDASharedAttr>();
434   // If this value has an initializer, emit it.
435   if (D.getInit() && !isCudaSharedVar)
436     var = AddInitializerToStaticVarDecl(D, var);
437 
438   var->setAlignment(alignment.getAsAlign());
439 
440   if (D.hasAttr<AnnotateAttr>())
441     CGM.AddGlobalAnnotations(&D, var);
442 
443   if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
444     var->addAttribute("bss-section", SA->getName());
445   if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
446     var->addAttribute("data-section", SA->getName());
447   if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
448     var->addAttribute("rodata-section", SA->getName());
449   if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
450     var->addAttribute("relro-section", SA->getName());
451 
452   if (const SectionAttr *SA = D.getAttr<SectionAttr>())
453     var->setSection(SA->getName());
454 
455   if (D.hasAttr<RetainAttr>())
456     CGM.addUsedGlobal(var);
457   else if (D.hasAttr<UsedAttr>())
458     CGM.addUsedOrCompilerUsedGlobal(var);
459 
460   // We may have to cast the constant because of the initializer
461   // mismatch above.
462   //
463   // FIXME: It is really dangerous to store this in the map; if anyone
464   // RAUW's the GV uses of this constant will be invalid.
465   llvm::Constant *castedAddr =
466     llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
467   if (var != castedAddr)
468     LocalDeclMap.find(&D)->second = Address(castedAddr, alignment);
469   CGM.setStaticLocalDeclAddress(&D, castedAddr);
470 
471   CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);
472 
473   // Emit global variable debug descriptor for static vars.
474   CGDebugInfo *DI = getDebugInfo();
475   if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) {
476     DI->setLocation(D.getLocation());
477     DI->EmitGlobalVariable(var, &D);
478   }
479 }
480 
481 namespace {
482   struct DestroyObject final : EHScopeStack::Cleanup {
483     DestroyObject(Address addr, QualType type,
484                   CodeGenFunction::Destroyer *destroyer,
485                   bool useEHCleanupForArray)
486       : addr(addr), type(type), destroyer(destroyer),
487         useEHCleanupForArray(useEHCleanupForArray) {}
488 
489     Address addr;
490     QualType type;
491     CodeGenFunction::Destroyer *destroyer;
492     bool useEHCleanupForArray;
493 
494     void Emit(CodeGenFunction &CGF, Flags flags) override {
495       // Don't use an EH cleanup recursively from an EH cleanup.
496       bool useEHCleanupForArray =
497         flags.isForNormalCleanup() && this->useEHCleanupForArray;
498 
499       CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
500     }
501   };
502 
503   template <class Derived>
504   struct DestroyNRVOVariable : EHScopeStack::Cleanup {
505     DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
506         : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}
507 
508     llvm::Value *NRVOFlag;
509     Address Loc;
510     QualType Ty;
511 
512     void Emit(CodeGenFunction &CGF, Flags flags) override {
513       // Along the exceptions path we always execute the dtor.
514       bool NRVO = flags.isForNormalCleanup() && NRVOFlag;
515 
516       llvm::BasicBlock *SkipDtorBB = nullptr;
517       if (NRVO) {
518         // If we exited via NRVO, we skip the destructor call.
519         llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
520         SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
521         llvm::Value *DidNRVO =
522           CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
523         CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
524         CGF.EmitBlock(RunDtorBB);
525       }
526 
527       static_cast<Derived *>(this)->emitDestructorCall(CGF);
528 
529       if (NRVO) CGF.EmitBlock(SkipDtorBB);
530     }
531 
532     virtual ~DestroyNRVOVariable() = default;
533   };
534 
535   struct DestroyNRVOVariableCXX final
536       : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
537     DestroyNRVOVariableCXX(Address addr, QualType type,
538                            const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
539         : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
540           Dtor(Dtor) {}
541 
542     const CXXDestructorDecl *Dtor;
543 
544     void emitDestructorCall(CodeGenFunction &CGF) {
545       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
546                                 /*ForVirtualBase=*/false,
547                                 /*Delegating=*/false, Loc, Ty);
548     }
549   };
550 
551   struct DestroyNRVOVariableC final
552       : DestroyNRVOVariable<DestroyNRVOVariableC> {
553     DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
554         : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}
555 
556     void emitDestructorCall(CodeGenFunction &CGF) {
557       CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
558     }
559   };
560 
561   struct CallStackRestore final : EHScopeStack::Cleanup {
562     Address Stack;
563     CallStackRestore(Address Stack) : Stack(Stack) {}
564     bool isRedundantBeforeReturn() override { return true; }
565     void Emit(CodeGenFunction &CGF, Flags flags) override {
566       llvm::Value *V = CGF.Builder.CreateLoad(Stack);
567       llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
568       CGF.Builder.CreateCall(F, V);
569     }
570   };
571 
572   struct ExtendGCLifetime final : EHScopeStack::Cleanup {
573     const VarDecl &Var;
574     ExtendGCLifetime(const VarDecl *var) : Var(*var) {}
575 
576     void Emit(CodeGenFunction &CGF, Flags flags) override {
577       // Compute the address of the local variable, in case it's a
578       // byref or something.
579       DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
580                       Var.getType(), VK_LValue, SourceLocation());
581       llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
582                                                 SourceLocation());
583       CGF.EmitExtendGCLifetime(value);
584     }
585   };
586 
587   struct CallCleanupFunction final : EHScopeStack::Cleanup {
588     llvm::Constant *CleanupFn;
589     const CGFunctionInfo &FnInfo;
590     const VarDecl &Var;
591 
592     CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
593                         const VarDecl *Var)
594       : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
595 
596     void Emit(CodeGenFunction &CGF, Flags flags) override {
597       DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
598                       Var.getType(), VK_LValue, SourceLocation());
599       // Compute the address of the local variable, in case it's a byref
600       // or something.
601       llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF);
602 
603       // In some cases, the type of the function argument will be different from
604       // the type of the pointer. An example of this is
605       // void f(void* arg);
606       // __attribute__((cleanup(f))) void *g;
607       //
608       // To fix this we insert a bitcast here.
609       QualType ArgTy = FnInfo.arg_begin()->type;
610       llvm::Value *Arg =
611         CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));
612 
613       CallArgList Args;
614       Args.add(RValue::get(Arg),
615                CGF.getContext().getPointerType(Var.getType()));
616       auto Callee = CGCallee::forDirect(CleanupFn);
617       CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
618     }
619   };
620 } // end anonymous namespace
621 
622 /// EmitAutoVarWithLifetime - Does the setup required for an automatic
623 /// variable with lifetime.
624 static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
625                                     Address addr,
626                                     Qualifiers::ObjCLifetime lifetime) {
627   switch (lifetime) {
628   case Qualifiers::OCL_None:
629     llvm_unreachable("present but none");
630 
631   case Qualifiers::OCL_ExplicitNone:
632     // nothing to do
633     break;
634 
635   case Qualifiers::OCL_Strong: {
636     CodeGenFunction::Destroyer *destroyer =
637       (var.hasAttr<ObjCPreciseLifetimeAttr>()
638        ? CodeGenFunction::destroyARCStrongPrecise
639        : CodeGenFunction::destroyARCStrongImprecise);
640 
641     CleanupKind cleanupKind = CGF.getARCCleanupKind();
642     CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
643                     cleanupKind & EHCleanup);
644     break;
645   }
646   case Qualifiers::OCL_Autoreleasing:
647     // nothing to do
648     break;
649 
650   case Qualifiers::OCL_Weak:
651     // __weak objects always get EH cleanups; otherwise, exceptions
652     // could cause really nasty crashes instead of mere leaks.
653     CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
654                     CodeGenFunction::destroyARCWeak,
655                     /*useEHCleanup*/ true);
656     break;
657   }
658 }
659 
660 static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
661   if (const Expr *e = dyn_cast<Expr>(s)) {
662     // Skip the most common kinds of expressions that make
663     // hierarchy-walking expensive.
664     s = e = e->IgnoreParenCasts();
665 
666     if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
667       return (ref->getDecl() == &var);
668     if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
669       const BlockDecl *block = be->getBlockDecl();
670       for (const auto &I : block->captures()) {
671         if (I.getVariable() == &var)
672           return true;
673       }
674     }
675   }
676 
677   for (const Stmt *SubStmt : s->children())
678     // SubStmt might be null; as in missing decl or conditional of an if-stmt.
679     if (SubStmt && isAccessedBy(var, SubStmt))
680       return true;
681 
682   return false;
683 }
684 
685 static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
686   if (!decl) return false;
687   if (!isa<VarDecl>(decl)) return false;
688   const VarDecl *var = cast<VarDecl>(decl);
689   return isAccessedBy(*var, e);
690 }
691 
692 static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
693                                    const LValue &destLV, const Expr *init) {
694   bool needsCast = false;
695 
696   while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
697     switch (castExpr->getCastKind()) {
698     // Look through casts that don't require representation changes.
699     case CK_NoOp:
700     case CK_BitCast:
701     case CK_BlockPointerToObjCPointerCast:
702       needsCast = true;
703       break;
704 
705     // If we find an l-value to r-value cast from a __weak variable,
706     // emit this operation as a copy or move.
707     case CK_LValueToRValue: {
708       const Expr *srcExpr = castExpr->getSubExpr();
709       if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
710         return false;
711 
712       // Emit the source l-value.
713       LValue srcLV = CGF.EmitLValue(srcExpr);
714 
715       // Handle a formal type change to avoid asserting.
716       auto srcAddr = srcLV.getAddress(CGF);
717       if (needsCast) {
718         srcAddr = CGF.Builder.CreateElementBitCast(
719             srcAddr, destLV.getAddress(CGF).getElementType());
720       }
721 
722       // If it was an l-value, use objc_copyWeak.
723       if (srcExpr->isLValue()) {
724         CGF.EmitARCCopyWeak(destLV.getAddress(CGF), srcAddr);
725       } else {
726         assert(srcExpr->isXValue());
727         CGF.EmitARCMoveWeak(destLV.getAddress(CGF), srcAddr);
728       }
729       return true;
730     }
731 
732     // Stop at anything else.
733     default:
734       return false;
735     }
736 
737     init = castExpr->getSubExpr();
738   }
739   return false;
740 }
741 
742 static void drillIntoBlockVariable(CodeGenFunction &CGF,
743                                    LValue &lvalue,
744                                    const VarDecl *var) {
745   lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(CGF), var));
746 }
747 
748 void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
749                                            SourceLocation Loc) {
750   if (!SanOpts.has(SanitizerKind::NullabilityAssign))
751     return;
752 
753   auto Nullability = LHS.getType()->getNullability(getContext());
754   if (!Nullability || *Nullability != NullabilityKind::NonNull)
755     return;
756 
757   // Check if the right hand side of the assignment is nonnull, if the left
758   // hand side must be nonnull.
759   SanitizerScope SanScope(this);
760   llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
761   llvm::Constant *StaticData[] = {
762       EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
763       llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
764       llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
765   EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
766             SanitizerHandler::TypeMismatch, StaticData, RHS);
767 }
768 
769 void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
770                                      LValue lvalue, bool capturedByInit) {
771   Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
772   if (!lifetime) {
773     llvm::Value *value = EmitScalarExpr(init);
774     if (capturedByInit)
775       drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
776     EmitNullabilityCheck(lvalue, value, init->getExprLoc());
777     EmitStoreThroughLValue(RValue::get(value), lvalue, true);
778     return;
779   }
780 
781   if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
782     init = DIE->getExpr();
783 
784   // If we're emitting a value with lifetime, we have to do the
785   // initialization *before* we leave the cleanup scopes.
786   if (auto *EWC = dyn_cast<ExprWithCleanups>(init)) {
787     CodeGenFunction::RunCleanupsScope Scope(*this);
788     return EmitScalarInit(EWC->getSubExpr(), D, lvalue, capturedByInit);
789   }
790 
791   // We have to maintain the illusion that the variable is
792   // zero-initialized.  If the variable might be accessed in its
793   // initializer, zero-initialize before running the initializer, then
794   // actually perform the initialization with an assign.
795   bool accessedByInit = false;
796   if (lifetime != Qualifiers::OCL_ExplicitNone)
797     accessedByInit = (capturedByInit || isAccessedBy(D, init));
798   if (accessedByInit) {
799     LValue tempLV = lvalue;
800     // Drill down to the __block object if necessary.
801     if (capturedByInit) {
802       // We can use a simple GEP for this because it can't have been
803       // moved yet.
804       tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(*this),
805                                               cast<VarDecl>(D),
806                                               /*follow*/ false));
807     }
808 
809     auto ty =
810         cast<llvm::PointerType>(tempLV.getAddress(*this).getElementType());
811     llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());
812 
813     // If __weak, we want to use a barrier under certain conditions.
814     if (lifetime == Qualifiers::OCL_Weak)
815       EmitARCInitWeak(tempLV.getAddress(*this), zero);
816 
817     // Otherwise just do a simple store.
818     else
819       EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
820   }
821 
822   // Emit the initializer.
823   llvm::Value *value = nullptr;
824 
825   switch (lifetime) {
826   case Qualifiers::OCL_None:
827     llvm_unreachable("present but none");
828 
829   case Qualifiers::OCL_Strong: {
830     if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
831       value = EmitARCRetainScalarExpr(init);
832       break;
833     }
834     // If D is pseudo-strong, treat it like __unsafe_unretained here. This means
835     // that we omit the retain, and causes non-autoreleased return values to be
836     // immediately released.
837     LLVM_FALLTHROUGH;
838   }
839 
840   case Qualifiers::OCL_ExplicitNone:
841     value = EmitARCUnsafeUnretainedScalarExpr(init);
842     break;
843 
844   case Qualifiers::OCL_Weak: {
845     // If it's not accessed by the initializer, try to emit the
846     // initialization with a copy or move.
847     if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
848       return;
849     }
850 
851     // No way to optimize a producing initializer into this.  It's not
852     // worth optimizing for, because the value will immediately
853     // disappear in the common case.
854     value = EmitScalarExpr(init);
855 
856     if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
857     if (accessedByInit)
858       EmitARCStoreWeak(lvalue.getAddress(*this), value, /*ignored*/ true);
859     else
860       EmitARCInitWeak(lvalue.getAddress(*this), value);
861     return;
862   }
863 
864   case Qualifiers::OCL_Autoreleasing:
865     value = EmitARCRetainAutoreleaseScalarExpr(init);
866     break;
867   }
868 
869   if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
870 
871   EmitNullabilityCheck(lvalue, value, init->getExprLoc());
872 
873   // If the variable might have been accessed by its initializer, we
874   // might have to initialize with a barrier.  We have to do this for
875   // both __weak and __strong, but __weak got filtered out above.
876   if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
877     llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
878     EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
879     EmitARCRelease(oldValue, ARCImpreciseLifetime);
880     return;
881   }
882 
883   EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
884 }
885 
886 /// Decide whether we can emit the non-zero parts of the specified initializer
887 /// with equal or fewer than NumStores scalar stores.
888 static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
889                                                unsigned &NumStores) {
890   // Zero and Undef never requires any extra stores.
891   if (isa<llvm::ConstantAggregateZero>(Init) ||
892       isa<llvm::ConstantPointerNull>(Init) ||
893       isa<llvm::UndefValue>(Init))
894     return true;
895   if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
896       isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
897       isa<llvm::ConstantExpr>(Init))
898     return Init->isNullValue() || NumStores--;
899 
900   // See if we can emit each element.
901   if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
902     for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
903       llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
904       if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
905         return false;
906     }
907     return true;
908   }
909 
910   if (llvm::ConstantDataSequential *CDS =
911         dyn_cast<llvm::ConstantDataSequential>(Init)) {
912     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
913       llvm::Constant *Elt = CDS->getElementAsConstant(i);
914       if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
915         return false;
916     }
917     return true;
918   }
919 
920   // Anything else is hard and scary.
921   return false;
922 }
923 
924 /// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
925 /// the scalar stores that would be required.
926 static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
927                                         llvm::Constant *Init, Address Loc,
928                                         bool isVolatile, CGBuilderTy &Builder,
929                                         bool IsAutoInit) {
930   assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
931          "called emitStoresForInitAfterBZero for zero or undef value.");
932 
933   if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
934       isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
935       isa<llvm::ConstantExpr>(Init)) {
936     auto *I = Builder.CreateStore(Init, Loc, isVolatile);
937     if (IsAutoInit)
938       I->addAnnotationMetadata("auto-init");
939     return;
940   }
941 
942   if (llvm::ConstantDataSequential *CDS =
943           dyn_cast<llvm::ConstantDataSequential>(Init)) {
944     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
945       llvm::Constant *Elt = CDS->getElementAsConstant(i);
946 
947       // If necessary, get a pointer to the element and emit it.
948       if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
949         emitStoresForInitAfterBZero(
950             CGM, Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
951             Builder, IsAutoInit);
952     }
953     return;
954   }
955 
956   assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
957          "Unknown value type!");
958 
959   for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
960     llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
961 
962     // If necessary, get a pointer to the element and emit it.
963     if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
964       emitStoresForInitAfterBZero(CGM, Elt,
965                                   Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
966                                   isVolatile, Builder, IsAutoInit);
967   }
968 }
969 
970 /// Decide whether we should use bzero plus some stores to initialize a local
971 /// variable instead of using a memcpy from a constant global.  It is beneficial
972 /// to use bzero if the global is all zeros, or mostly zeros and large.
973 static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
974                                                  uint64_t GlobalSize) {
975   // If a global is all zeros, always use a bzero.
976   if (isa<llvm::ConstantAggregateZero>(Init)) return true;
977 
978   // If a non-zero global is <= 32 bytes, always use a memcpy.  If it is large,
979   // do it if it will require 6 or fewer scalar stores.
980   // TODO: Should budget depends on the size?  Avoiding a large global warrants
981   // plopping in more stores.
982   unsigned StoreBudget = 6;
983   uint64_t SizeLimit = 32;
984 
985   return GlobalSize > SizeLimit &&
986          canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
987 }
988 
989 /// Decide whether we should use memset to initialize a local variable instead
990 /// of using a memcpy from a constant global. Assumes we've already decided to
991 /// not user bzero.
992 /// FIXME We could be more clever, as we are for bzero above, and generate
993 ///       memset followed by stores. It's unclear that's worth the effort.
994 static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
995                                                 uint64_t GlobalSize,
996                                                 const llvm::DataLayout &DL) {
997   uint64_t SizeLimit = 32;
998   if (GlobalSize <= SizeLimit)
999     return nullptr;
1000   return llvm::isBytewiseValue(Init, DL);
1001 }
1002 
1003 /// Decide whether we want to split a constant structure or array store into a
1004 /// sequence of its fields' stores. This may cost us code size and compilation
1005 /// speed, but plays better with store optimizations.
1006 static bool shouldSplitConstantStore(CodeGenModule &CGM,
1007                                      uint64_t GlobalByteSize) {
1008   // Don't break things that occupy more than one cacheline.
1009   uint64_t ByteSizeLimit = 64;
1010   if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1011     return false;
1012   if (GlobalByteSize <= ByteSizeLimit)
1013     return true;
1014   return false;
1015 }
1016 
1017 enum class IsPattern { No, Yes };
1018 
1019 /// Generate a constant filled with either a pattern or zeroes.
1020 static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
1021                                         llvm::Type *Ty) {
1022   if (isPattern == IsPattern::Yes)
1023     return initializationPatternFor(CGM, Ty);
1024   else
1025     return llvm::Constant::getNullValue(Ty);
1026 }
1027 
1028 static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
1029                                         llvm::Constant *constant);
1030 
1031 /// Helper function for constWithPadding() to deal with padding in structures.
1032 static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
1033                                               IsPattern isPattern,
1034                                               llvm::StructType *STy,
1035                                               llvm::Constant *constant) {
1036   const llvm::DataLayout &DL = CGM.getDataLayout();
1037   const llvm::StructLayout *Layout = DL.getStructLayout(STy);
1038   llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
1039   unsigned SizeSoFar = 0;
1040   SmallVector<llvm::Constant *, 8> Values;
1041   bool NestedIntact = true;
1042   for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
1043     unsigned CurOff = Layout->getElementOffset(i);
1044     if (SizeSoFar < CurOff) {
1045       assert(!STy->isPacked());
1046       auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
1047       Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
1048     }
1049     llvm::Constant *CurOp;
1050     if (constant->isZeroValue())
1051       CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
1052     else
1053       CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
1054     auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
1055     if (CurOp != NewOp)
1056       NestedIntact = false;
1057     Values.push_back(NewOp);
1058     SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
1059   }
1060   unsigned TotalSize = Layout->getSizeInBytes();
1061   if (SizeSoFar < TotalSize) {
1062     auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
1063     Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
1064   }
1065   if (NestedIntact && Values.size() == STy->getNumElements())
1066     return constant;
1067   return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
1068 }
1069 
1070 /// Replace all padding bytes in a given constant with either a pattern byte or
1071 /// 0x00.
1072 static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
1073                                         llvm::Constant *constant) {
1074   llvm::Type *OrigTy = constant->getType();
1075   if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
1076     return constStructWithPadding(CGM, isPattern, STy, constant);
1077   if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(OrigTy)) {
1078     llvm::SmallVector<llvm::Constant *, 8> Values;
1079     uint64_t Size = ArrayTy->getNumElements();
1080     if (!Size)
1081       return constant;
1082     llvm::Type *ElemTy = ArrayTy->getElementType();
1083     bool ZeroInitializer = constant->isNullValue();
1084     llvm::Constant *OpValue, *PaddedOp;
1085     if (ZeroInitializer) {
1086       OpValue = llvm::Constant::getNullValue(ElemTy);
1087       PaddedOp = constWithPadding(CGM, isPattern, OpValue);
1088     }
1089     for (unsigned Op = 0; Op != Size; ++Op) {
1090       if (!ZeroInitializer) {
1091         OpValue = constant->getAggregateElement(Op);
1092         PaddedOp = constWithPadding(CGM, isPattern, OpValue);
1093       }
1094       Values.push_back(PaddedOp);
1095     }
1096     auto *NewElemTy = Values[0]->getType();
1097     if (NewElemTy == ElemTy)
1098       return constant;
1099     auto *NewArrayTy = llvm::ArrayType::get(NewElemTy, Size);
1100     return llvm::ConstantArray::get(NewArrayTy, Values);
1101   }
1102   // FIXME: Add handling for tail padding in vectors. Vectors don't
1103   // have padding between or inside elements, but the total amount of
1104   // data can be less than the allocated size.
1105   return constant;
1106 }
1107 
1108 Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
1109                                                llvm::Constant *Constant,
1110                                                CharUnits Align) {
1111   auto FunctionName = [&](const DeclContext *DC) -> std::string {
1112     if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
1113       if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
1114         return CC->getNameAsString();
1115       if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
1116         return CD->getNameAsString();
1117       return std::string(getMangledName(FD));
1118     } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
1119       return OM->getNameAsString();
1120     } else if (isa<BlockDecl>(DC)) {
1121       return "<block>";
1122     } else if (isa<CapturedDecl>(DC)) {
1123       return "<captured>";
1124     } else {
1125       llvm_unreachable("expected a function or method");
1126     }
1127   };
1128 
1129   // Form a simple per-variable cache of these values in case we find we
1130   // want to reuse them.
1131   llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
1132   if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
1133     auto *Ty = Constant->getType();
1134     bool isConstant = true;
1135     llvm::GlobalVariable *InsertBefore = nullptr;
1136     unsigned AS =
1137         getContext().getTargetAddressSpace(GetGlobalConstantAddressSpace());
1138     std::string Name;
1139     if (D.hasGlobalStorage())
1140       Name = getMangledName(&D).str() + ".const";
1141     else if (const DeclContext *DC = D.getParentFunctionOrMethod())
1142       Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
1143     else
1144       llvm_unreachable("local variable has no parent function or method");
1145     llvm::GlobalVariable *GV = new llvm::GlobalVariable(
1146         getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
1147         Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
1148     GV->setAlignment(Align.getAsAlign());
1149     GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1150     CacheEntry = GV;
1151   } else if (CacheEntry->getAlignment() < uint64_t(Align.getQuantity())) {
1152     CacheEntry->setAlignment(Align.getAsAlign());
1153   }
1154 
1155   return Address(CacheEntry, Align);
1156 }
1157 
1158 static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
1159                                                 const VarDecl &D,
1160                                                 CGBuilderTy &Builder,
1161                                                 llvm::Constant *Constant,
1162                                                 CharUnits Align) {
1163   Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
1164   llvm::Type *BP = llvm::PointerType::getInt8PtrTy(CGM.getLLVMContext(),
1165                                                    SrcPtr.getAddressSpace());
1166   if (SrcPtr.getType() != BP)
1167     SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
1168   return SrcPtr;
1169 }
1170 
1171 static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
1172                                   Address Loc, bool isVolatile,
1173                                   CGBuilderTy &Builder,
1174                                   llvm::Constant *constant, bool IsAutoInit) {
1175   auto *Ty = constant->getType();
1176   uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
1177   if (!ConstantSize)
1178     return;
1179 
1180   bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
1181                           Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
1182   if (canDoSingleStore) {
1183     auto *I = Builder.CreateStore(constant, Loc, isVolatile);
1184     if (IsAutoInit)
1185       I->addAnnotationMetadata("auto-init");
1186     return;
1187   }
1188 
1189   auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);
1190 
1191   // If the initializer is all or mostly the same, codegen with bzero / memset
1192   // then do a few stores afterward.
1193   if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
1194     auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0),
1195                                    SizeVal, isVolatile);
1196     if (IsAutoInit)
1197       I->addAnnotationMetadata("auto-init");
1198 
1199     bool valueAlreadyCorrect =
1200         constant->isNullValue() || isa<llvm::UndefValue>(constant);
1201     if (!valueAlreadyCorrect) {
1202       Loc = Builder.CreateBitCast(Loc, Ty->getPointerTo(Loc.getAddressSpace()));
1203       emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder,
1204                                   IsAutoInit);
1205     }
1206     return;
1207   }
1208 
1209   // If the initializer is a repeated byte pattern, use memset.
1210   llvm::Value *Pattern =
1211       shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
1212   if (Pattern) {
1213     uint64_t Value = 0x00;
1214     if (!isa<llvm::UndefValue>(Pattern)) {
1215       const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
1216       assert(AP.getBitWidth() <= 8);
1217       Value = AP.getLimitedValue();
1218     }
1219     auto *I = Builder.CreateMemSet(
1220         Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal, isVolatile);
1221     if (IsAutoInit)
1222       I->addAnnotationMetadata("auto-init");
1223     return;
1224   }
1225 
1226   // If the initializer is small, use a handful of stores.
1227   if (shouldSplitConstantStore(CGM, ConstantSize)) {
1228     if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
1229       // FIXME: handle the case when STy != Loc.getElementType().
1230       if (STy == Loc.getElementType()) {
1231         for (unsigned i = 0; i != constant->getNumOperands(); i++) {
1232           Address EltPtr = Builder.CreateStructGEP(Loc, i);
1233           emitStoresForConstant(
1234               CGM, D, EltPtr, isVolatile, Builder,
1235               cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)),
1236               IsAutoInit);
1237         }
1238         return;
1239       }
1240     } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
1241       // FIXME: handle the case when ATy != Loc.getElementType().
1242       if (ATy == Loc.getElementType()) {
1243         for (unsigned i = 0; i != ATy->getNumElements(); i++) {
1244           Address EltPtr = Builder.CreateConstArrayGEP(Loc, i);
1245           emitStoresForConstant(
1246               CGM, D, EltPtr, isVolatile, Builder,
1247               cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)),
1248               IsAutoInit);
1249         }
1250         return;
1251       }
1252     }
1253   }
1254 
1255   // Copy from a global.
1256   auto *I =
1257       Builder.CreateMemCpy(Loc,
1258                            createUnnamedGlobalForMemcpyFrom(
1259                                CGM, D, Builder, constant, Loc.getAlignment()),
1260                            SizeVal, isVolatile);
1261   if (IsAutoInit)
1262     I->addAnnotationMetadata("auto-init");
1263 }
1264 
1265 static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
1266                                   Address Loc, bool isVolatile,
1267                                   CGBuilderTy &Builder) {
1268   llvm::Type *ElTy = Loc.getElementType();
1269   llvm::Constant *constant =
1270       constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
1271   emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
1272                         /*IsAutoInit=*/true);
1273 }
1274 
1275 static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
1276                                      Address Loc, bool isVolatile,
1277                                      CGBuilderTy &Builder) {
1278   llvm::Type *ElTy = Loc.getElementType();
1279   llvm::Constant *constant = constWithPadding(
1280       CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
1281   assert(!isa<llvm::UndefValue>(constant));
1282   emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
1283                         /*IsAutoInit=*/true);
1284 }
1285 
1286 static bool containsUndef(llvm::Constant *constant) {
1287   auto *Ty = constant->getType();
1288   if (isa<llvm::UndefValue>(constant))
1289     return true;
1290   if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
1291     for (llvm::Use &Op : constant->operands())
1292       if (containsUndef(cast<llvm::Constant>(Op)))
1293         return true;
1294   return false;
1295 }
1296 
1297 static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
1298                                     llvm::Constant *constant) {
1299   auto *Ty = constant->getType();
1300   if (isa<llvm::UndefValue>(constant))
1301     return patternOrZeroFor(CGM, isPattern, Ty);
1302   if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
1303     return constant;
1304   if (!containsUndef(constant))
1305     return constant;
1306   llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
1307   for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
1308     auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
1309     Values[Op] = replaceUndef(CGM, isPattern, OpValue);
1310   }
1311   if (Ty->isStructTy())
1312     return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
1313   if (Ty->isArrayTy())
1314     return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
1315   assert(Ty->isVectorTy());
1316   return llvm::ConstantVector::get(Values);
1317 }
1318 
1319 /// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
1320 /// variable declaration with auto, register, or no storage class specifier.
1321 /// These turn into simple stack objects, or GlobalValues depending on target.
1322 void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
1323   AutoVarEmission emission = EmitAutoVarAlloca(D);
1324   EmitAutoVarInit(emission);
1325   EmitAutoVarCleanups(emission);
1326 }
1327 
1328 /// Emit a lifetime.begin marker if some criteria are satisfied.
1329 /// \return a pointer to the temporary size Value if a marker was emitted, null
1330 /// otherwise
1331 llvm::Value *CodeGenFunction::EmitLifetimeStart(llvm::TypeSize Size,
1332                                                 llvm::Value *Addr) {
1333   if (!ShouldEmitLifetimeMarkers)
1334     return nullptr;
1335 
1336   assert(Addr->getType()->getPointerAddressSpace() ==
1337              CGM.getDataLayout().getAllocaAddrSpace() &&
1338          "Pointer should be in alloca address space");
1339   llvm::Value *SizeV = llvm::ConstantInt::get(
1340       Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
1341   Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
1342   llvm::CallInst *C =
1343       Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
1344   C->setDoesNotThrow();
1345   return SizeV;
1346 }
1347 
1348 void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
1349   assert(Addr->getType()->getPointerAddressSpace() ==
1350              CGM.getDataLayout().getAllocaAddrSpace() &&
1351          "Pointer should be in alloca address space");
1352   Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
1353   llvm::CallInst *C =
1354       Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
1355   C->setDoesNotThrow();
1356 }
1357 
1358 void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
1359     CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
1360   // For each dimension stores its QualType and corresponding
1361   // size-expression Value.
1362   SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
1363   SmallVector<IdentifierInfo *, 4> VLAExprNames;
1364 
1365   // Break down the array into individual dimensions.
1366   QualType Type1D = D.getType();
1367   while (getContext().getAsVariableArrayType(Type1D)) {
1368     auto VlaSize = getVLAElements1D(Type1D);
1369     if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1370       Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
1371     else {
1372       // Generate a locally unique name for the size expression.
1373       Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
1374       SmallString<12> Buffer;
1375       StringRef NameRef = Name.toStringRef(Buffer);
1376       auto &Ident = getContext().Idents.getOwn(NameRef);
1377       VLAExprNames.push_back(&Ident);
1378       auto SizeExprAddr =
1379           CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
1380       Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
1381       Dimensions.emplace_back(SizeExprAddr.getPointer(),
1382                               Type1D.getUnqualifiedType());
1383     }
1384     Type1D = VlaSize.Type;
1385   }
1386 
1387   if (!EmitDebugInfo)
1388     return;
1389 
1390   // Register each dimension's size-expression with a DILocalVariable,
1391   // so that it can be used by CGDebugInfo when instantiating a DISubrange
1392   // to describe this array.
1393   unsigned NameIdx = 0;
1394   for (auto &VlaSize : Dimensions) {
1395     llvm::Metadata *MD;
1396     if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1397       MD = llvm::ConstantAsMetadata::get(C);
1398     else {
1399       // Create an artificial VarDecl to generate debug info for.
1400       IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
1401       auto VlaExprTy = VlaSize.NumElts->getType()->getPointerElementType();
1402       auto QT = getContext().getIntTypeForBitwidth(
1403           VlaExprTy->getScalarSizeInBits(), false);
1404       auto *ArtificialDecl = VarDecl::Create(
1405           getContext(), const_cast<DeclContext *>(D.getDeclContext()),
1406           D.getLocation(), D.getLocation(), NameIdent, QT,
1407           getContext().CreateTypeSourceInfo(QT), SC_Auto);
1408       ArtificialDecl->setImplicit();
1409 
1410       MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
1411                                          Builder);
1412     }
1413     assert(MD && "No Size expression debug node created");
1414     DI->registerVLASizeExpression(VlaSize.Type, MD);
1415   }
1416 }
1417 
1418 /// EmitAutoVarAlloca - Emit the alloca and debug information for a
1419 /// local variable.  Does not emit initialization or destruction.
1420 CodeGenFunction::AutoVarEmission
1421 CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
1422   QualType Ty = D.getType();
1423   assert(
1424       Ty.getAddressSpace() == LangAS::Default ||
1425       (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));
1426 
1427   AutoVarEmission emission(D);
1428 
1429   bool isEscapingByRef = D.isEscapingByref();
1430   emission.IsEscapingByRef = isEscapingByRef;
1431 
1432   CharUnits alignment = getContext().getDeclAlign(&D);
1433 
1434   // If the type is variably-modified, emit all the VLA sizes for it.
1435   if (Ty->isVariablyModifiedType())
1436     EmitVariablyModifiedType(Ty);
1437 
1438   auto *DI = getDebugInfo();
1439   bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo();
1440 
1441   Address address = Address::invalid();
1442   Address AllocaAddr = Address::invalid();
1443   Address OpenMPLocalAddr = Address::invalid();
1444   if (CGM.getLangOpts().OpenMPIRBuilder)
1445     OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(*this, &D);
1446   else
1447     OpenMPLocalAddr =
1448         getLangOpts().OpenMP
1449             ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
1450             : Address::invalid();
1451 
1452   bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();
1453 
1454   if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
1455     address = OpenMPLocalAddr;
1456     AllocaAddr = OpenMPLocalAddr;
1457   } else if (Ty->isConstantSizeType()) {
1458     // If this value is an array or struct with a statically determinable
1459     // constant initializer, there are optimizations we can do.
1460     //
1461     // TODO: We should constant-evaluate the initializer of any variable,
1462     // as long as it is initialized by a constant expression. Currently,
1463     // isConstantInitializer produces wrong answers for structs with
1464     // reference or bitfield members, and a few other cases, and checking
1465     // for POD-ness protects us from some of these.
1466     if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
1467         (D.isConstexpr() ||
1468          ((Ty.isPODType(getContext()) ||
1469            getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
1470           D.getInit()->isConstantInitializer(getContext(), false)))) {
1471 
1472       // If the variable's a const type, and it's neither an NRVO
1473       // candidate nor a __block variable and has no mutable members,
1474       // emit it as a global instead.
1475       // Exception is if a variable is located in non-constant address space
1476       // in OpenCL.
1477       if ((!getLangOpts().OpenCL ||
1478            Ty.getAddressSpace() == LangAS::opencl_constant) &&
1479           (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
1480            !isEscapingByRef && CGM.isTypeConstant(Ty, true))) {
1481         EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
1482 
1483         // Signal this condition to later callbacks.
1484         emission.Addr = Address::invalid();
1485         assert(emission.wasEmittedAsGlobal());
1486         return emission;
1487       }
1488 
1489       // Otherwise, tell the initialization code that we're in this case.
1490       emission.IsConstantAggregate = true;
1491     }
1492 
1493     // A normal fixed sized variable becomes an alloca in the entry block,
1494     // unless:
1495     // - it's an NRVO variable.
1496     // - we are compiling OpenMP and it's an OpenMP local variable.
1497     if (NRVO) {
1498       // The named return value optimization: allocate this variable in the
1499       // return slot, so that we can elide the copy when returning this
1500       // variable (C++0x [class.copy]p34).
1501       address = ReturnValue;
1502       AllocaAddr = ReturnValue;
1503 
1504       if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
1505         const auto *RD = RecordTy->getDecl();
1506         const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
1507         if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
1508             RD->isNonTrivialToPrimitiveDestroy()) {
1509           // Create a flag that is used to indicate when the NRVO was applied
1510           // to this variable. Set it to zero to indicate that NRVO was not
1511           // applied.
1512           llvm::Value *Zero = Builder.getFalse();
1513           Address NRVOFlag =
1514               CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo",
1515                                /*ArraySize=*/nullptr, &AllocaAddr);
1516           EnsureInsertPoint();
1517           Builder.CreateStore(Zero, NRVOFlag);
1518 
1519           // Record the NRVO flag for this variable.
1520           NRVOFlags[&D] = NRVOFlag.getPointer();
1521           emission.NRVOFlag = NRVOFlag.getPointer();
1522         }
1523       }
1524     } else {
1525       CharUnits allocaAlignment;
1526       llvm::Type *allocaTy;
1527       if (isEscapingByRef) {
1528         auto &byrefInfo = getBlockByrefInfo(&D);
1529         allocaTy = byrefInfo.Type;
1530         allocaAlignment = byrefInfo.ByrefAlignment;
1531       } else {
1532         allocaTy = ConvertTypeForMem(Ty);
1533         allocaAlignment = alignment;
1534       }
1535 
1536       // Create the alloca.  Note that we set the name separately from
1537       // building the instruction so that it's there even in no-asserts
1538       // builds.
1539       address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName(),
1540                                  /*ArraySize=*/nullptr, &AllocaAddr);
1541 
1542       // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
1543       // the catch parameter starts in the catchpad instruction, and we can't
1544       // insert code in those basic blocks.
1545       bool IsMSCatchParam =
1546           D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();
1547 
1548       // Emit a lifetime intrinsic if meaningful. There's no point in doing this
1549       // if we don't have a valid insertion point (?).
1550       if (HaveInsertPoint() && !IsMSCatchParam) {
1551         // If there's a jump into the lifetime of this variable, its lifetime
1552         // gets broken up into several regions in IR, which requires more work
1553         // to handle correctly. For now, just omit the intrinsics; this is a
1554         // rare case, and it's better to just be conservatively correct.
1555         // PR28267.
1556         //
1557         // We have to do this in all language modes if there's a jump past the
1558         // declaration. We also have to do it in C if there's a jump to an
1559         // earlier point in the current block because non-VLA lifetimes begin as
1560         // soon as the containing block is entered, not when its variables
1561         // actually come into scope; suppressing the lifetime annotations
1562         // completely in this case is unnecessarily pessimistic, but again, this
1563         // is rare.
1564         if (!Bypasses.IsBypassed(&D) &&
1565             !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
1566           llvm::TypeSize Size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
1567           emission.SizeForLifetimeMarkers =
1568               EmitLifetimeStart(Size, AllocaAddr.getPointer());
1569         }
1570       } else {
1571         assert(!emission.useLifetimeMarkers());
1572       }
1573     }
1574   } else {
1575     EnsureInsertPoint();
1576 
1577     if (!DidCallStackSave) {
1578       // Save the stack.
1579       Address Stack =
1580         CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");
1581 
1582       llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
1583       llvm::Value *V = Builder.CreateCall(F);
1584       Builder.CreateStore(V, Stack);
1585 
1586       DidCallStackSave = true;
1587 
1588       // Push a cleanup block and restore the stack there.
1589       // FIXME: in general circumstances, this should be an EH cleanup.
1590       pushStackRestore(NormalCleanup, Stack);
1591     }
1592 
1593     auto VlaSize = getVLASize(Ty);
1594     llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);
1595 
1596     // Allocate memory for the array.
1597     address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
1598                                &AllocaAddr);
1599 
1600     // If we have debug info enabled, properly describe the VLA dimensions for
1601     // this type by registering the vla size expression for each of the
1602     // dimensions.
1603     EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
1604   }
1605 
1606   setAddrOfLocalVar(&D, address);
1607   emission.Addr = address;
1608   emission.AllocaAddr = AllocaAddr;
1609 
1610   // Emit debug info for local var declaration.
1611   if (EmitDebugInfo && HaveInsertPoint()) {
1612     Address DebugAddr = address;
1613     bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
1614     DI->setLocation(D.getLocation());
1615 
1616     // If NRVO, use a pointer to the return address.
1617     if (UsePointerValue) {
1618       DebugAddr = ReturnValuePointer;
1619       AllocaAddr = ReturnValuePointer;
1620     }
1621     (void)DI->EmitDeclareOfAutoVariable(&D, AllocaAddr.getPointer(), Builder,
1622                                         UsePointerValue);
1623   }
1624 
1625   if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
1626     EmitVarAnnotations(&D, address.getPointer());
1627 
1628   // Make sure we call @llvm.lifetime.end.
1629   if (emission.useLifetimeMarkers())
1630     EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
1631                                          emission.getOriginalAllocatedAddress(),
1632                                          emission.getSizeForLifetimeMarkers());
1633 
1634   return emission;
1635 }
1636 
1637 static bool isCapturedBy(const VarDecl &, const Expr *);
1638 
1639 /// Determines whether the given __block variable is potentially
1640 /// captured by the given statement.
1641 static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
1642   if (const Expr *E = dyn_cast<Expr>(S))
1643     return isCapturedBy(Var, E);
1644   for (const Stmt *SubStmt : S->children())
1645     if (isCapturedBy(Var, SubStmt))
1646       return true;
1647   return false;
1648 }
1649 
1650 /// Determines whether the given __block variable is potentially
1651 /// captured by the given expression.
1652 static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
1653   // Skip the most common kinds of expressions that make
1654   // hierarchy-walking expensive.
1655   E = E->IgnoreParenCasts();
1656 
1657   if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
1658     const BlockDecl *Block = BE->getBlockDecl();
1659     for (const auto &I : Block->captures()) {
1660       if (I.getVariable() == &Var)
1661         return true;
1662     }
1663 
1664     // No need to walk into the subexpressions.
1665     return false;
1666   }
1667 
1668   if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
1669     const CompoundStmt *CS = SE->getSubStmt();
1670     for (const auto *BI : CS->body())
1671       if (const auto *BIE = dyn_cast<Expr>(BI)) {
1672         if (isCapturedBy(Var, BIE))
1673           return true;
1674       }
1675       else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
1676           // special case declarations
1677           for (const auto *I : DS->decls()) {
1678               if (const auto *VD = dyn_cast<VarDecl>((I))) {
1679                 const Expr *Init = VD->getInit();
1680                 if (Init && isCapturedBy(Var, Init))
1681                   return true;
1682               }
1683           }
1684       }
1685       else
1686         // FIXME. Make safe assumption assuming arbitrary statements cause capturing.
1687         // Later, provide code to poke into statements for capture analysis.
1688         return true;
1689     return false;
1690   }
1691 
1692   for (const Stmt *SubStmt : E->children())
1693     if (isCapturedBy(Var, SubStmt))
1694       return true;
1695 
1696   return false;
1697 }
1698 
1699 /// Determine whether the given initializer is trivial in the sense
1700 /// that it requires no code to be generated.
1701 bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
1702   if (!Init)
1703     return true;
1704 
1705   if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
1706     if (CXXConstructorDecl *Constructor = Construct->getConstructor())
1707       if (Constructor->isTrivial() &&
1708           Constructor->isDefaultConstructor() &&
1709           !Construct->requiresZeroInitialization())
1710         return true;
1711 
1712   return false;
1713 }
1714 
1715 void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
1716                                                       const VarDecl &D,
1717                                                       Address Loc) {
1718   auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
1719   CharUnits Size = getContext().getTypeSizeInChars(type);
1720   bool isVolatile = type.isVolatileQualified();
1721   if (!Size.isZero()) {
1722     switch (trivialAutoVarInit) {
1723     case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1724       llvm_unreachable("Uninitialized handled by caller");
1725     case LangOptions::TrivialAutoVarInitKind::Zero:
1726       if (CGM.stopAutoInit())
1727         return;
1728       emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
1729       break;
1730     case LangOptions::TrivialAutoVarInitKind::Pattern:
1731       if (CGM.stopAutoInit())
1732         return;
1733       emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
1734       break;
1735     }
1736     return;
1737   }
1738 
1739   // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
1740   // them, so emit a memcpy with the VLA size to initialize each element.
1741   // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
1742   // will catch that code, but there exists code which generates zero-sized
1743   // VLAs. Be nice and initialize whatever they requested.
1744   const auto *VlaType = getContext().getAsVariableArrayType(type);
1745   if (!VlaType)
1746     return;
1747   auto VlaSize = getVLASize(VlaType);
1748   auto SizeVal = VlaSize.NumElts;
1749   CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1750   switch (trivialAutoVarInit) {
1751   case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1752     llvm_unreachable("Uninitialized handled by caller");
1753 
1754   case LangOptions::TrivialAutoVarInitKind::Zero: {
1755     if (CGM.stopAutoInit())
1756       return;
1757     if (!EltSize.isOne())
1758       SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1759     auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0),
1760                                    SizeVal, isVolatile);
1761     I->addAnnotationMetadata("auto-init");
1762     break;
1763   }
1764 
1765   case LangOptions::TrivialAutoVarInitKind::Pattern: {
1766     if (CGM.stopAutoInit())
1767       return;
1768     llvm::Type *ElTy = Loc.getElementType();
1769     llvm::Constant *Constant = constWithPadding(
1770         CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
1771     CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
1772     llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
1773     llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
1774     llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
1775     llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
1776         SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
1777         "vla.iszerosized");
1778     Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
1779     EmitBlock(SetupBB);
1780     if (!EltSize.isOne())
1781       SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1782     llvm::Value *BaseSizeInChars =
1783         llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
1784     Address Begin = Builder.CreateElementBitCast(Loc, Int8Ty, "vla.begin");
1785     llvm::Value *End = Builder.CreateInBoundsGEP(
1786         Begin.getElementType(), Begin.getPointer(), SizeVal, "vla.end");
1787     llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
1788     EmitBlock(LoopBB);
1789     llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
1790     Cur->addIncoming(Begin.getPointer(), OriginBB);
1791     CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
1792     auto *I =
1793         Builder.CreateMemCpy(Address(Cur, CurAlign),
1794                              createUnnamedGlobalForMemcpyFrom(
1795                                  CGM, D, Builder, Constant, ConstantAlign),
1796                              BaseSizeInChars, isVolatile);
1797     I->addAnnotationMetadata("auto-init");
1798     llvm::Value *Next =
1799         Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
1800     llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
1801     Builder.CreateCondBr(Done, ContBB, LoopBB);
1802     Cur->addIncoming(Next, LoopBB);
1803     EmitBlock(ContBB);
1804   } break;
1805   }
1806 }
1807 
1808 void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
1809   assert(emission.Variable && "emission was not valid!");
1810 
1811   // If this was emitted as a global constant, we're done.
1812   if (emission.wasEmittedAsGlobal()) return;
1813 
1814   const VarDecl &D = *emission.Variable;
1815   auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
1816   QualType type = D.getType();
1817 
1818   // If this local has an initializer, emit it now.
1819   const Expr *Init = D.getInit();
1820 
1821   // If we are at an unreachable point, we don't need to emit the initializer
1822   // unless it contains a label.
1823   if (!HaveInsertPoint()) {
1824     if (!Init || !ContainsLabel(Init)) return;
1825     EnsureInsertPoint();
1826   }
1827 
1828   // Initialize the structure of a __block variable.
1829   if (emission.IsEscapingByRef)
1830     emitByrefStructureInit(emission);
1831 
1832   // Initialize the variable here if it doesn't have a initializer and it is a
1833   // C struct that is non-trivial to initialize or an array containing such a
1834   // struct.
1835   if (!Init &&
1836       type.isNonTrivialToPrimitiveDefaultInitialize() ==
1837           QualType::PDIK_Struct) {
1838     LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
1839     if (emission.IsEscapingByRef)
1840       drillIntoBlockVariable(*this, Dst, &D);
1841     defaultInitNonTrivialCStructVar(Dst);
1842     return;
1843   }
1844 
1845   // Check whether this is a byref variable that's potentially
1846   // captured and moved by its own initializer.  If so, we'll need to
1847   // emit the initializer first, then copy into the variable.
1848   bool capturedByInit =
1849       Init && emission.IsEscapingByRef && isCapturedBy(D, Init);
1850 
1851   bool locIsByrefHeader = !capturedByInit;
1852   const Address Loc =
1853       locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;
1854 
1855   // Note: constexpr already initializes everything correctly.
1856   LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
1857       (D.isConstexpr()
1858            ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1859            : (D.getAttr<UninitializedAttr>()
1860                   ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1861                   : getContext().getLangOpts().getTrivialAutoVarInit()));
1862 
1863   auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
1864     if (trivialAutoVarInit ==
1865         LangOptions::TrivialAutoVarInitKind::Uninitialized)
1866       return;
1867 
1868     // Only initialize a __block's storage: we always initialize the header.
1869     if (emission.IsEscapingByRef && !locIsByrefHeader)
1870       Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);
1871 
1872     return emitZeroOrPatternForAutoVarInit(type, D, Loc);
1873   };
1874 
1875   if (isTrivialInitializer(Init))
1876     return initializeWhatIsTechnicallyUninitialized(Loc);
1877 
1878   llvm::Constant *constant = nullptr;
1879   if (emission.IsConstantAggregate ||
1880       D.mightBeUsableInConstantExpressions(getContext())) {
1881     assert(!capturedByInit && "constant init contains a capturing block?");
1882     constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
1883     if (constant && !constant->isZeroValue() &&
1884         (trivialAutoVarInit !=
1885          LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
1886       IsPattern isPattern =
1887           (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
1888               ? IsPattern::Yes
1889               : IsPattern::No;
1890       // C guarantees that brace-init with fewer initializers than members in
1891       // the aggregate will initialize the rest of the aggregate as-if it were
1892       // static initialization. In turn static initialization guarantees that
1893       // padding is initialized to zero bits. We could instead pattern-init if D
1894       // has any ImplicitValueInitExpr, but that seems to be unintuitive
1895       // behavior.
1896       constant = constWithPadding(CGM, IsPattern::No,
1897                                   replaceUndef(CGM, isPattern, constant));
1898     }
1899   }
1900 
1901   if (!constant) {
1902     initializeWhatIsTechnicallyUninitialized(Loc);
1903     LValue lv = MakeAddrLValue(Loc, type);
1904     lv.setNonGC(true);
1905     return EmitExprAsInit(Init, &D, lv, capturedByInit);
1906   }
1907 
1908   if (!emission.IsConstantAggregate) {
1909     // For simple scalar/complex initialization, store the value directly.
1910     LValue lv = MakeAddrLValue(Loc, type);
1911     lv.setNonGC(true);
1912     return EmitStoreThroughLValue(RValue::get(constant), lv, true);
1913   }
1914 
1915   llvm::Type *BP = CGM.Int8Ty->getPointerTo(Loc.getAddressSpace());
1916   emitStoresForConstant(
1917       CGM, D, (Loc.getType() == BP) ? Loc : Builder.CreateBitCast(Loc, BP),
1918       type.isVolatileQualified(), Builder, constant, /*IsAutoInit=*/false);
1919 }
1920 
1921 /// Emit an expression as an initializer for an object (variable, field, etc.)
1922 /// at the given location.  The expression is not necessarily the normal
1923 /// initializer for the object, and the address is not necessarily
1924 /// its normal location.
1925 ///
1926 /// \param init the initializing expression
1927 /// \param D the object to act as if we're initializing
1928 /// \param lvalue the lvalue to initialize
1929 /// \param capturedByInit true if \p D is a __block variable
1930 ///   whose address is potentially changed by the initializer
1931 void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
1932                                      LValue lvalue, bool capturedByInit) {
1933   QualType type = D->getType();
1934 
1935   if (type->isReferenceType()) {
1936     RValue rvalue = EmitReferenceBindingToExpr(init);
1937     if (capturedByInit)
1938       drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
1939     EmitStoreThroughLValue(rvalue, lvalue, true);
1940     return;
1941   }
1942   switch (getEvaluationKind(type)) {
1943   case TEK_Scalar:
1944     EmitScalarInit(init, D, lvalue, capturedByInit);
1945     return;
1946   case TEK_Complex: {
1947     ComplexPairTy complex = EmitComplexExpr(init);
1948     if (capturedByInit)
1949       drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
1950     EmitStoreOfComplex(complex, lvalue, /*init*/ true);
1951     return;
1952   }
1953   case TEK_Aggregate:
1954     if (type->isAtomicType()) {
1955       EmitAtomicInit(const_cast<Expr*>(init), lvalue);
1956     } else {
1957       AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
1958       if (isa<VarDecl>(D))
1959         Overlap = AggValueSlot::DoesNotOverlap;
1960       else if (auto *FD = dyn_cast<FieldDecl>(D))
1961         Overlap = getOverlapForFieldInit(FD);
1962       // TODO: how can we delay here if D is captured by its initializer?
1963       EmitAggExpr(init, AggValueSlot::forLValue(
1964                             lvalue, *this, AggValueSlot::IsDestructed,
1965                             AggValueSlot::DoesNotNeedGCBarriers,
1966                             AggValueSlot::IsNotAliased, Overlap));
1967     }
1968     return;
1969   }
1970   llvm_unreachable("bad evaluation kind");
1971 }
1972 
1973 /// Enter a destroy cleanup for the given local variable.
1974 void CodeGenFunction::emitAutoVarTypeCleanup(
1975                             const CodeGenFunction::AutoVarEmission &emission,
1976                             QualType::DestructionKind dtorKind) {
1977   assert(dtorKind != QualType::DK_none);
1978 
1979   // Note that for __block variables, we want to destroy the
1980   // original stack object, not the possibly forwarded object.
1981   Address addr = emission.getObjectAddress(*this);
1982 
1983   const VarDecl *var = emission.Variable;
1984   QualType type = var->getType();
1985 
1986   CleanupKind cleanupKind = NormalAndEHCleanup;
1987   CodeGenFunction::Destroyer *destroyer = nullptr;
1988 
1989   switch (dtorKind) {
1990   case QualType::DK_none:
1991     llvm_unreachable("no cleanup for trivially-destructible variable");
1992 
1993   case QualType::DK_cxx_destructor:
1994     // If there's an NRVO flag on the emission, we need a different
1995     // cleanup.
1996     if (emission.NRVOFlag) {
1997       assert(!type->isArrayType());
1998       CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
1999       EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type, dtor,
2000                                                   emission.NRVOFlag);
2001       return;
2002     }
2003     break;
2004 
2005   case QualType::DK_objc_strong_lifetime:
2006     // Suppress cleanups for pseudo-strong variables.
2007     if (var->isARCPseudoStrong()) return;
2008 
2009     // Otherwise, consider whether to use an EH cleanup or not.
2010     cleanupKind = getARCCleanupKind();
2011 
2012     // Use the imprecise destroyer by default.
2013     if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
2014       destroyer = CodeGenFunction::destroyARCStrongImprecise;
2015     break;
2016 
2017   case QualType::DK_objc_weak_lifetime:
2018     break;
2019 
2020   case QualType::DK_nontrivial_c_struct:
2021     destroyer = CodeGenFunction::destroyNonTrivialCStruct;
2022     if (emission.NRVOFlag) {
2023       assert(!type->isArrayType());
2024       EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
2025                                                 emission.NRVOFlag, type);
2026       return;
2027     }
2028     break;
2029   }
2030 
2031   // If we haven't chosen a more specific destroyer, use the default.
2032   if (!destroyer) destroyer = getDestroyer(dtorKind);
2033 
2034   // Use an EH cleanup in array destructors iff the destructor itself
2035   // is being pushed as an EH cleanup.
2036   bool useEHCleanup = (cleanupKind & EHCleanup);
2037   EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
2038                                      useEHCleanup);
2039 }
2040 
2041 void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
2042   assert(emission.Variable && "emission was not valid!");
2043 
2044   // If this was emitted as a global constant, we're done.
2045   if (emission.wasEmittedAsGlobal()) return;
2046 
2047   // If we don't have an insertion point, we're done.  Sema prevents
2048   // us from jumping into any of these scopes anyway.
2049   if (!HaveInsertPoint()) return;
2050 
2051   const VarDecl &D = *emission.Variable;
2052 
2053   // Check the type for a cleanup.
2054   if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
2055     emitAutoVarTypeCleanup(emission, dtorKind);
2056 
2057   // In GC mode, honor objc_precise_lifetime.
2058   if (getLangOpts().getGC() != LangOptions::NonGC &&
2059       D.hasAttr<ObjCPreciseLifetimeAttr>()) {
2060     EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
2061   }
2062 
2063   // Handle the cleanup attribute.
2064   if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
2065     const FunctionDecl *FD = CA->getFunctionDecl();
2066 
2067     llvm::Constant *F = CGM.GetAddrOfFunction(FD);
2068     assert(F && "Could not find function!");
2069 
2070     const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
2071     EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
2072   }
2073 
2074   // If this is a block variable, call _Block_object_destroy
2075   // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
2076   // mode.
2077   if (emission.IsEscapingByRef &&
2078       CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
2079     BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
2080     if (emission.Variable->getType().isObjCGCWeak())
2081       Flags |= BLOCK_FIELD_IS_WEAK;
2082     enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
2083                       /*LoadBlockVarAddr*/ false,
2084                       cxxDestructorCanThrow(emission.Variable->getType()));
2085   }
2086 }
2087 
2088 CodeGenFunction::Destroyer *
2089 CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
2090   switch (kind) {
2091   case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
2092   case QualType::DK_cxx_destructor:
2093     return destroyCXXObject;
2094   case QualType::DK_objc_strong_lifetime:
2095     return destroyARCStrongPrecise;
2096   case QualType::DK_objc_weak_lifetime:
2097     return destroyARCWeak;
2098   case QualType::DK_nontrivial_c_struct:
2099     return destroyNonTrivialCStruct;
2100   }
2101   llvm_unreachable("Unknown DestructionKind");
2102 }
2103 
2104 /// pushEHDestroy - Push the standard destructor for the given type as
2105 /// an EH-only cleanup.
2106 void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
2107                                     Address addr, QualType type) {
2108   assert(dtorKind && "cannot push destructor for trivial type");
2109   assert(needsEHCleanup(dtorKind));
2110 
2111   pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
2112 }
2113 
2114 /// pushDestroy - Push the standard destructor for the given type as
2115 /// at least a normal cleanup.
2116 void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
2117                                   Address addr, QualType type) {
2118   assert(dtorKind && "cannot push destructor for trivial type");
2119 
2120   CleanupKind cleanupKind = getCleanupKind(dtorKind);
2121   pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
2122               cleanupKind & EHCleanup);
2123 }
2124 
2125 void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
2126                                   QualType type, Destroyer *destroyer,
2127                                   bool useEHCleanupForArray) {
2128   pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
2129                                      destroyer, useEHCleanupForArray);
2130 }
2131 
2132 void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
2133   EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
2134 }
2135 
2136 void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
2137                                                   Address addr, QualType type,
2138                                                   Destroyer *destroyer,
2139                                                   bool useEHCleanupForArray) {
2140   // If we're not in a conditional branch, we don't need to bother generating a
2141   // conditional cleanup.
2142   if (!isInConditionalBranch()) {
2143     // Push an EH-only cleanup for the object now.
2144     // FIXME: When popping normal cleanups, we need to keep this EH cleanup
2145     // around in case a temporary's destructor throws an exception.
2146     if (cleanupKind & EHCleanup)
2147       EHStack.pushCleanup<DestroyObject>(
2148           static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
2149           destroyer, useEHCleanupForArray);
2150 
2151     return pushCleanupAfterFullExprWithActiveFlag<DestroyObject>(
2152         cleanupKind, Address::invalid(), addr, type, destroyer, useEHCleanupForArray);
2153   }
2154 
2155   // Otherwise, we should only destroy the object if it's been initialized.
2156   // Re-use the active flag and saved address across both the EH and end of
2157   // scope cleanups.
2158 
2159   using SavedType = typename DominatingValue<Address>::saved_type;
2160   using ConditionalCleanupType =
2161       EHScopeStack::ConditionalCleanup<DestroyObject, Address, QualType,
2162                                        Destroyer *, bool>;
2163 
2164   Address ActiveFlag = createCleanupActiveFlag();
2165   SavedType SavedAddr = saveValueInCond(addr);
2166 
2167   if (cleanupKind & EHCleanup) {
2168     EHStack.pushCleanup<ConditionalCleanupType>(
2169         static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), SavedAddr, type,
2170         destroyer, useEHCleanupForArray);
2171     initFullExprCleanupWithFlag(ActiveFlag);
2172   }
2173 
2174   pushCleanupAfterFullExprWithActiveFlag<ConditionalCleanupType>(
2175       cleanupKind, ActiveFlag, SavedAddr, type, destroyer,
2176       useEHCleanupForArray);
2177 }
2178 
2179 /// emitDestroy - Immediately perform the destruction of the given
2180 /// object.
2181 ///
2182 /// \param addr - the address of the object; a type*
2183 /// \param type - the type of the object; if an array type, all
2184 ///   objects are destroyed in reverse order
2185 /// \param destroyer - the function to call to destroy individual
2186 ///   elements
2187 /// \param useEHCleanupForArray - whether an EH cleanup should be
2188 ///   used when destroying array elements, in case one of the
2189 ///   destructions throws an exception
2190 void CodeGenFunction::emitDestroy(Address addr, QualType type,
2191                                   Destroyer *destroyer,
2192                                   bool useEHCleanupForArray) {
2193   const ArrayType *arrayType = getContext().getAsArrayType(type);
2194   if (!arrayType)
2195     return destroyer(*this, addr, type);
2196 
2197   llvm::Value *length = emitArrayLength(arrayType, type, addr);
2198 
2199   CharUnits elementAlign =
2200     addr.getAlignment()
2201         .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
2202 
2203   // Normally we have to check whether the array is zero-length.
2204   bool checkZeroLength = true;
2205 
2206   // But if the array length is constant, we can suppress that.
2207   if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
2208     // ...and if it's constant zero, we can just skip the entire thing.
2209     if (constLength->isZero()) return;
2210     checkZeroLength = false;
2211   }
2212 
2213   llvm::Value *begin = addr.getPointer();
2214   llvm::Value *end =
2215       Builder.CreateInBoundsGEP(addr.getElementType(), begin, length);
2216   emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2217                    checkZeroLength, useEHCleanupForArray);
2218 }
2219 
2220 /// emitArrayDestroy - Destroys all the elements of the given array,
2221 /// beginning from last to first.  The array cannot be zero-length.
2222 ///
2223 /// \param begin - a type* denoting the first element of the array
2224 /// \param end - a type* denoting one past the end of the array
2225 /// \param elementType - the element type of the array
2226 /// \param destroyer - the function to call to destroy elements
2227 /// \param useEHCleanup - whether to push an EH cleanup to destroy
2228 ///   the remaining elements in case the destruction of a single
2229 ///   element throws
2230 void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
2231                                        llvm::Value *end,
2232                                        QualType elementType,
2233                                        CharUnits elementAlign,
2234                                        Destroyer *destroyer,
2235                                        bool checkZeroLength,
2236                                        bool useEHCleanup) {
2237   assert(!elementType->isArrayType());
2238 
2239   // The basic structure here is a do-while loop, because we don't
2240   // need to check for the zero-element case.
2241   llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
2242   llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");
2243 
2244   if (checkZeroLength) {
2245     llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
2246                                                 "arraydestroy.isempty");
2247     Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
2248   }
2249 
2250   // Enter the loop body, making that address the current address.
2251   llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
2252   EmitBlock(bodyBB);
2253   llvm::PHINode *elementPast =
2254     Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
2255   elementPast->addIncoming(end, entryBB);
2256 
2257   // Shift the address back by one element.
2258   llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
2259   llvm::Value *element = Builder.CreateInBoundsGEP(
2260       elementPast->getType()->getPointerElementType(), elementPast, negativeOne,
2261       "arraydestroy.element");
2262 
2263   if (useEHCleanup)
2264     pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
2265                                    destroyer);
2266 
2267   // Perform the actual destruction there.
2268   destroyer(*this, Address(element, elementAlign), elementType);
2269 
2270   if (useEHCleanup)
2271     PopCleanupBlock();
2272 
2273   // Check whether we've reached the end.
2274   llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
2275   Builder.CreateCondBr(done, doneBB, bodyBB);
2276   elementPast->addIncoming(element, Builder.GetInsertBlock());
2277 
2278   // Done.
2279   EmitBlock(doneBB);
2280 }
2281 
2282 /// Perform partial array destruction as if in an EH cleanup.  Unlike
2283 /// emitArrayDestroy, the element type here may still be an array type.
2284 static void emitPartialArrayDestroy(CodeGenFunction &CGF,
2285                                     llvm::Value *begin, llvm::Value *end,
2286                                     QualType type, CharUnits elementAlign,
2287                                     CodeGenFunction::Destroyer *destroyer) {
2288   // If the element type is itself an array, drill down.
2289   unsigned arrayDepth = 0;
2290   while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
2291     // VLAs don't require a GEP index to walk into.
2292     if (!isa<VariableArrayType>(arrayType))
2293       arrayDepth++;
2294     type = arrayType->getElementType();
2295   }
2296 
2297   if (arrayDepth) {
2298     llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
2299 
2300     SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
2301     llvm::Type *elemTy = begin->getType()->getPointerElementType();
2302     begin = CGF.Builder.CreateInBoundsGEP(
2303         elemTy, begin, gepIndices, "pad.arraybegin");
2304     end = CGF.Builder.CreateInBoundsGEP(
2305         elemTy, end, gepIndices, "pad.arrayend");
2306   }
2307 
2308   // Destroy the array.  We don't ever need an EH cleanup because we
2309   // assume that we're in an EH cleanup ourselves, so a throwing
2310   // destructor causes an immediate terminate.
2311   CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2312                        /*checkZeroLength*/ true, /*useEHCleanup*/ false);
2313 }
2314 
2315 namespace {
2316   /// RegularPartialArrayDestroy - a cleanup which performs a partial
2317   /// array destroy where the end pointer is regularly determined and
2318   /// does not need to be loaded from a local.
2319   class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2320     llvm::Value *ArrayBegin;
2321     llvm::Value *ArrayEnd;
2322     QualType ElementType;
2323     CodeGenFunction::Destroyer *Destroyer;
2324     CharUnits ElementAlign;
2325   public:
2326     RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
2327                                QualType elementType, CharUnits elementAlign,
2328                                CodeGenFunction::Destroyer *destroyer)
2329       : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
2330         ElementType(elementType), Destroyer(destroyer),
2331         ElementAlign(elementAlign) {}
2332 
2333     void Emit(CodeGenFunction &CGF, Flags flags) override {
2334       emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
2335                               ElementType, ElementAlign, Destroyer);
2336     }
2337   };
2338 
2339   /// IrregularPartialArrayDestroy - a cleanup which performs a
2340   /// partial array destroy where the end pointer is irregularly
2341   /// determined and must be loaded from a local.
2342   class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2343     llvm::Value *ArrayBegin;
2344     Address ArrayEndPointer;
2345     QualType ElementType;
2346     CodeGenFunction::Destroyer *Destroyer;
2347     CharUnits ElementAlign;
2348   public:
2349     IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
2350                                  Address arrayEndPointer,
2351                                  QualType elementType,
2352                                  CharUnits elementAlign,
2353                                  CodeGenFunction::Destroyer *destroyer)
2354       : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
2355         ElementType(elementType), Destroyer(destroyer),
2356         ElementAlign(elementAlign) {}
2357 
2358     void Emit(CodeGenFunction &CGF, Flags flags) override {
2359       llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
2360       emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
2361                               ElementType, ElementAlign, Destroyer);
2362     }
2363   };
2364 } // end anonymous namespace
2365 
2366 /// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
2367 /// already-constructed elements of the given array.  The cleanup
2368 /// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
2369 ///
2370 /// \param elementType - the immediate element type of the array;
2371 ///   possibly still an array type
2372 void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2373                                                        Address arrayEndPointer,
2374                                                        QualType elementType,
2375                                                        CharUnits elementAlign,
2376                                                        Destroyer *destroyer) {
2377   pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
2378                                                     arrayBegin, arrayEndPointer,
2379                                                     elementType, elementAlign,
2380                                                     destroyer);
2381 }
2382 
2383 /// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
2384 /// already-constructed elements of the given array.  The cleanup
2385 /// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
2386 ///
2387 /// \param elementType - the immediate element type of the array;
2388 ///   possibly still an array type
2389 void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2390                                                      llvm::Value *arrayEnd,
2391                                                      QualType elementType,
2392                                                      CharUnits elementAlign,
2393                                                      Destroyer *destroyer) {
2394   pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
2395                                                   arrayBegin, arrayEnd,
2396                                                   elementType, elementAlign,
2397                                                   destroyer);
2398 }
2399 
2400 /// Lazily declare the @llvm.lifetime.start intrinsic.
2401 llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
2402   if (LifetimeStartFn)
2403     return LifetimeStartFn;
2404   LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
2405     llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
2406   return LifetimeStartFn;
2407 }
2408 
2409 /// Lazily declare the @llvm.lifetime.end intrinsic.
2410 llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
2411   if (LifetimeEndFn)
2412     return LifetimeEndFn;
2413   LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
2414     llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
2415   return LifetimeEndFn;
2416 }
2417 
2418 namespace {
2419   /// A cleanup to perform a release of an object at the end of a
2420   /// function.  This is used to balance out the incoming +1 of a
2421   /// ns_consumed argument when we can't reasonably do that just by
2422   /// not doing the initial retain for a __block argument.
2423   struct ConsumeARCParameter final : EHScopeStack::Cleanup {
2424     ConsumeARCParameter(llvm::Value *param,
2425                         ARCPreciseLifetime_t precise)
2426       : Param(param), Precise(precise) {}
2427 
2428     llvm::Value *Param;
2429     ARCPreciseLifetime_t Precise;
2430 
2431     void Emit(CodeGenFunction &CGF, Flags flags) override {
2432       CGF.EmitARCRelease(Param, Precise);
2433     }
2434   };
2435 } // end anonymous namespace
2436 
2437 /// Emit an alloca (or GlobalValue depending on target)
2438 /// for the specified parameter and set up LocalDeclMap.
2439 void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
2440                                    unsigned ArgNo) {
2441   // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
2442   assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
2443          "Invalid argument to EmitParmDecl");
2444 
2445   Arg.getAnyValue()->setName(D.getName());
2446 
2447   QualType Ty = D.getType();
2448 
2449   // Use better IR generation for certain implicit parameters.
2450   if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
2451     // The only implicit argument a block has is its literal.
2452     // This may be passed as an inalloca'ed value on Windows x86.
2453     if (BlockInfo) {
2454       llvm::Value *V = Arg.isIndirect()
2455                            ? Builder.CreateLoad(Arg.getIndirectAddress())
2456                            : Arg.getDirectValue();
2457       setBlockContextParameter(IPD, ArgNo, V);
2458       return;
2459     }
2460   }
2461 
2462   Address DeclPtr = Address::invalid();
2463   Address AllocaPtr = Address::invalid();
2464   bool DoStore = false;
2465   bool IsScalar = hasScalarEvaluationKind(Ty);
2466   // If we already have a pointer to the argument, reuse the input pointer.
2467   if (Arg.isIndirect()) {
2468     DeclPtr = Arg.getIndirectAddress();
2469     // If we have a prettier pointer type at this point, bitcast to that.
2470     unsigned AS = DeclPtr.getType()->getAddressSpace();
2471     llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
2472     if (DeclPtr.getType() != IRTy)
2473       DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());
2474     // Indirect argument is in alloca address space, which may be different
2475     // from the default address space.
2476     auto AllocaAS = CGM.getASTAllocaAddressSpace();
2477     auto *V = DeclPtr.getPointer();
2478     AllocaPtr = DeclPtr;
2479     auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
2480     auto DestLangAS =
2481         getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
2482     if (SrcLangAS != DestLangAS) {
2483       assert(getContext().getTargetAddressSpace(SrcLangAS) ==
2484              CGM.getDataLayout().getAllocaAddrSpace());
2485       auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
2486       auto *T = V->getType()->getPointerElementType()->getPointerTo(DestAS);
2487       DeclPtr = Address(getTargetHooks().performAddrSpaceCast(
2488                             *this, V, SrcLangAS, DestLangAS, T, true),
2489                         DeclPtr.getAlignment());
2490     }
2491 
2492     // Push a destructor cleanup for this parameter if the ABI requires it.
2493     // Don't push a cleanup in a thunk for a method that will also emit a
2494     // cleanup.
2495     if (Ty->isRecordType() && !CurFuncIsThunk &&
2496         Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
2497       if (QualType::DestructionKind DtorKind =
2498               D.needsDestruction(getContext())) {
2499         assert((DtorKind == QualType::DK_cxx_destructor ||
2500                 DtorKind == QualType::DK_nontrivial_c_struct) &&
2501                "unexpected destructor type");
2502         pushDestroy(DtorKind, DeclPtr, Ty);
2503         CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] =
2504             EHStack.stable_begin();
2505       }
2506     }
2507   } else {
2508     // Check if the parameter address is controlled by OpenMP runtime.
2509     Address OpenMPLocalAddr =
2510         getLangOpts().OpenMP
2511             ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
2512             : Address::invalid();
2513     if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
2514       DeclPtr = OpenMPLocalAddr;
2515       AllocaPtr = DeclPtr;
2516     } else {
2517       // Otherwise, create a temporary to hold the value.
2518       DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
2519                               D.getName() + ".addr", &AllocaPtr);
2520     }
2521     DoStore = true;
2522   }
2523 
2524   llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);
2525 
2526   LValue lv = MakeAddrLValue(DeclPtr, Ty);
2527   if (IsScalar) {
2528     Qualifiers qs = Ty.getQualifiers();
2529     if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
2530       // We honor __attribute__((ns_consumed)) for types with lifetime.
2531       // For __strong, it's handled by just skipping the initial retain;
2532       // otherwise we have to balance out the initial +1 with an extra
2533       // cleanup to do the release at the end of the function.
2534       bool isConsumed = D.hasAttr<NSConsumedAttr>();
2535 
2536       // If a parameter is pseudo-strong then we can omit the implicit retain.
2537       if (D.isARCPseudoStrong()) {
2538         assert(lt == Qualifiers::OCL_Strong &&
2539                "pseudo-strong variable isn't strong?");
2540         assert(qs.hasConst() && "pseudo-strong variable should be const!");
2541         lt = Qualifiers::OCL_ExplicitNone;
2542       }
2543 
2544       // Load objects passed indirectly.
2545       if (Arg.isIndirect() && !ArgVal)
2546         ArgVal = Builder.CreateLoad(DeclPtr);
2547 
2548       if (lt == Qualifiers::OCL_Strong) {
2549         if (!isConsumed) {
2550           if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
2551             // use objc_storeStrong(&dest, value) for retaining the
2552             // object. But first, store a null into 'dest' because
2553             // objc_storeStrong attempts to release its old value.
2554             llvm::Value *Null = CGM.EmitNullConstant(D.getType());
2555             EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
2556             EmitARCStoreStrongCall(lv.getAddress(*this), ArgVal, true);
2557             DoStore = false;
2558           }
2559           else
2560           // Don't use objc_retainBlock for block pointers, because we
2561           // don't want to Block_copy something just because we got it
2562           // as a parameter.
2563             ArgVal = EmitARCRetainNonBlock(ArgVal);
2564         }
2565       } else {
2566         // Push the cleanup for a consumed parameter.
2567         if (isConsumed) {
2568           ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
2569                                 ? ARCPreciseLifetime : ARCImpreciseLifetime);
2570           EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
2571                                                    precise);
2572         }
2573 
2574         if (lt == Qualifiers::OCL_Weak) {
2575           EmitARCInitWeak(DeclPtr, ArgVal);
2576           DoStore = false; // The weak init is a store, no need to do two.
2577         }
2578       }
2579 
2580       // Enter the cleanup scope.
2581       EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
2582     }
2583   }
2584 
2585   // Store the initial value into the alloca.
2586   if (DoStore)
2587     EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);
2588 
2589   setAddrOfLocalVar(&D, DeclPtr);
2590 
2591   // Emit debug info for param declarations in non-thunk functions.
2592   if (CGDebugInfo *DI = getDebugInfo()) {
2593     if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk) {
2594       llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable(
2595           &D, AllocaPtr.getPointer(), ArgNo, Builder);
2596       if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(&D))
2597         DI->getParamDbgMappings().insert({Var, DILocalVar});
2598     }
2599   }
2600 
2601   if (D.hasAttr<AnnotateAttr>())
2602     EmitVarAnnotations(&D, DeclPtr.getPointer());
2603 
2604   // We can only check return value nullability if all arguments to the
2605   // function satisfy their nullability preconditions. This makes it necessary
2606   // to emit null checks for args in the function body itself.
2607   if (requiresReturnValueNullabilityCheck()) {
2608     auto Nullability = Ty->getNullability(getContext());
2609     if (Nullability && *Nullability == NullabilityKind::NonNull) {
2610       SanitizerScope SanScope(this);
2611       RetValNullabilityPrecondition =
2612           Builder.CreateAnd(RetValNullabilityPrecondition,
2613                             Builder.CreateIsNotNull(Arg.getAnyValue()));
2614     }
2615   }
2616 }
2617 
2618 void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
2619                                             CodeGenFunction *CGF) {
2620   if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
2621     return;
2622   getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
2623 }
2624 
2625 void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
2626                                          CodeGenFunction *CGF) {
2627   if (!LangOpts.OpenMP || LangOpts.OpenMPSimd ||
2628       (!LangOpts.EmitAllDecls && !D->isUsed()))
2629     return;
2630   getOpenMPRuntime().emitUserDefinedMapper(D, CGF);
2631 }
2632 
2633 void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
2634   getOpenMPRuntime().processRequiresDirective(D);
2635 }
2636 
2637 void CodeGenModule::EmitOMPAllocateDecl(const OMPAllocateDecl *D) {
2638   for (const Expr *E : D->varlists()) {
2639     const auto *DE = cast<DeclRefExpr>(E);
2640     const auto *VD = cast<VarDecl>(DE->getDecl());
2641 
2642     // Skip all but globals.
2643     if (!VD->hasGlobalStorage())
2644       continue;
2645 
2646     // Check if the global has been materialized yet or not. If not, we are done
2647     // as any later generation will utilize the OMPAllocateDeclAttr. However, if
2648     // we already emitted the global we might have done so before the
2649     // OMPAllocateDeclAttr was attached, leading to the wrong address space
2650     // (potentially). While not pretty, common practise is to remove the old IR
2651     // global and generate a new one, so we do that here too. Uses are replaced
2652     // properly.
2653     StringRef MangledName = getMangledName(VD);
2654     llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
2655     if (!Entry)
2656       continue;
2657 
2658     // We can also keep the existing global if the address space is what we
2659     // expect it to be, if not, it is replaced.
2660     QualType ASTTy = VD->getType();
2661     clang::LangAS GVAS = GetGlobalVarAddressSpace(VD);
2662     auto TargetAS = getContext().getTargetAddressSpace(GVAS);
2663     if (Entry->getType()->getAddressSpace() == TargetAS)
2664       continue;
2665 
2666     // Make a new global with the correct type / address space.
2667     llvm::Type *Ty = getTypes().ConvertTypeForMem(ASTTy);
2668     llvm::PointerType *PTy = llvm::PointerType::get(Ty, TargetAS);
2669 
2670     // Replace all uses of the old global with a cast. Since we mutate the type
2671     // in place we neeed an intermediate that takes the spot of the old entry
2672     // until we can create the cast.
2673     llvm::GlobalVariable *DummyGV = new llvm::GlobalVariable(
2674         getModule(), Entry->getValueType(), false,
2675         llvm::GlobalValue::CommonLinkage, nullptr, "dummy", nullptr,
2676         llvm::GlobalVariable::NotThreadLocal, Entry->getAddressSpace());
2677     Entry->replaceAllUsesWith(DummyGV);
2678 
2679     Entry->mutateType(PTy);
2680     llvm::Constant *NewPtrForOldDecl =
2681         llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
2682             Entry, DummyGV->getType());
2683 
2684     // Now we have a casted version of the changed global, the dummy can be
2685     // replaced and deleted.
2686     DummyGV->replaceAllUsesWith(NewPtrForOldDecl);
2687     DummyGV->eraseFromParent();
2688   }
2689 }
2690