1 //===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Decl nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGBlocks.h"
15 #include "CGCXXABI.h"
16 #include "CGCleanup.h"
17 #include "CGDebugInfo.h"
18 #include "CGOpenCLRuntime.h"
19 #include "CGOpenMPRuntime.h"
20 #include "CodeGenFunction.h"
21 #include "CodeGenModule.h"
22 #include "ConstantEmitter.h"
23 #include "TargetInfo.h"
24 #include "clang/AST/ASTContext.h"
25 #include "clang/AST/CharUnits.h"
26 #include "clang/AST/Decl.h"
27 #include "clang/AST/DeclObjC.h"
28 #include "clang/AST/DeclOpenMP.h"
29 #include "clang/Basic/SourceManager.h"
30 #include "clang/Basic/TargetInfo.h"
31 #include "clang/CodeGen/CGFunctionInfo.h"
32 #include "clang/Frontend/CodeGenOptions.h"
33 #include "llvm/IR/DataLayout.h"
34 #include "llvm/IR/GlobalVariable.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/Type.h"
37 
38 using namespace clang;
39 using namespace CodeGen;
40 
/// EmitDecl - Emit code for a single declaration appearing in statement
/// context (i.e. inside a DeclStmt), dispatching on the declaration's kind.
void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  // These declaration kinds are never allowed to appear inside a
  // declaration statement; reaching one of them here indicates a bug in
  // the parser or in Sema.
  case Decl::BuiltinTemplate:
  case Decl::TranslationUnit:
  case Decl::ExternCContext:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::VarTemplateSpecialization:
  case Decl::VarTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXDeductionGuide:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::MSProperty:
  case Decl::IndirectField:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::VarTemplate:
  case Decl::FunctionTemplate:
  case Decl::TypeAliasTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::PragmaComment:
  case Decl::PragmaDetectMismatch:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::Export:
  case Decl::ObjCPropertyImpl:
  case Decl::FileScopeAsm:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
  case Decl::Captured:
  case Decl::ClassScopeFunctionSpecialization:
  case Decl::UsingShadow:
  case Decl::ConstructorUsingShadow:
  case Decl::ObjCTypeParam:
  case Decl::Binding:
    llvm_unreachable("Declaration should not be in declstmts!");
  case Decl::Function:  // void X();
  case Decl::Record:    // struct/union/class X;
  case Decl::Enum:      // enum X;
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::CXXRecord: // struct/union/class X; [C++]
  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
  case Decl::Label:        // __label__ x;
  case Decl::Import:
  case Decl::OMPThreadPrivate:
  case Decl::OMPCapturedExpr:
  case Decl::Empty:
    // None of these decls require codegen support.
    return;

  // The next four kinds require no object code; they only feed debug info,
  // so they are forwarded to CGDebugInfo when it is enabled.
  case Decl::NamespaceAlias:
    if (CGDebugInfo *DI = getDebugInfo())
        DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
    return;
  case Decl::Using:          // using X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
        DI->EmitUsingDecl(cast<UsingDecl>(D));
    return;
  case Decl::UsingPack:
    // A pack of using-declarations is emitted by emitting each expansion.
    for (auto *Using : cast<UsingPackDecl>(D).expansions())
      EmitDecl(*Using);
    return;
  case Decl::UsingDirective: // using namespace X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
    return;
  case Decl::Var:
  case Decl::Decomposition: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    EmitVarDecl(VD);
    // A decomposition declaration may introduce a holding variable per
    // binding (e.g. for tuple-like decompositions); emit those as well.
    if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
      for (auto *B : DD->bindings())
        if (auto *HD = B->getHoldingVar())
          EmitVarDecl(*HD);
    return;
  }

  case Decl::OMPDeclareReduction:
    return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);

  case Decl::Typedef:      // typedef int X;
  case Decl::TypeAlias: {  // using X = int; [C++0x]
    const TypedefNameDecl &TD = cast<TypedefNameDecl>(D);
    QualType Ty = TD.getUnderlyingType();

    // A typedef of a variably-modified type (e.g. a VLA) must evaluate its
    // size expressions here for their side effects.
    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  }
}
153 
154 /// EmitVarDecl - This method handles emission of any variable declaration
155 /// inside a function, including static vars etc.
156 void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
157   if (D.hasExternalStorage())
158     // Don't emit it now, allow it to be emitted lazily on its first use.
159     return;
160 
161   // Some function-scope variable does not have static storage but still
162   // needs to be emitted like a static variable, e.g. a function-scope
163   // variable in constant address space in OpenCL.
164   if (D.getStorageDuration() != SD_Automatic) {
165     // Static sampler variables translated to function calls.
166     if (D.getType()->isSamplerT())
167       return;
168 
169     llvm::GlobalValue::LinkageTypes Linkage =
170         CGM.getLLVMLinkageVarDefinition(&D, /*isConstant=*/false);
171 
172     // FIXME: We need to force the emission/use of a guard variable for
173     // some variables even if we can constant-evaluate them because
174     // we can't guarantee every translation unit will constant-evaluate them.
175 
176     return EmitStaticVarDecl(D, Linkage);
177   }
178 
179   if (D.getType().getAddressSpace() == LangAS::opencl_local)
180     return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);
181 
182   assert(D.hasLocalStorage());
183   return EmitAutoVarDecl(D);
184 }
185 
186 static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
187   if (CGM.getLangOpts().CPlusPlus)
188     return CGM.getMangledName(&D).str();
189 
190   // If this isn't C++, we don't need a mangled name, just a pretty one.
191   assert(!D.isExternallyVisible() && "name shouldn't matter");
192   std::string ContextName;
193   const DeclContext *DC = D.getDeclContext();
194   if (auto *CD = dyn_cast<CapturedDecl>(DC))
195     DC = cast<DeclContext>(CD->getNonClosureContext());
196   if (const auto *FD = dyn_cast<FunctionDecl>(DC))
197     ContextName = CGM.getMangledName(FD);
198   else if (const auto *BD = dyn_cast<BlockDecl>(DC))
199     ContextName = CGM.getBlockMangledName(GlobalDecl(), BD);
200   else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
201     ContextName = OMD->getSelector().getAsString();
202   else
203     llvm_unreachable("Unknown context for static var decl");
204 
205   ContextName += "." + D.getNameAsString();
206   return ContextName;
207 }
208 
/// getOrCreateStaticVarDecl - Return (creating on first request) the LLVM
/// global backing a function-local static variable, cast to the address
/// space the AST type expects.
llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
    const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
  // In general, we don't always emit static var decls once before we reference
  // them. It is possible to reference them before emitting the function that
  // contains them, and it is possible to emit the containing function multiple
  // times.
  if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
    return ExistingGV;

  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  // Use the label if the variable is renamed with the asm-label extension.
  std::string Name;
  if (D.hasAttr<AsmLabelAttr>())
    Name = getMangledName(&D);
  else
    Name = getStaticDeclName(*this, D);

  llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
  LangAS AS = GetGlobalVarAddressSpace(&D);
  unsigned TargetAS = getContext().getTargetAddressSpace(AS);

  // Local address space cannot have an initializer.
  llvm::Constant *Init = nullptr;
  if (Ty.getAddressSpace() != LangAS::opencl_local)
    Init = EmitNullConstant(Ty);
  else
    Init = llvm::UndefValue::get(LTy);

  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
      getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
      nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
  GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());

  // Put weak globals in a COMDAT so the linker merges duplicate definitions.
  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));

  if (D.getTLSKind())
    setTLSMode(GV, D);

  setGVProperties(GV, &D);

  // Make sure the result is of the correct type.
  LangAS ExpectedAS = Ty.getAddressSpace();
  llvm::Constant *Addr = GV;
  if (AS != ExpectedAS) {
    Addr = getTargetCodeGenInfo().performAddrSpaceCast(
        *this, GV, AS, ExpectedAS,
        LTy->getPointerTo(getContext().getTargetAddressSpace(ExpectedAS)));
  }

  setStaticLocalDeclAddress(&D, Addr);

  // Ensure that the static local gets initialized by making sure the parent
  // function gets emitted eventually.
  const Decl *DC = cast<Decl>(D.getDeclContext());

  // We can't name blocks or captured statements directly, so try to emit their
  // parents.
  if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
    DC = DC->getNonClosureContext();
    // FIXME: Ensure that global blocks get emitted.
    if (!DC)
      return Addr;
  }

  GlobalDecl GD;
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
    GD = GlobalDecl(CD, Ctor_Base);
  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
    GD = GlobalDecl(DD, Dtor_Base);
  else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    GD = GlobalDecl(FD);
  else {
    // Don't do anything for Obj-C method decls or global closures. We should
    // never defer them.
    assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
  }
  // Request the parent's address so deferred emission eventually runs it.
  if (GD.getDecl())
    (void)GetAddrOfGlobal(GD);

  return Addr;
}
293 
294 /// hasNontrivialDestruction - Determine whether a type's destruction is
295 /// non-trivial. If so, and the variable uses static initialization, we must
296 /// register its destructor to run on exit.
297 static bool hasNontrivialDestruction(QualType T) {
298   CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
299   return RD && !RD->hasTrivialDestructor();
300 }
301 
/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it.  If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one.  Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  ConstantEmitter emitter(*this);
  llvm::Constant *Init = emitter.tryEmitForInitializer(D);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getLangOpts().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (HaveInsertPoint()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      // Emit a guarded dynamic initialization for the variable.
      EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
    }
    return GV;
  }

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer.  (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
  if (GV->getType()->getElementType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    // Build a replacement global of the initializer's type, copying every
    // relevant property from the old one.
    GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
                                  OldGV->isConstant(),
                                  OldGV->getLinkage(), Init, "",
                                  /*InsertBefore*/ OldGV,
                                  OldGV->getThreadLocalMode(),
                           CGM.getContext().getTargetAddressSpace(D.getType()));
    GV->setVisibility(OldGV->getVisibility());
    GV->setDSOLocal(OldGV->isDSOLocal());
    GV->setComdat(OldGV->getComdat());

    // Steal the name of the old global
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global
    llvm::Constant *NewPtrForOldDecl =
    llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtrForOldDecl);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  GV->setConstant(CGM.isTypeConstant(D.getType(), true));
  GV->setInitializer(Init);

  emitter.finalize(GV);

  if (hasNontrivialDestruction(D.getType()) && HaveInsertPoint()) {
    // We have a constant initializer, but a nontrivial destructor. We still
    // need to perform a guarded "initialization" in order to register the
    // destructor.
    EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
  }

  return GV;
}
370 
/// EmitStaticVarDecl - Emit a function-scope variable with static storage
/// duration: obtain (or create) its backing global, attach any initializer
/// and attributes, and record its address for subsequent references.
void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                      llvm::GlobalValue::LinkageTypes Linkage) {
  // Check to see if we already have a global variable for this
  // declaration.  This can happen when double-emitting function
  // bodies, e.g. with complete and base constructors.
  llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
  CharUnits alignment = getContext().getDeclAlign(&D);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  setAddrOfLocalVar(&D, Address(addr, alignment));

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVariablyModifiedType(D.getType());

  // Save the type in case adding the initializer forces a type change.
  llvm::Type *expectedType = addr->getType();

  // addr may carry an address-space cast; strip it to reach the global.
  llvm::GlobalVariable *var =
    cast<llvm::GlobalVariable>(addr->stripPointerCasts());

  // CUDA's local and local static __shared__ variables should not
  // have any non-empty initializers. This is ensured by Sema.
  // Whatever initializer such variable may have when it gets here is
  // a no-op and should not be emitted.
  bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
                         D.hasAttr<CUDASharedAttr>();
  // If this value has an initializer, emit it.
  if (D.getInit() && !isCudaSharedVar)
    var = AddInitializerToStaticVarDecl(D, var);

  var->setAlignment(alignment.getQuantity());

  if (D.hasAttr<AnnotateAttr>())
    CGM.AddGlobalAnnotations(&D, var);

  // Honor #pragma clang section placement attributes.
  if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
    var->addAttribute("bss-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
    var->addAttribute("data-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
    var->addAttribute("rodata-section", SA->getName());

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    var->setSection(SA->getName());

  if (D.hasAttr<UsedAttr>())
    CGM.addUsedGlobal(var);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV uses of this constant will be invalid.
  llvm::Constant *castedAddr =
    llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
  if (var != castedAddr)
    LocalDeclMap.find(&D)->second = Address(castedAddr, alignment);
  CGM.setStaticLocalDeclAddress(&D, castedAddr);

  CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI &&
      CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(var, &D);
  }
}
444 
namespace {
  /// A cleanup that runs the appropriate destroyer for an object living at
  /// a fixed address.
  struct DestroyObject final : EHScopeStack::Cleanup {
    DestroyObject(Address addr, QualType type,
                  CodeGenFunction::Destroyer *destroyer,
                  bool useEHCleanupForArray)
      : addr(addr), type(type), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    Address addr;
    QualType type;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Don't use an EH cleanup recursively from an EH cleanup.
      bool useEHCleanupForArray =
        flags.isForNormalCleanup() && this->useEHCleanupForArray;

      CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
    }
  };

  /// A cleanup that calls the destructor of an NRVO candidate variable,
  /// skipping the call on the normal path if the NRVO flag records that the
  /// object was returned by the named-return-value optimization.
  struct DestroyNRVOVariable final : EHScopeStack::Cleanup {
    DestroyNRVOVariable(Address addr,
                        const CXXDestructorDecl *Dtor,
                        llvm::Value *NRVOFlag)
      : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(addr) {}

    const CXXDestructorDecl *Dtor;
    llvm::Value *NRVOFlag;
    Address Loc;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Along the exceptions path we always execute the dtor.
      bool NRVO = flags.isForNormalCleanup() && NRVOFlag;

      llvm::BasicBlock *SkipDtorBB = nullptr;
      if (NRVO) {
        // If we exited via NRVO, we skip the destructor call.
        llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
        SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
        llvm::Value *DidNRVO =
          CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
        CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
        CGF.EmitBlock(RunDtorBB);
      }

      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false,
                                Loc);

      if (NRVO) CGF.EmitBlock(SkipDtorBB);
    }
  };

  /// A cleanup that restores the stack pointer from a previously saved slot
  /// via llvm.stackrestore (used to reclaim VLA allocations).
  struct CallStackRestore final : EHScopeStack::Cleanup {
    Address Stack;
    CallStackRestore(Address Stack) : Stack(Stack) {}
    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *V = CGF.Builder.CreateLoad(Stack);
      llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
      CGF.Builder.CreateCall(F, V);
    }
  };

  /// A cleanup that re-reads a variable at end of scope and extends its GC
  /// lifetime, keeping the object alive for the whole scope.
  struct ExtendGCLifetime final : EHScopeStack::Cleanup {
    const VarDecl &Var;
    ExtendGCLifetime(const VarDecl *var) : Var(*var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Compute the address of the local variable, in case it's a
      // byref or something.
      DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
                                                SourceLocation());
      CGF.EmitExtendGCLifetime(value);
    }
  };

  /// A cleanup that invokes the function named by __attribute__((cleanup)),
  /// passing the address of the annotated variable.
  struct CallCleanupFunction final : EHScopeStack::Cleanup {
    llvm::Constant *CleanupFn;
    const CGFunctionInfo &FnInfo;
    const VarDecl &Var;

    CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                        const VarDecl *Var)
      : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      // Compute the address of the local variable, in case it's a byref
      // or something.
      llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer();

      // In some cases, the type of the function argument will be different from
      // the type of the pointer. An example of this is
      // void f(void* arg);
      // __attribute__((cleanup(f))) void *g;
      //
      // To fix this we insert a bitcast here.
      QualType ArgTy = FnInfo.arg_begin()->type;
      llvm::Value *Arg =
        CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

      CallArgList Args;
      Args.add(RValue::get(Arg),
               CGF.getContext().getPointerType(Var.getType()));
      auto Callee = CGCallee::forDirect(CleanupFn);
      CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
    }
  };
} // end anonymous namespace
560 
561 /// EmitAutoVarWithLifetime - Does the setup required for an automatic
562 /// variable with lifetime.
563 static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
564                                     Address addr,
565                                     Qualifiers::ObjCLifetime lifetime) {
566   switch (lifetime) {
567   case Qualifiers::OCL_None:
568     llvm_unreachable("present but none");
569 
570   case Qualifiers::OCL_ExplicitNone:
571     // nothing to do
572     break;
573 
574   case Qualifiers::OCL_Strong: {
575     CodeGenFunction::Destroyer *destroyer =
576       (var.hasAttr<ObjCPreciseLifetimeAttr>()
577        ? CodeGenFunction::destroyARCStrongPrecise
578        : CodeGenFunction::destroyARCStrongImprecise);
579 
580     CleanupKind cleanupKind = CGF.getARCCleanupKind();
581     CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
582                     cleanupKind & EHCleanup);
583     break;
584   }
585   case Qualifiers::OCL_Autoreleasing:
586     // nothing to do
587     break;
588 
589   case Qualifiers::OCL_Weak:
590     // __weak objects always get EH cleanups; otherwise, exceptions
591     // could cause really nasty crashes instead of mere leaks.
592     CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
593                     CodeGenFunction::destroyARCWeak,
594                     /*useEHCleanup*/ true);
595     break;
596   }
597 }
598 
599 static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
600   if (const Expr *e = dyn_cast<Expr>(s)) {
601     // Skip the most common kinds of expressions that make
602     // hierarchy-walking expensive.
603     s = e = e->IgnoreParenCasts();
604 
605     if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
606       return (ref->getDecl() == &var);
607     if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
608       const BlockDecl *block = be->getBlockDecl();
609       for (const auto &I : block->captures()) {
610         if (I.getVariable() == &var)
611           return true;
612       }
613     }
614   }
615 
616   for (const Stmt *SubStmt : s->children())
617     // SubStmt might be null; as in missing decl or conditional of an if-stmt.
618     if (SubStmt && isAccessedBy(var, SubStmt))
619       return true;
620 
621   return false;
622 }
623 
624 static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
625   if (!decl) return false;
626   if (!isa<VarDecl>(decl)) return false;
627   const VarDecl *var = cast<VarDecl>(decl);
628   return isAccessedBy(*var, e);
629 }
630 
/// tryEmitARCCopyWeakInit - Attempt to initialize the __weak destination
/// directly from a __weak source l-value using objc_copyWeak (or, for an
/// xvalue source, objc_moveWeak), avoiding an intermediate retain. Returns
/// true if the initialization was fully emitted here.
static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
                                   const LValue &destLV, const Expr *init) {
  // Tracks whether we peeled off any representation-preserving cast, in
  // which case the source address needs an element-type bitcast below.
  bool needsCast = false;

  while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
    switch (castExpr->getCastKind()) {
    // Look through casts that don't require representation changes.
    case CK_NoOp:
    case CK_BitCast:
    case CK_BlockPointerToObjCPointerCast:
      needsCast = true;
      break;

    // If we find an l-value to r-value cast from a __weak variable,
    // emit this operation as a copy or move.
    case CK_LValueToRValue: {
      const Expr *srcExpr = castExpr->getSubExpr();
      if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
        return false;

      // Emit the source l-value.
      LValue srcLV = CGF.EmitLValue(srcExpr);

      // Handle a formal type change to avoid asserting.
      auto srcAddr = srcLV.getAddress();
      if (needsCast) {
        srcAddr = CGF.Builder.CreateElementBitCast(srcAddr,
                                         destLV.getAddress().getElementType());
      }

      // If it was an l-value, use objc_copyWeak.
      if (srcExpr->getValueKind() == VK_LValue) {
        CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr);
      } else {
        assert(srcExpr->getValueKind() == VK_XValue);
        CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr);
      }
      return true;
    }

    // Stop at anything else.
    default:
      return false;
    }

    // Continue peeling casts off the initializer.
    init = castExpr->getSubExpr();
  }
  return false;
}
680 
681 static void drillIntoBlockVariable(CodeGenFunction &CGF,
682                                    LValue &lvalue,
683                                    const VarDecl *var) {
684   lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
685 }
686 
687 void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
688                                            SourceLocation Loc) {
689   if (!SanOpts.has(SanitizerKind::NullabilityAssign))
690     return;
691 
692   auto Nullability = LHS.getType()->getNullability(getContext());
693   if (!Nullability || *Nullability != NullabilityKind::NonNull)
694     return;
695 
696   // Check if the right hand side of the assignment is nonnull, if the left
697   // hand side must be nonnull.
698   SanitizerScope SanScope(this);
699   llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
700   llvm::Constant *StaticData[] = {
701       EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
702       llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
703       llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
704   EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
705             SanitizerHandler::TypeMismatch, StaticData, RHS);
706 }
707 
/// EmitScalarInit - Emit the scalar initializer \p init into \p lvalue,
/// honoring the destination's Objective-C ownership qualification. \p D is
/// the declaration being initialized; it is used to drill into __block
/// storage when \p capturedByInit is set.
void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
  // Without an ObjC lifetime this is a plain scalar store.
  if (!lifetime) {
    llvm::Value *value = EmitScalarExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitNullabilityCheck(lvalue, value, init->getExprLoc());
    EmitStoreThroughLValue(RValue::get(value), lvalue, true);
    return;
  }

  if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
    init = DIE->getExpr();

  // If we're emitting a value with lifetime, we have to do the
  // initialization *before* we leave the cleanup scopes.
  if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(init)) {
    enterFullExpression(ewc);
    init = ewc->getSubExpr();
  }
  CodeGenFunction::RunCleanupsScope Scope(*this);

  // We have to maintain the illusion that the variable is
  // zero-initialized.  If the variable might be accessed in its
  // initializer, zero-initialize before running the initializer, then
  // actually perform the initialization with an assign.
  bool accessedByInit = false;
  if (lifetime != Qualifiers::OCL_ExplicitNone)
    accessedByInit = (capturedByInit || isAccessedBy(D, init));
  if (accessedByInit) {
    LValue tempLV = lvalue;
    // Drill down to the __block object if necessary.
    if (capturedByInit) {
      // We can use a simple GEP for this because it can't have been
      // moved yet.
      tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
                                              cast<VarDecl>(D),
                                              /*follow*/ false));
    }

    auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
    llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());

    // If __weak, we want to use a barrier under certain conditions.
    if (lifetime == Qualifiers::OCL_Weak)
      EmitARCInitWeak(tempLV.getAddress(), zero);

    // Otherwise just do a simple store.
    else
      EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
  }

  // Emit the initializer.
  llvm::Value *value = nullptr;

  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    value = EmitARCUnsafeUnretainedScalarExpr(init);
    break;

  case Qualifiers::OCL_Strong: {
    // Retain the value so the destination owns a +1 reference.
    value = EmitARCRetainScalarExpr(init);
    break;
  }

  case Qualifiers::OCL_Weak: {
    // If it's not accessed by the initializer, try to emit the
    // initialization with a copy or move.
    if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
      return;
    }

    // No way to optimize a producing initializer into this.  It's not
    // worth optimizing for, because the value will immediately
    // disappear in the common case.
    value = EmitScalarExpr(init);

    if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    // If already accessed, the slot was zero-initialized above, so store
    // through the weak barrier; otherwise initialize it fresh.
    if (accessedByInit)
      EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
    else
      EmitARCInitWeak(lvalue.getAddress(), value);
    return;
  }

  case Qualifiers::OCL_Autoreleasing:
    value = EmitARCRetainAutoreleaseScalarExpr(init);
    break;
  }

  if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));

  EmitNullabilityCheck(lvalue, value, init->getExprLoc());

  // If the variable might have been accessed by its initializer, we
  // might have to initialize with a barrier.  We have to do this for
  // both __weak and __strong, but __weak got filtered out above.
  if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
    EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
    EmitARCRelease(oldValue, ARCImpreciseLifetime);
    return;
  }

  EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}
818 
819 /// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
820 /// non-zero parts of the specified initializer with equal or fewer than
821 /// NumStores scalar stores.
822 static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
823                                                 unsigned &NumStores) {
824   // Zero and Undef never requires any extra stores.
825   if (isa<llvm::ConstantAggregateZero>(Init) ||
826       isa<llvm::ConstantPointerNull>(Init) ||
827       isa<llvm::UndefValue>(Init))
828     return true;
829   if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
830       isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
831       isa<llvm::ConstantExpr>(Init))
832     return Init->isNullValue() || NumStores--;
833 
834   // See if we can emit each element.
835   if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
836     for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
837       llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
838       if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
839         return false;
840     }
841     return true;
842   }
843 
844   if (llvm::ConstantDataSequential *CDS =
845         dyn_cast<llvm::ConstantDataSequential>(Init)) {
846     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
847       llvm::Constant *Elt = CDS->getElementAsConstant(i);
848       if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
849         return false;
850     }
851     return true;
852   }
853 
854   // Anything else is hard and scary.
855   return false;
856 }
857 
858 /// emitStoresForInitAfterMemset - For inits that
859 /// canEmitInitWithFewStoresAfterMemset returned true for, emit the scalar
860 /// stores that would be required.
861 static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
862                                          bool isVolatile, CGBuilderTy &Builder) {
863   assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
864          "called emitStoresForInitAfterMemset for zero or undef value.");
865 
866   if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
867       isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
868       isa<llvm::ConstantExpr>(Init)) {
869     Builder.CreateDefaultAlignedStore(Init, Loc, isVolatile);
870     return;
871   }
872 
873   if (llvm::ConstantDataSequential *CDS =
874           dyn_cast<llvm::ConstantDataSequential>(Init)) {
875     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
876       llvm::Constant *Elt = CDS->getElementAsConstant(i);
877 
878       // If necessary, get a pointer to the element and emit it.
879       if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
880         emitStoresForInitAfterMemset(
881             Elt, Builder.CreateConstGEP2_32(Init->getType(), Loc, 0, i),
882             isVolatile, Builder);
883     }
884     return;
885   }
886 
887   assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
888          "Unknown value type!");
889 
890   for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
891     llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
892 
893     // If necessary, get a pointer to the element and emit it.
894     if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
895       emitStoresForInitAfterMemset(
896           Elt, Builder.CreateConstGEP2_32(Init->getType(), Loc, 0, i),
897           isVolatile, Builder);
898   }
899 }
900 
901 /// shouldUseMemSetPlusStoresToInitialize - Decide whether we should use memset
902 /// plus some stores to initialize a local variable instead of using a memcpy
903 /// from a constant global.  It is beneficial to use memset if the global is all
904 /// zeros, or mostly zeros and large.
905 static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init,
906                                                   uint64_t GlobalSize) {
907   // If a global is all zeros, always use a memset.
908   if (isa<llvm::ConstantAggregateZero>(Init)) return true;
909 
910   // If a non-zero global is <= 32 bytes, always use a memcpy.  If it is large,
911   // do it if it will require 6 or fewer scalar stores.
912   // TODO: Should budget depends on the size?  Avoiding a large global warrants
913   // plopping in more stores.
914   unsigned StoreBudget = 6;
915   uint64_t SizeLimit = 32;
916 
917   return GlobalSize > SizeLimit &&
918          canEmitInitWithFewStoresAfterMemset(Init, StoreBudget);
919 }
920 
921 /// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
922 /// variable declaration with auto, register, or no storage class specifier.
923 /// These turn into simple stack objects, or GlobalValues depending on target.
924 void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
925   AutoVarEmission emission = EmitAutoVarAlloca(D);
926   EmitAutoVarInit(emission);
927   EmitAutoVarCleanups(emission);
928 }
929 
930 /// Emit a lifetime.begin marker if some criteria are satisfied.
931 /// \return a pointer to the temporary size Value if a marker was emitted, null
932 /// otherwise
933 llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
934                                                 llvm::Value *Addr) {
935   if (!ShouldEmitLifetimeMarkers)
936     return nullptr;
937 
938   llvm::Value *SizeV = llvm::ConstantInt::get(Int64Ty, Size);
939   Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
940   llvm::CallInst *C =
941       Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
942   C->setDoesNotThrow();
943   return SizeV;
944 }
945 
946 void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
947   Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
948   llvm::CallInst *C =
949       Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
950   C->setDoesNotThrow();
951 }
952 
// Emits the size expression for every VLA dimension of D, spilling each
// non-constant size into a stack temporary, and (if debug info is enabled)
// registers each dimension's size with CGDebugInfo so DISubranges can
// reference it.
void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
    CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
  // For each dimension stores its QualType and corresponding
  // size-expression Value.
  SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;

  // Break down the array into individual dimensions.
  QualType Type1D = D.getType();
  while (getContext().getAsVariableArrayType(Type1D)) {
    auto VlaSize = getVLAElements1D(Type1D);
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      // Constant-size dimension: record the constant directly.
      Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
    else {
      // Runtime-size dimension: spill the computed element count into a
      // named alloca ("__vla_expr") so the debug declare below has a
      // memory location to describe.
      auto SizeExprAddr = CreateDefaultAlignTempAlloca(
          VlaSize.NumElts->getType(), "__vla_expr");
      Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
      Dimensions.emplace_back(SizeExprAddr.getPointer(),
                              Type1D.getUnqualifiedType());
    }
    // Peel off this dimension and continue with the element type.
    Type1D = VlaSize.Type;
  }

  if (!EmitDebugInfo)
    return;

  // Register each dimension's size-expression with a DILocalVariable,
  // so that it can be used by CGDebugInfo when instantiating a DISubrange
  // to describe this array.
  for (auto &VlaSize : Dimensions) {
    llvm::Metadata *MD;
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      // Constant dimension: wrap the constant directly as metadata.
      MD = llvm::ConstantAsMetadata::get(C);
    else {
      // Create an artificial VarDecl to generate debug info for.
      // Its name is taken from the "__vla_expr" alloca created above.
      IdentifierInfo &NameIdent = getContext().Idents.getOwn(
          cast<llvm::AllocaInst>(VlaSize.NumElts)->getName());
      // Build an unsigned integer type matching the bit-width of the
      // stored size value.
      auto VlaExprTy = VlaSize.NumElts->getType()->getPointerElementType();
      auto QT = getContext().getIntTypeForBitwidth(
          VlaExprTy->getScalarSizeInBits(), false);
      auto *ArtificialDecl = VarDecl::Create(
          getContext(), const_cast<DeclContext *>(D.getDeclContext()),
          D.getLocation(), D.getLocation(), &NameIdent, QT,
          getContext().CreateTypeSourceInfo(QT), SC_Auto);
      ArtificialDecl->setImplicit();

      MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
                                         Builder);
    }
    assert(MD && "No Size expression debug node created");
    DI->registerVLASizeExpression(VlaSize.Type, MD);
  }
}
1005 
/// EmitAutoVarAlloca - Emit the alloca and debug information for a
/// local variable.  Does not emit initialization or destruction.
CodeGenFunction::AutoVarEmission
CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
  QualType Ty = D.getType();
  // Locals live in the default address space, or in OpenCL's private
  // address space when compiling OpenCL.
  assert(
      Ty.getAddressSpace() == LangAS::Default ||
      (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));

  AutoVarEmission emission(D);

  // __block variables are emitted through a byref structure rather than a
  // plain alloca of the variable's type.
  bool isByRef = D.hasAttr<BlocksAttr>();
  emission.IsByRef = isByRef;

  CharUnits alignment = getContext().getDeclAlign(&D);

  // If the type is variably-modified, emit all the VLA sizes for it.
  if (Ty->isVariablyModifiedType())
    EmitVariablyModifiedType(Ty);

  auto *DI = getDebugInfo();
  bool EmitDebugInfo = DI && CGM.getCodeGenOpts().getDebugInfo() >=
                                 codegenoptions::LimitedDebugInfo;

  Address address = Address::invalid();
  if (Ty->isConstantSizeType()) {
    // Named return value optimization applies only when constructor
    // elision is allowed and Sema marked the variable as NRVO-eligible.
    bool NRVO = getLangOpts().ElideConstructors &&
      D.isNRVOVariable();

    // If this value is an array or struct with a statically determinable
    // constant initializer, there are optimizations we can do.
    //
    // TODO: We should constant-evaluate the initializer of any variable,
    // as long as it is initialized by a constant expression. Currently,
    // isConstantInitializer produces wrong answers for structs with
    // reference or bitfield members, and a few other cases, and checking
    // for POD-ness protects us from some of these.
    if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
        (D.isConstexpr() ||
         ((Ty.isPODType(getContext()) ||
           getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
          D.getInit()->isConstantInitializer(getContext(), false)))) {

      // If the variable's a const type, and it's neither an NRVO
      // candidate nor a __block variable and has no mutable members,
      // emit it as a global instead.
      // Exception is if a variable is located in non-constant address space
      // in OpenCL.
      if ((!getLangOpts().OpenCL ||
           Ty.getAddressSpace() == LangAS::opencl_constant) &&
          (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
           CGM.isTypeConstant(Ty, true))) {
        EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);

        // Signal this condition to later callbacks.
        emission.Addr = Address::invalid();
        assert(emission.wasEmittedAsGlobal());
        return emission;
      }

      // Otherwise, tell the initialization code that we're in this case.
      emission.IsConstantAggregate = true;
    }

    // A normal fixed sized variable becomes an alloca in the entry block,
    // unless it's an NRVO variable.

    if (NRVO) {
      // The named return value optimization: allocate this variable in the
      // return slot, so that we can elide the copy when returning this
      // variable (C++0x [class.copy]p34).
      address = ReturnValue;

      if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
        if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
          // Create a flag that is used to indicate when the NRVO was applied
          // to this variable. Set it to zero to indicate that NRVO was not
          // applied.
          llvm::Value *Zero = Builder.getFalse();
          Address NRVOFlag =
            CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
          EnsureInsertPoint();
          Builder.CreateStore(Zero, NRVOFlag);

          // Record the NRVO flag for this variable.
          NRVOFlags[&D] = NRVOFlag.getPointer();
          emission.NRVOFlag = NRVOFlag.getPointer();
        }
      }
    } else {
      // Choose the type and alignment of the alloca: the byref struct
      // layout for __block variables, the converted type otherwise.
      CharUnits allocaAlignment;
      llvm::Type *allocaTy;
      if (isByRef) {
        auto &byrefInfo = getBlockByrefInfo(&D);
        allocaTy = byrefInfo.Type;
        allocaAlignment = byrefInfo.ByrefAlignment;
      } else {
        allocaTy = ConvertTypeForMem(Ty);
        allocaAlignment = alignment;
      }

      // Create the alloca.  Note that we set the name separately from
      // building the instruction so that it's there even in no-asserts
      // builds.
      address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName());

      // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
      // the catch parameter starts in the catchpad instruction, and we can't
      // insert code in those basic blocks.
      bool IsMSCatchParam =
          D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();

      // Emit a lifetime intrinsic if meaningful. There's no point in doing this
      // if we don't have a valid insertion point (?).
      if (HaveInsertPoint() && !IsMSCatchParam) {
        // If there's a jump into the lifetime of this variable, its lifetime
        // gets broken up into several regions in IR, which requires more work
        // to handle correctly. For now, just omit the intrinsics; this is a
        // rare case, and it's better to just be conservatively correct.
        // PR28267.
        //
        // We have to do this in all language modes if there's a jump past the
        // declaration. We also have to do it in C if there's a jump to an
        // earlier point in the current block because non-VLA lifetimes begin as
        // soon as the containing block is entered, not when its variables
        // actually come into scope; suppressing the lifetime annotations
        // completely in this case is unnecessarily pessimistic, but again, this
        // is rare.
        if (!Bypasses.IsBypassed(&D) &&
            !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
          uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
          emission.SizeForLifetimeMarkers =
              EmitLifetimeStart(size, address.getPointer());
        }
      } else {
        assert(!emission.useLifetimeMarkers());
      }
    }
  } else {
    // Variable-length array: allocate with a dynamic element count, after
    // saving the stack pointer so the space is reclaimed on scope exit.
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      Address Stack =
        CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");

      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);
      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      // Push a cleanup block and restore the stack there.
      // FIXME: in general circumstances, this should be an EH cleanup.
      pushStackRestore(NormalCleanup, Stack);
    }

    auto VlaSize = getVLASize(Ty);
    llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);

    // Allocate memory for the array.
    address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts);

    // If we have debug info enabled, properly describe the VLA dimensions for
    // this type by registering the vla size expression for each of the
    // dimensions.
    EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
  }

  // Record the address so later references to D resolve to this storage.
  setAddrOfLocalVar(&D, address);
  emission.Addr = address;

  // Emit debug info for local var declaration.
  if (EmitDebugInfo && HaveInsertPoint()) {
    DI->setLocation(D.getLocation());
    (void)DI->EmitDeclareOfAutoVariable(&D, address.getPointer(), Builder);
  }

  if (D.hasAttr<AnnotateAttr>())
    EmitVarAnnotations(&D, address.getPointer());

  // Make sure we call @llvm.lifetime.end.
  if (emission.useLifetimeMarkers())
    EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                         emission.getAllocatedAddress(),
                                         emission.getSizeForLifetimeMarkers());

  return emission;
}
1195 
1196 /// Determines whether the given __block variable is potentially
1197 /// captured by the given expression.
1198 static bool isCapturedBy(const VarDecl &var, const Expr *e) {
1199   // Skip the most common kinds of expressions that make
1200   // hierarchy-walking expensive.
1201   e = e->IgnoreParenCasts();
1202 
1203   if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
1204     const BlockDecl *block = be->getBlockDecl();
1205     for (const auto &I : block->captures()) {
1206       if (I.getVariable() == &var)
1207         return true;
1208     }
1209 
1210     // No need to walk into the subexpressions.
1211     return false;
1212   }
1213 
1214   if (const StmtExpr *SE = dyn_cast<StmtExpr>(e)) {
1215     const CompoundStmt *CS = SE->getSubStmt();
1216     for (const auto *BI : CS->body())
1217       if (const auto *E = dyn_cast<Expr>(BI)) {
1218         if (isCapturedBy(var, E))
1219             return true;
1220       }
1221       else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
1222           // special case declarations
1223           for (const auto *I : DS->decls()) {
1224               if (const auto *VD = dyn_cast<VarDecl>((I))) {
1225                 const Expr *Init = VD->getInit();
1226                 if (Init && isCapturedBy(var, Init))
1227                   return true;
1228               }
1229           }
1230       }
1231       else
1232         // FIXME. Make safe assumption assuming arbitrary statements cause capturing.
1233         // Later, provide code to poke into statements for capture analysis.
1234         return true;
1235     return false;
1236   }
1237 
1238   for (const Stmt *SubStmt : e->children())
1239     if (isCapturedBy(var, cast<Expr>(SubStmt)))
1240       return true;
1241 
1242   return false;
1243 }
1244 
1245 /// \brief Determine whether the given initializer is trivial in the sense
1246 /// that it requires no code to be generated.
1247 bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
1248   if (!Init)
1249     return true;
1250 
1251   if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
1252     if (CXXConstructorDecl *Constructor = Construct->getConstructor())
1253       if (Constructor->isTrivial() &&
1254           Constructor->isDefaultConstructor() &&
1255           !Construct->requiresZeroInitialization())
1256         return true;
1257 
1258   return false;
1259 }
1260 
// Emit the initialization for a local variable previously allocated by
// EmitAutoVarAlloca, choosing among: default-init for non-trivial C structs,
// a general expression emit, a direct scalar store, memset+stores, or a
// memcpy from a private constant global.
void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  const VarDecl &D = *emission.Variable;
  // Attribute the implicit initialization code to an artificial location.
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
  QualType type = D.getType();

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!Init || !ContainsLabel(Init)) return;
    EnsureInsertPoint();
  }

  // Initialize the structure of a __block variable.
  if (emission.IsByRef)
    emitByrefStructureInit(emission);

  // Initialize the variable here if it doesn't have a initializer and it is a
  // C struct that is non-trivial to initialize or an array containing such a
  // struct.
  if (!Init &&
      type.isNonTrivialToPrimitiveDefaultInitialize() ==
          QualType::PDIK_Struct) {
    LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
    if (emission.IsByRef)
      drillIntoBlockVariable(*this, Dst, &D);
    defaultInitNonTrivialCStructVar(Dst);
    return;
  }

  // Trivial initializers (none, or a trivial default ctor) emit nothing.
  if (isTrivialInitializer(Init))
    return;

  // Check whether this is a byref variable that's potentially
  // captured and moved by its own initializer.  If so, we'll need to
  // emit the initializer first, then copy into the variable.
  bool capturedByInit = emission.IsByRef && isCapturedBy(D, Init);

  // When captured by its own initializer, write to the original (unforwarded)
  // byref address; otherwise use the variable's object address.
  Address Loc =
    capturedByInit ? emission.Addr : emission.getObjectAddress(*this);

  // Try to fold the entire initializer to an LLVM constant.
  llvm::Constant *constant = nullptr;
  if (emission.IsConstantAggregate || D.isConstexpr()) {
    assert(!capturedByInit && "constant init contains a capturing block?");
    constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
  }

  // Not constant-foldable: emit the initializer expression generically.
  if (!constant) {
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitExprAsInit(Init, &D, lv, capturedByInit);
  }

  if (!emission.IsConstantAggregate) {
    // For simple scalar/complex initialization, store the value directly.
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitStoreThroughLValue(RValue::get(constant), lv, true);
  }

  // If this is a simple aggregate initialization, we can optimize it
  // in various ways.
  bool isVolatile = type.isVolatileQualified();

  llvm::Value *SizeVal =
    llvm::ConstantInt::get(IntPtrTy,
                           getContext().getTypeSizeInChars(type).getQuantity());

  llvm::Type *BP = AllocaInt8PtrTy;
  if (Loc.getType() != BP)
    Loc = Builder.CreateBitCast(Loc, BP);

  // If the initializer is all or mostly zeros, codegen with memset then do
  // a few stores afterward.
  if (shouldUseMemSetPlusStoresToInitialize(constant,
                CGM.getDataLayout().getTypeAllocSize(constant->getType()))) {
    Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
                         isVolatile);
    // Zero and undef don't require a stores.
    if (!constant->isNullValue() && !isa<llvm::UndefValue>(constant)) {
      // Cast back to the initializer's type so element GEPs line up.
      Loc = Builder.CreateBitCast(Loc,
        constant->getType()->getPointerTo(Loc.getAddressSpace()));
      emitStoresForInitAfterMemset(constant, Loc.getPointer(),
                                   isVolatile, Builder);
    }
  } else {
    // Otherwise, create a temporary global with the initializer then
    // memcpy from the global to the alloca.
    std::string Name = getStaticDeclName(CGM, D);
    unsigned AS = 0;
    if (getLangOpts().OpenCL) {
      // In OpenCL, the backing global must live in the constant address space.
      AS = CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant);
      BP = llvm::PointerType::getInt8PtrTy(getLLVMContext(), AS);
    }
    llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
                               llvm::GlobalValue::PrivateLinkage,
                               constant, Name, nullptr,
                               llvm::GlobalValue::NotThreadLocal, AS);
    GV->setAlignment(Loc.getAlignment().getQuantity());
    GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

    Address SrcPtr = Address(GV, Loc.getAlignment());
    if (SrcPtr.getType() != BP)
      SrcPtr = Builder.CreateBitCast(SrcPtr, BP);

    Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, isVolatile);
  }
}
1377 
/// Emit an expression as an initializer for a variable at the given
/// location.  The expression is not necessarily the normal
/// initializer for the variable, and the address is not necessarily
/// its normal location.
///
/// \param init the initializing expression
/// \param D the variable to act as if we're initializing
/// \param lvalue the lvalue to initialize; its address points to the
///   LLVM mapping of the variable's type
/// \param capturedByInit true if the variable is a __block variable
///   whose address is potentially changed by the initializer
void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  QualType type = D->getType();

  // Reference variables bind to the initializer rather than copying it.
  if (type->isReferenceType()) {
    RValue rvalue = EmitReferenceBindingToExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreThroughLValue(rvalue, lvalue, true);
    return;
  }
  // Otherwise dispatch on how values of this type are evaluated.
  switch (getEvaluationKind(type)) {
  case TEK_Scalar:
    EmitScalarInit(init, D, lvalue, capturedByInit);
    return;
  case TEK_Complex: {
    ComplexPairTy complex = EmitComplexExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreOfComplex(complex, lvalue, /*init*/ true);
    return;
  }
  case TEK_Aggregate:
    if (type->isAtomicType()) {
      EmitAtomicInit(const_cast<Expr*>(init), lvalue);
    } else {
      // TODO: how can we delay here if D is captured by its initializer?
      EmitAggExpr(init, AggValueSlot::forLValue(lvalue,
                                              AggValueSlot::IsDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                              AggValueSlot::IsNotAliased));
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}
1426 
/// Enter a destroy cleanup for the given local variable.
void CodeGenFunction::emitAutoVarTypeCleanup(
                            const CodeGenFunction::AutoVarEmission &emission,
                            QualType::DestructionKind dtorKind) {
  assert(dtorKind != QualType::DK_none);

  // Note that for __block variables, we want to destroy the
  // original stack object, not the possibly forwarded object.
  Address addr = emission.getObjectAddress(*this);

  const VarDecl *var = emission.Variable;
  QualType type = var->getType();

  // Defaults; refined per destruction kind below.
  CleanupKind cleanupKind = NormalAndEHCleanup;
  CodeGenFunction::Destroyer *destroyer = nullptr;

  switch (dtorKind) {
  case QualType::DK_none:
    llvm_unreachable("no cleanup for trivially-destructible variable");

  case QualType::DK_cxx_destructor:
    // If there's an NRVO flag on the emission, we need a different
    // cleanup: the destructor must run only when NRVO was not applied.
    if (emission.NRVOFlag) {
      assert(!type->isArrayType());
      CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
      EHStack.pushCleanup<DestroyNRVOVariable>(cleanupKind, addr,
                                               dtor, emission.NRVOFlag);
      return;
    }
    break;

  case QualType::DK_objc_strong_lifetime:
    // Suppress cleanups for pseudo-strong variables.
    if (var->isARCPseudoStrong()) return;

    // Otherwise, consider whether to use an EH cleanup or not.
    cleanupKind = getARCCleanupKind();

    // Use the imprecise destroyer by default.
    if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
      destroyer = CodeGenFunction::destroyARCStrongImprecise;
    break;

  case QualType::DK_objc_weak_lifetime:
    // Weak references use the default destroyer chosen below.
    break;

  case QualType::DK_nontrivial_c_struct:
    destroyer = CodeGenFunction::destroyNonTrivialCStruct;
    break;
  }

  // If we haven't chosen a more specific destroyer, use the default.
  if (!destroyer) destroyer = getDestroyer(dtorKind);

  // Use an EH cleanup in array destructors iff the destructor itself
  // is being pushed as an EH cleanup.
  bool useEHCleanup = (cleanupKind & EHCleanup);
  EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
                                     useEHCleanup);
}
1488 
// Push all cleanups needed for a local variable: type-based destruction,
// GC lifetime extension, the cleanup attribute's function call, and the
// __block byref release.
void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  // If we don't have an insertion point, we're done.  Sema prevents
  // us from jumping into any of these scopes anyway.
  if (!HaveInsertPoint()) return;

  const VarDecl &D = *emission.Variable;

  // Check the type for a cleanup.
  if (QualType::DestructionKind dtorKind = D.getType().isDestructedType())
    emitAutoVarTypeCleanup(emission, dtorKind);

  // In GC mode, honor objc_precise_lifetime.
  if (getLangOpts().getGC() != LangOptions::NonGC &&
      D.hasAttr<ObjCPreciseLifetimeAttr>()) {
    EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
  }

  // Handle the cleanup attribute: call the designated function with the
  // variable's address on scope exit.
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant *F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
    EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
  }

  // If this is a block variable, call _Block_object_destroy
  // (on the unforwarded address).
  if (emission.IsByRef)
    enterByrefCleanup(emission);
}
1527 
// Map a destruction kind to the default destroyer callback used when no
// more specific destroyer has been selected.
CodeGenFunction::Destroyer *
CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
  switch (kind) {
  case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
  case QualType::DK_cxx_destructor:
    return destroyCXXObject;
  case QualType::DK_objc_strong_lifetime:
    return destroyARCStrongPrecise;
  case QualType::DK_objc_weak_lifetime:
    return destroyARCWeak;
  case QualType::DK_nontrivial_c_struct:
    return destroyNonTrivialCStruct;
  }
  llvm_unreachable("Unknown DestructionKind");
}
1543 
/// pushEHDestroy - Push the standard destructor for the given type as
/// an EH-only cleanup.
void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
                                    Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");
  assert(needsEHCleanup(dtorKind));

  // EH-only: the destroy runs during unwinding, not on normal scope exit.
  pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
}
1553 
1554 /// pushDestroy - Push the standard destructor for the given type as
1555 /// at least a normal cleanup.
1556 void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
1557                                   Address addr, QualType type) {
1558   assert(dtorKind && "cannot push destructor for trivial type");
1559 
1560   CleanupKind cleanupKind = getCleanupKind(dtorKind);
1561   pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
1562               cleanupKind & EHCleanup);
1563 }
1564 
1565 void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
1566                                   QualType type, Destroyer *destroyer,
1567                                   bool useEHCleanupForArray) {
1568   pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
1569                                      destroyer, useEHCleanupForArray);
1570 }
1571 
/// Push a cleanup that restores the stack pointer saved in \p SPMem.
void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
  EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
}
1575 
1576 void CodeGenFunction::pushLifetimeExtendedDestroy(
1577     CleanupKind cleanupKind, Address addr, QualType type,
1578     Destroyer *destroyer, bool useEHCleanupForArray) {
1579   assert(!isInConditionalBranch() &&
1580          "performing lifetime extension from within conditional");
1581 
1582   // Push an EH-only cleanup for the object now.
1583   // FIXME: When popping normal cleanups, we need to keep this EH cleanup
1584   // around in case a temporary's destructor throws an exception.
1585   if (cleanupKind & EHCleanup)
1586     EHStack.pushCleanup<DestroyObject>(
1587         static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
1588         destroyer, useEHCleanupForArray);
1589 
1590   // Remember that we need to push a full cleanup for the object at the
1591   // end of the full-expression.
1592   pushCleanupAfterFullExpr<DestroyObject>(
1593       cleanupKind, addr, type, destroyer, useEHCleanupForArray);
1594 }
1595 
1596 /// emitDestroy - Immediately perform the destruction of the given
1597 /// object.
1598 ///
1599 /// \param addr - the address of the object; a type*
1600 /// \param type - the type of the object; if an array type, all
1601 ///   objects are destroyed in reverse order
1602 /// \param destroyer - the function to call to destroy individual
1603 ///   elements
1604 /// \param useEHCleanupForArray - whether an EH cleanup should be
1605 ///   used when destroying array elements, in case one of the
1606 ///   destructions throws an exception
1607 void CodeGenFunction::emitDestroy(Address addr, QualType type,
1608                                   Destroyer *destroyer,
1609                                   bool useEHCleanupForArray) {
1610   const ArrayType *arrayType = getContext().getAsArrayType(type);
1611   if (!arrayType)
1612     return destroyer(*this, addr, type);
1613 
1614   llvm::Value *length = emitArrayLength(arrayType, type, addr);
1615 
1616   CharUnits elementAlign =
1617     addr.getAlignment()
1618         .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
1619 
1620   // Normally we have to check whether the array is zero-length.
1621   bool checkZeroLength = true;
1622 
1623   // But if the array length is constant, we can suppress that.
1624   if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
1625     // ...and if it's constant zero, we can just skip the entire thing.
1626     if (constLength->isZero()) return;
1627     checkZeroLength = false;
1628   }
1629 
1630   llvm::Value *begin = addr.getPointer();
1631   llvm::Value *end = Builder.CreateInBoundsGEP(begin, length);
1632   emitArrayDestroy(begin, end, type, elementAlign, destroyer,
1633                    checkZeroLength, useEHCleanupForArray);
1634 }
1635 
1636 /// emitArrayDestroy - Destroys all the elements of the given array,
1637 /// beginning from last to first.  The array cannot be zero-length.
1638 ///
1639 /// \param begin - a type* denoting the first element of the array
1640 /// \param end - a type* denoting one past the end of the array
1641 /// \param elementType - the element type of the array
1642 /// \param destroyer - the function to call to destroy elements
1643 /// \param useEHCleanup - whether to push an EH cleanup to destroy
1644 ///   the remaining elements in case the destruction of a single
1645 ///   element throws
1646 void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
1647                                        llvm::Value *end,
1648                                        QualType elementType,
1649                                        CharUnits elementAlign,
1650                                        Destroyer *destroyer,
1651                                        bool checkZeroLength,
1652                                        bool useEHCleanup) {
1653   assert(!elementType->isArrayType());
1654 
1655   // The basic structure here is a do-while loop, because we don't
1656   // need to check for the zero-element case.
1657   llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
1658   llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");
1659 
1660   if (checkZeroLength) {
1661     llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
1662                                                 "arraydestroy.isempty");
1663     Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
1664   }
1665 
1666   // Enter the loop body, making that address the current address.
1667   llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
1668   EmitBlock(bodyBB);
1669   llvm::PHINode *elementPast =
1670     Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
1671   elementPast->addIncoming(end, entryBB);
1672 
1673   // Shift the address back by one element.
1674   llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
1675   llvm::Value *element = Builder.CreateInBoundsGEP(elementPast, negativeOne,
1676                                                    "arraydestroy.element");
1677 
1678   if (useEHCleanup)
1679     pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
1680                                    destroyer);
1681 
1682   // Perform the actual destruction there.
1683   destroyer(*this, Address(element, elementAlign), elementType);
1684 
1685   if (useEHCleanup)
1686     PopCleanupBlock();
1687 
1688   // Check whether we've reached the end.
1689   llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
1690   Builder.CreateCondBr(done, doneBB, bodyBB);
1691   elementPast->addIncoming(element, Builder.GetInsertBlock());
1692 
1693   // Done.
1694   EmitBlock(doneBB);
1695 }
1696 
1697 /// Perform partial array destruction as if in an EH cleanup.  Unlike
1698 /// emitArrayDestroy, the element type here may still be an array type.
1699 static void emitPartialArrayDestroy(CodeGenFunction &CGF,
1700                                     llvm::Value *begin, llvm::Value *end,
1701                                     QualType type, CharUnits elementAlign,
1702                                     CodeGenFunction::Destroyer *destroyer) {
1703   // If the element type is itself an array, drill down.
1704   unsigned arrayDepth = 0;
1705   while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
1706     // VLAs don't require a GEP index to walk into.
1707     if (!isa<VariableArrayType>(arrayType))
1708       arrayDepth++;
1709     type = arrayType->getElementType();
1710   }
1711 
1712   if (arrayDepth) {
1713     llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
1714 
1715     SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
1716     begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices, "pad.arraybegin");
1717     end = CGF.Builder.CreateInBoundsGEP(end, gepIndices, "pad.arrayend");
1718   }
1719 
1720   // Destroy the array.  We don't ever need an EH cleanup because we
1721   // assume that we're in an EH cleanup ourselves, so a throwing
1722   // destructor causes an immediate terminate.
1723   CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
1724                        /*checkZeroLength*/ true, /*useEHCleanup*/ false);
1725 }
1726 
1727 namespace {
1728   /// RegularPartialArrayDestroy - a cleanup which performs a partial
1729   /// array destroy where the end pointer is regularly determined and
1730   /// does not need to be loaded from a local.
1731   class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
1732     llvm::Value *ArrayBegin;
1733     llvm::Value *ArrayEnd;
1734     QualType ElementType;
1735     CodeGenFunction::Destroyer *Destroyer;
1736     CharUnits ElementAlign;
1737   public:
1738     RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
1739                                QualType elementType, CharUnits elementAlign,
1740                                CodeGenFunction::Destroyer *destroyer)
1741       : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
1742         ElementType(elementType), Destroyer(destroyer),
1743         ElementAlign(elementAlign) {}
1744 
1745     void Emit(CodeGenFunction &CGF, Flags flags) override {
1746       emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
1747                               ElementType, ElementAlign, Destroyer);
1748     }
1749   };
1750 
1751   /// IrregularPartialArrayDestroy - a cleanup which performs a
1752   /// partial array destroy where the end pointer is irregularly
1753   /// determined and must be loaded from a local.
1754   class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
1755     llvm::Value *ArrayBegin;
1756     Address ArrayEndPointer;
1757     QualType ElementType;
1758     CodeGenFunction::Destroyer *Destroyer;
1759     CharUnits ElementAlign;
1760   public:
1761     IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
1762                                  Address arrayEndPointer,
1763                                  QualType elementType,
1764                                  CharUnits elementAlign,
1765                                  CodeGenFunction::Destroyer *destroyer)
1766       : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
1767         ElementType(elementType), Destroyer(destroyer),
1768         ElementAlign(elementAlign) {}
1769 
1770     void Emit(CodeGenFunction &CGF, Flags flags) override {
1771       llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
1772       emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
1773                               ElementType, ElementAlign, Destroyer);
1774     }
1775   };
1776 } // end anonymous namespace
1777 
1778 /// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
1779 /// already-constructed elements of the given array.  The cleanup
1780 /// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
1781 ///
1782 /// \param elementType - the immediate element type of the array;
1783 ///   possibly still an array type
1784 void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
1785                                                        Address arrayEndPointer,
1786                                                        QualType elementType,
1787                                                        CharUnits elementAlign,
1788                                                        Destroyer *destroyer) {
1789   pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
1790                                                     arrayBegin, arrayEndPointer,
1791                                                     elementType, elementAlign,
1792                                                     destroyer);
1793 }
1794 
1795 /// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
1796 /// already-constructed elements of the given array.  The cleanup
1797 /// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
1798 ///
1799 /// \param elementType - the immediate element type of the array;
1800 ///   possibly still an array type
1801 void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
1802                                                      llvm::Value *arrayEnd,
1803                                                      QualType elementType,
1804                                                      CharUnits elementAlign,
1805                                                      Destroyer *destroyer) {
1806   pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
1807                                                   arrayBegin, arrayEnd,
1808                                                   elementType, elementAlign,
1809                                                   destroyer);
1810 }
1811 
1812 /// Lazily declare the @llvm.lifetime.start intrinsic.
1813 llvm::Constant *CodeGenModule::getLLVMLifetimeStartFn() {
1814   if (LifetimeStartFn)
1815     return LifetimeStartFn;
1816   LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
1817     llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
1818   return LifetimeStartFn;
1819 }
1820 
1821 /// Lazily declare the @llvm.lifetime.end intrinsic.
1822 llvm::Constant *CodeGenModule::getLLVMLifetimeEndFn() {
1823   if (LifetimeEndFn)
1824     return LifetimeEndFn;
1825   LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
1826     llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
1827   return LifetimeEndFn;
1828 }
1829 
1830 namespace {
1831   /// A cleanup to perform a release of an object at the end of a
1832   /// function.  This is used to balance out the incoming +1 of a
1833   /// ns_consumed argument when we can't reasonably do that just by
1834   /// not doing the initial retain for a __block argument.
1835   struct ConsumeARCParameter final : EHScopeStack::Cleanup {
1836     ConsumeARCParameter(llvm::Value *param,
1837                         ARCPreciseLifetime_t precise)
1838       : Param(param), Precise(precise) {}
1839 
1840     llvm::Value *Param;
1841     ARCPreciseLifetime_t Precise;
1842 
1843     void Emit(CodeGenFunction &CGF, Flags flags) override {
1844       CGF.EmitARCRelease(Param, Precise);
1845     }
1846   };
1847 } // end anonymous namespace
1848 
/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
///
/// \param D - the parameter (or implicit parameter) declaration
/// \param Arg - the incoming argument, either a direct SSA value or an
///   indirect pointer to caller-provided storage
/// \param ArgNo - the argument's position, forwarded to block-context
///   setup and debug info
void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
                                   unsigned ArgNo) {
  // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
  assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
         "Invalid argument to EmitParmDecl");

  // Name the IR value after the source-level parameter.
  Arg.getAnyValue()->setName(D.getName());

  QualType Ty = D.getType();

  // Use better IR generation for certain implicit parameters.
  if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
    // The only implicit argument a block has is its literal.
    // This may be passed as an inalloca'ed value on Windows x86.
    if (BlockInfo) {
      llvm::Value *V = Arg.isIndirect()
                           ? Builder.CreateLoad(Arg.getIndirectAddress())
                           : Arg.getDirectValue();
      setBlockContextParameter(IPD, ArgNo, V);
      return;
    }
  }

  Address DeclPtr = Address::invalid();
  // Whether the incoming value still needs to be stored into DeclPtr at
  // the end of this function.
  bool DoStore = false;
  bool IsScalar = hasScalarEvaluationKind(Ty);
  // If we already have a pointer to the argument, reuse the input pointer.
  if (Arg.isIndirect()) {
    DeclPtr = Arg.getIndirectAddress();
    // If we have a prettier pointer type at this point, bitcast to that.
    unsigned AS = DeclPtr.getType()->getAddressSpace();
    llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
    if (DeclPtr.getType() != IRTy)
      DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());
    // Indirect argument is in alloca address space, which may be different
    // from the default address space.
    auto AllocaAS = CGM.getASTAllocaAddressSpace();
    auto *V = DeclPtr.getPointer();
    auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
    auto DestLangAS =
        getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
    // Cast the pointer into the address space the function body expects.
    if (SrcLangAS != DestLangAS) {
      assert(getContext().getTargetAddressSpace(SrcLangAS) ==
             CGM.getDataLayout().getAllocaAddrSpace());
      auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
      auto *T = V->getType()->getPointerElementType()->getPointerTo(DestAS);
      DeclPtr = Address(getTargetHooks().performAddrSpaceCast(
                            *this, V, SrcLangAS, DestLangAS, T, true),
                        DeclPtr.getAlignment());
    }

    // Push a destructor cleanup for this parameter if the ABI requires it.
    // Don't push a cleanup in a thunk for a method that will also emit a
    // cleanup.
    if (!IsScalar && !CurFuncIsThunk &&
        getContext().isParamDestroyedInCallee(Ty)) {
      if (QualType::DestructionKind DtorKind = Ty.isDestructedType()) {
        assert((DtorKind == QualType::DK_cxx_destructor ||
                DtorKind == QualType::DK_nontrivial_c_struct) &&
               "unexpected destructor type");
        pushDestroy(DtorKind, DeclPtr, Ty);
      }
    }
  } else {
    // Otherwise, create a temporary to hold the value.
    DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
                            D.getName() + ".addr");
    DoStore = true;
  }

  // The raw incoming value, only available for direct arguments.
  llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);

  LValue lv = MakeAddrLValue(DeclPtr, Ty);
  // For scalars with an ObjC lifetime qualifier, emit the appropriate ARC
  // retain/initialization and balance any ns_consumed +1.
  if (IsScalar) {
    Qualifiers qs = Ty.getQualifiers();
    if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
      // We honor __attribute__((ns_consumed)) for types with lifetime.
      // For __strong, it's handled by just skipping the initial retain;
      // otherwise we have to balance out the initial +1 with an extra
      // cleanup to do the release at the end of the function.
      bool isConsumed = D.hasAttr<NSConsumedAttr>();

      // 'self' is always formally __strong, but if this is not an
      // init method then we don't want to retain it.
      if (D.isARCPseudoStrong()) {
        const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CurCodeDecl);
        assert(&D == method->getSelfDecl());
        assert(lt == Qualifiers::OCL_Strong);
        assert(qs.hasConst());
        assert(method->getMethodFamily() != OMF_init);
        (void) method;
        lt = Qualifiers::OCL_ExplicitNone;
      }

      // Load objects passed indirectly.
      if (Arg.isIndirect() && !ArgVal)
        ArgVal = Builder.CreateLoad(DeclPtr);

      if (lt == Qualifiers::OCL_Strong) {
        if (!isConsumed) {
          if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
            // use objc_storeStrong(&dest, value) for retaining the
            // object. But first, store a null into 'dest' because
            // objc_storeStrong attempts to release its old value.
            llvm::Value *Null = CGM.EmitNullConstant(D.getType());
            EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
            EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
            DoStore = false;
          }
          else
          // Don't use objc_retainBlock for block pointers, because we
          // don't want to Block_copy something just because we got it
          // as a parameter.
            ArgVal = EmitARCRetainNonBlock(ArgVal);
        }
      } else {
        // Push the cleanup for a consumed parameter.
        if (isConsumed) {
          ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
                                ? ARCPreciseLifetime : ARCImpreciseLifetime);
          EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
                                                   precise);
        }

        if (lt == Qualifiers::OCL_Weak) {
          EmitARCInitWeak(DeclPtr, ArgVal);
          DoStore = false; // The weak init is a store, no need to do two.
        }
      }

      // Enter the cleanup scope.
      EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
    }
  }

  // Store the initial value into the alloca.
  if (DoStore)
    EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);

  // Make the parameter addressable for the rest of the function body.
  setAddrOfLocalVar(&D, DeclPtr);

  // Emit debug info for param declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().getDebugInfo() >=
        codegenoptions::LimitedDebugInfo) {
      DI->EmitDeclareOfArgVariable(&D, DeclPtr.getPointer(), ArgNo, Builder);
    }
  }

  // Emit annotation intrinsics for __attribute__((annotate)) parameters.
  if (D.hasAttr<AnnotateAttr>())
    EmitVarAnnotations(&D, DeclPtr.getPointer());

  // We can only check return value nullability if all arguments to the
  // function satisfy their nullability preconditions. This makes it necessary
  // to emit null checks for args in the function body itself.
  if (requiresReturnValueNullabilityCheck()) {
    auto Nullability = Ty->getNullability(getContext());
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      SanitizerScope SanScope(this);
      RetValNullabilityPrecondition =
          Builder.CreateAnd(RetValNullabilityPrecondition,
                            Builder.CreateIsNotNull(Arg.getAnyValue()));
    }
  }
}
2016 
2017 void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
2018                                             CodeGenFunction *CGF) {
2019   if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
2020     return;
2021   getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
2022 }
2023