//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Type.h"
using namespace clang;
using namespace CodeGen;


void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  case Decl::TranslationUnit:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::IndirectField:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::FunctionTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::ObjCPropertyImpl:
  case Decl::ObjCClass:
  case Decl::ObjCForwardProtocol:
  case Decl::FileScopeAsm:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
    assert(0 && "Declaration should not be in declstmts!");
  case Decl::Function:  // void X();
  case Decl::Record:    // struct/union/class X;
  case Decl::Enum:      // enum X;
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::CXXRecord: // struct/union/class X; [C++]
  case Decl::Using:          // using X; [C++]
  case Decl::UsingShadow:
  case Decl::UsingDirective: // using namespace X; [C++]
  case Decl::NamespaceAlias:
  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
    // None of these decls require codegen support.
    return;

  case Decl::Var: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    return EmitVarDecl(VD);
  }

  case Decl::Typedef: {   // typedef int X;
    const TypedefDecl &TD = cast<TypedefDecl>(D);
    QualType Ty = TD.getUnderlyingType();

    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  }
  }
}

/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
  switch (D.getStorageClass()) {
  case SC_None:
  case SC_Auto:
  case SC_Register:
    return EmitAutoVarDecl(D);
  case SC_Static: {
    llvm::GlobalValue::LinkageTypes Linkage =
      llvm::GlobalValue::InternalLinkage;

    // If the function definition has some sort of weak linkage, its
    // static variables should also be weak so that they get properly
    // uniqued.  We can't do this in C, though, because there's no
    // standard way to agree on which variables are the same (i.e.
    // there's no mangling).
    if (getContext().getLangOptions().CPlusPlus)
      if (llvm::GlobalValue::isWeakForLinker(CurFn->getLinkage()))
        Linkage = CurFn->getLinkage();

    return EmitStaticVarDecl(D, Linkage);
  }
  case SC_Extern:
  case SC_PrivateExtern:
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;
  }

  assert(0 && "Unknown storage class");
}

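/// GetStaticDeclName - Compute the name to give a function-local static
/// variable.  In C++ this is simply the variable's mangled name; otherwise it
/// is the mangled name of the enclosing function, block, or Objective-C
/// method, followed by the separator and the variable's own name.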
static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
                                     const char *Separator) {
  CodeGenModule &CGM = CGF.CGM;
  if (CGF.getContext().getLangOptions().CPlusPlus) {
    llvm::StringRef Name = CGM.getMangledName(&D);
    return Name.str();
  }

  std::string ContextName;
  if (!CGF.CurFuncDecl) {
    // Better be in a block declared in global scope.
    const NamedDecl *ND = cast<NamedDecl>(&D);
    const DeclContext *DC = ND->getDeclContext();
    if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
      MangleBuffer Name;
      CGM.getBlockMangledName(GlobalDecl(), Name, BD);
      ContextName = Name.getString();
    } else
      assert(0 && "Unknown context for block static var decl");
  } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) {
    llvm::StringRef Name = CGM.getMangledName(FD);
    ContextName = Name.str();
  } else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl))
    ContextName = CGF.CurFn->getName();
  else
    assert(0 && "Unknown context for static var decl");

  return ContextName + Separator + D.getNameAsString();
}

llvm::GlobalVariable *
CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
                                     const char *Separator,
                                     llvm::GlobalValue::LinkageTypes Linkage) {
  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  std::string Name = GetStaticDeclName(*this, D, Separator);

  const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
  llvm::GlobalVariable *GV =
    new llvm::GlobalVariable(CGM.getModule(), LTy,
                             Ty.isConstant(getContext()), Linkage,
                             CGM.EmitNullConstant(D.getType()), Name, 0,
                             D.isThreadSpecified(), Ty.getAddressSpace());
  GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
  if (Linkage != llvm::GlobalValue::InternalLinkage)
    GV->setVisibility(CurFn->getVisibility());
  return GV;
}

/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it.  If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one.  Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getContext().getLangOptions().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (Builder.GetInsertBlock()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      EmitCXXGuardedInit(D, GV);
    }
    return GV;
  }

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer.  (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
  if (GV->getType()->getElementType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
                                  OldGV->isConstant(),
                                  OldGV->getLinkage(), Init, "",
                                  /*InsertBefore*/ OldGV,
                                  D.isThreadSpecified(),
                                  D.getType().getAddressSpace());
    GV->setVisibility(OldGV->getVisibility());

    // Steal the name of the old global
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global
    llvm::Constant *NewPtrForOldDecl =
      llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtrForOldDecl);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  GV->setInitializer(Init);
  return GV;
}

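/// EmitStaticVarDecl - Emit a local variable declared 'static': create the
/// backing global, emit its initializer if present, and record its (possibly
/// bitcast) address in LocalDeclMap.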
void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                      llvm::GlobalValue::LinkageTypes Linkage) {
  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");

  llvm::GlobalVariable *GV = CreateStaticVarDecl(D, ".", Linkage);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  DMEntry = GV;

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVLASize(D.getType());

  // Local static block variables must be treated as globals as they may be
  // referenced in their RHS initializer block-literal expression.
  CGM.setStaticLocalDeclAddress(&D, GV);

  // If this value has an initializer, emit it.
  if (D.getInit())
    GV = AddInitializerToStaticVarDecl(D, GV);

  GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());

  // FIXME: Merge attribute handling.
  if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) {
    SourceManager &SM = CGM.getContext().getSourceManager();
    llvm::Constant *Ann =
      CGM.EmitAnnotateAttr(GV, AA,
                           SM.getInstantiationLineNumber(D.getLocation()));
    CGM.AddAnnotation(Ann);
  }

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    GV->setSection(SA->getName());

  if (D.hasAttr<UsedAttr>())
    CGM.AddUsedGlobal(GV);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV, uses of this constant will be invalid.
  const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
  const llvm::Type *LPtrTy = LTy->getPointerTo(D.getType().getAddressSpace());
  DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D);
  }
}

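/// getByRefValueLLVMField - Return the index of the field within the byref
/// struct (built by BuildByRefType) that holds the variable's own storage.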
unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
  assert(ByRefValueInfo.count(VD) && "Did not find value!");

  return ByRefValueInfo.find(VD)->second.second;
}

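/// BuildBlockByrefAddress - Compute the address of a __block variable's
/// storage by loading its forwarding pointer and then indexing to the field
/// that holds the variable itself.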
llvm::Value *CodeGenFunction::BuildBlockByrefAddress(llvm::Value *BaseAddr,
                                                     const VarDecl *V) {
  llvm::Value *Loc = Builder.CreateStructGEP(BaseAddr, 1, "forwarding");
  Loc = Builder.CreateLoad(Loc);
  Loc = Builder.CreateStructGEP(Loc, getByRefValueLLVMField(V),
                                V->getNameAsString());
  return Loc;
}

/// BuildByRefType - This routine changes a __block variable declared as T x
///   into:
///
///      struct {
///        void *__isa;
///        void *__forwarding;
///        int32_t __flags;
///        int32_t __size;
///        void *__copy_helper;       // only if needed
///        void *__destroy_helper;    // only if needed
///        char padding[X];           // only if needed
///        T x;
///      } x
///
const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
  std::pair<const llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
  if (Info.first)
    return Info.first;

  QualType Ty = D->getType();

  std::vector<const llvm::Type *> Types;

  const llvm::PointerType *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);

  llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(VMContext);

  // void *__isa;
  Types.push_back(Int8PtrTy);

  // void *__forwarding;
  Types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));

  // int32_t __flags;
  Types.push_back(Int32Ty);

  // int32_t __size;
  Types.push_back(Int32Ty);

  bool HasCopyAndDispose = BlockRequiresCopying(Ty);
  if (HasCopyAndDispose) {
    // void *__copy_helper;
    Types.push_back(Int8PtrTy);

    // void *__destroy_helper;
    Types.push_back(Int8PtrTy);
  }

  bool Packed = false;
  CharUnits Align = getContext().getDeclAlign(D);
  if (Align > getContext().toCharUnitsFromBits(Target.getPointerAlign(0))) {
    // We have to insert padding.

    // The struct above has 2 32-bit integers.
    unsigned CurrentOffsetInBytes = 4 * 2;

    // And either 2 or 4 pointers.
    CurrentOffsetInBytes += (HasCopyAndDispose ? 4 : 2) *
      CGM.getTargetData().getTypeAllocSize(Int8PtrTy);

    // Align the offset.
    unsigned AlignedOffsetInBytes =
      llvm::RoundUpToAlignment(CurrentOffsetInBytes, Align.getQuantity());

    unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
    if (NumPaddingBytes > 0) {
      const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
      // FIXME: We need a sema error for alignment larger than the minimum of
      // the maximal stack alignment and the alignment of malloc on the system.
      if (NumPaddingBytes > 1)
        Ty = llvm::ArrayType::get(Ty, NumPaddingBytes);

      Types.push_back(Ty);

      // We want a packed struct.
      Packed = true;
    }
  }

  // T x;
  Types.push_back(ConvertTypeForMem(Ty));

  const llvm::Type *T = llvm::StructType::get(VMContext, Types, Packed);

  cast<llvm::OpaqueType>(ByRefTypeHolder.get())->refineAbstractTypeTo(T);
  CGM.getModule().addTypeName("struct.__block_byref_" + D->getNameAsString(),
                              ByRefTypeHolder.get());

  Info.first = ByRefTypeHolder.get();

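  // The variable itself is always the last field of the byref struct.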
  Info.second = Types.size() - 1;

  return Info.first;
}

namespace {
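  /// A cleanup that runs the destructor on every element of a constant-size
  /// array when the variable goes out of scope or an exception unwinds
  /// through it.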
  struct CallArrayDtor : EHScopeStack::Cleanup {
    CallArrayDtor(const CXXDestructorDecl *Dtor,
                  const ConstantArrayType *Type,
                  llvm::Value *Loc)
      : Dtor(Dtor), Type(Type), Loc(Loc) {}

    const CXXDestructorDecl *Dtor;
    const ConstantArrayType *Type;
    llvm::Value *Loc;

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      QualType BaseElementTy = CGF.getContext().getBaseElementType(Type);
      const llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
      BasePtr = llvm::PointerType::getUnqual(BasePtr);
      llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(Loc, BasePtr);
      CGF.EmitCXXAggrDestructorCall(Dtor, Type, BaseAddrPtr);
    }
  };

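  /// A cleanup that runs the destructor for a single local variable.  On the
  /// normal exit path the call is skipped if the NRVO flag records that the
  /// object was constructed directly in the return slot.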
  struct CallVarDtor : EHScopeStack::Cleanup {
    CallVarDtor(const CXXDestructorDecl *Dtor,
                llvm::Value *NRVOFlag,
                llvm::Value *Loc)
      : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(Loc) {}

    const CXXDestructorDecl *Dtor;
    llvm::Value *NRVOFlag;
    llvm::Value *Loc;

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      // Along the exceptions path we always execute the dtor.
      bool NRVO = !IsForEH && NRVOFlag;

      llvm::BasicBlock *SkipDtorBB = 0;
      if (NRVO) {
        // If we exited via NRVO, we skip the destructor call.
        llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
        SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
        llvm::Value *DidNRVO = CGF.Builder.CreateLoad(NRVOFlag, "nrvo.val");
        CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
        CGF.EmitBlock(RunDtorBB);
      }

      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false, Loc);

      if (NRVO) CGF.EmitBlock(SkipDtorBB);
    }
  };
}

namespace {
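  /// A cleanup that restores the stack pointer saved before a VLA was
  /// allocated, releasing the VLA's stack memory when the scope is exited.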
  struct CallStackRestore : EHScopeStack::Cleanup {
    llvm::Value *Stack;
    CallStackRestore(llvm::Value *Stack) : Stack(Stack) {}
    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      llvm::Value *V = CGF.Builder.CreateLoad(Stack, "tmp");
      llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
      CGF.Builder.CreateCall(F, V);
    }
  };

  struct CallCleanupFunction : EHScopeStack::Cleanup {
    llvm::Constant *CleanupFn;
    const CGFunctionInfo &FnInfo;
    llvm::Value *Addr;
    const VarDecl &Var;

    CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                        llvm::Value *Addr, const VarDecl *Var)
      : CleanupFn(CleanupFn), FnInfo(*Info), Addr(Addr), Var(*Var) {}

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      // In some cases, the type of the function argument will be different from
      // the type of the pointer. An example of this is
      // void f(void* arg);
      // __attribute__((cleanup(f))) void *g;
      //
      // To fix this we insert a bitcast here.
      QualType ArgTy = FnInfo.arg_begin()->type;
      llvm::Value *Arg =
        CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

      CallArgList Args;
      Args.push_back(std::make_pair(RValue::get(Arg),
                            CGF.getContext().getPointerType(Var.getType())));
      CGF.EmitCall(FnInfo, CleanupFn, ReturnValueSlot(), Args);
    }
  };

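  /// A cleanup that releases a __block variable through its forwarding
  /// pointer when the variable goes out of scope.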
  struct CallBlockRelease : EHScopeStack::Cleanup {
    llvm::Value *Addr;
    CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      llvm::Value *V = CGF.Builder.CreateStructGEP(Addr, 1, "forwarding");
      V = CGF.Builder.CreateLoad(V);
      CGF.BuildBlockRelease(V);
    }
  };
}


/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
/// non-zero parts of the specified initializer with equal or fewer than
/// NumStores scalar stores.
static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
                                                unsigned &NumStores) {
  // Zero and Undef never require any extra stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return true;
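  // A single scalar or vector constant needs at most one store; a non-null
  // value consumes one store from the budget (and fails if it is exhausted).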
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init))
    return Init->isNullValue() || NumStores--;

  // See if we can emit each element.
  if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
    for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
      llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
      if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
        return false;
    }
    return true;
  }

  // Anything else is hard and scary.
  return false;
}

/// emitStoresForInitAfterMemset - For inits that
/// canEmitInitWithFewStoresAfterMemset returned true for, emit the scalar
/// stores that would be required.
static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
                                         CGBuilderTy &Builder) {
  // Zero doesn't require any stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return;

  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init)) {
    if (!Init->isNullValue())
      Builder.CreateStore(Init, Loc);
    return;
  }

  assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
         "Unknown value type!");

  for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
    llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
    if (Elt->isNullValue()) continue;

    // Otherwise, get a pointer to the element and emit it.
    emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
                                 Builder);
  }
}


/// shouldUseMemSetPlusStoresToInitialize - Decide whether we should use memset
/// plus some stores to initialize a local variable instead of using a memcpy
/// from a constant global.  It is beneficial to use memset if the global is all
/// zeros, or mostly zeros and large.
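/// For example, "int buf[1024] = { 1 }" is better emitted as a memset of the
/// whole buffer followed by a single store of 1 to buf[0] than as a memcpy
/// from an equally large constant global.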
static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init,
                                                  uint64_t GlobalSize) {
  // If a global is all zeros, always use a memset.
  if (isa<llvm::ConstantAggregateZero>(Init)) return true;


  // If a non-zero global is <= 32 bytes, always use a memcpy.  If it is large,
  // do it if it will require 6 or fewer scalar stores.
  // TODO: Should the budget depend on the size?  Avoiding a large global
  // warrants plopping in more stores.
  unsigned StoreBudget = 6;
  uint64_t SizeLimit = 32;

  return GlobalSize > SizeLimit &&
         canEmitInitWithFewStoresAfterMemset(Init, StoreBudget);
}


/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D,
                                      SpecialInitFn *SpecialInit) {
  QualType Ty = D.getType();
  unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
  bool isByRef = D.hasAttr<BlocksAttr>();
  bool needsDispose = false;
  CharUnits Align = CharUnits::Zero();
  bool IsSimpleConstantInitializer = false;

  bool NRVO = false;
  llvm::Value *NRVOFlag = 0;
  llvm::Value *DeclPtr;
  if (Ty->isConstantSizeType()) {
    if (!Target.useGlobalsForAutomaticVariables()) {
      NRVO = getContext().getLangOptions().ElideConstructors &&
             D.isNRVOVariable();
      // If this value is an array or struct, is POD, and if the initializer is
      // a statically determinable constant, try to optimize it (unless the NRVO
      // is already optimizing this).
      if (!NRVO && D.getInit() && !isByRef &&
          (Ty->isArrayType() || Ty->isRecordType()) &&
          Ty->isPODType() &&
          D.getInit()->isConstantInitializer(getContext(), false)) {
        // If this variable is marked 'const', emit the value as a global.
        if (CGM.getCodeGenOpts().MergeAllConstants &&
            Ty.isConstant(getContext())) {
          EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
          return;
        }

        IsSimpleConstantInitializer = true;
      }

      // A normal fixed sized variable becomes an alloca in the entry block,
      // unless it's an NRVO variable.
      const llvm::Type *LTy = ConvertTypeForMem(Ty);

      if (NRVO) {
        // The named return value optimization: allocate this variable in the
        // return slot, so that we can elide the copy when returning this
        // variable (C++0x [class.copy]p34).
        DeclPtr = ReturnValue;

        if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
          if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
            // Create a flag that is used to indicate when the NRVO was applied
            // to this variable. Set it to zero to indicate that NRVO was not
            // applied.
            llvm::Value *Zero = Builder.getFalse();
            NRVOFlag = CreateTempAlloca(Zero->getType(), "nrvo");
            Builder.CreateStore(Zero, NRVOFlag);

            // Record the NRVO flag for this variable.
            NRVOFlags[&D] = NRVOFlag;
          }
        }
      } else {
        if (isByRef)
          LTy = BuildByRefType(&D);

        llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
        Alloc->setName(D.getNameAsString());

        Align = getContext().getDeclAlign(&D);
        if (isByRef)
          Align = std::max(Align,
              getContext().toCharUnitsFromBits(Target.getPointerAlign(0)));
        Alloc->setAlignment(Align.getQuantity());
        DeclPtr = Alloc;
      }
    } else {
      // Targets that don't support recursion emit locals as globals.
      const char *Class =
        D.getStorageClass() == SC_Register ? ".reg." : ".auto.";
      DeclPtr = CreateStaticVarDecl(D, Class,
                                    llvm::GlobalValue::InternalLinkage);
    }

    // FIXME: Can this happen?
    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext);
      llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");

      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);

      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      // Push a cleanup block and restore the stack there.
      EHStack.pushCleanup<CallStackRestore>(NormalCleanup, Stack);
    }

    // Get the element type.
    const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
    const llvm::Type *LElemPtrTy = LElemTy->getPointerTo(Ty.getAddressSpace());

    llvm::Value *VLASize = EmitVLASize(Ty);

    // Allocate memory for the array.
    llvm::AllocaInst *VLA =
      Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
    VLA->setAlignment(getContext().getDeclAlign(&D).getQuantity());

    DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
  }

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for local var declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    assert(HaveInsertPoint() && "Unexpected unreachable point!");

    DI->setLocation(D.getLocation());
    if (Target.useGlobalsForAutomaticVariables()) {
      DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
    } else
      DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
  }

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!ContainsLabel(Init))
      Init = 0;
    else
      EnsureInsertPoint();
  }

  if (isByRef) {
    const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext);

    EnsureInsertPoint();
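    // Initialize the header fields of the byref struct laid out by
    // BuildByRefType: __isa, __forwarding, __flags, and __size.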
    llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
    llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
    llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
    llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
    llvm::Value *V;
    int flag = 0;
    int flags = 0;

    needsDispose = true;

    if (Ty->isBlockPointerType()) {
      flag |= BLOCK_FIELD_IS_BLOCK;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    } else if (getContext().isObjCNSObjectType(Ty) ||
               Ty->isObjCObjectPointerType()) {
      flag |= BLOCK_FIELD_IS_OBJECT;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    } else if (getContext().getBlockVarCopyInits(&D)) {
      flag |= BLOCK_HAS_CXX_OBJ;
      flags |= BLOCK_HAS_COPY_DISPOSE;
    }

    // FIXME: Someone double check this.
    if (Ty.isObjCGCWeak())
      flag |= BLOCK_FIELD_IS_WEAK;

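    // A __weak __block variable is marked with a non-zero isa value.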
    int isa = 0;
    if (flag & BLOCK_FIELD_IS_WEAK)
      isa = 1;
    V = Builder.CreateIntToPtr(Builder.getInt32(isa), PtrToInt8Ty, "isa");
    Builder.CreateStore(V, isa_field);

    Builder.CreateStore(DeclPtr, forwarding_field);

    Builder.CreateStore(Builder.getInt32(flags), flags_field);

    const llvm::Type *V1;
    V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
    V = Builder.getInt32(CGM.GetTargetTypeStoreSize(V1).getQuantity());
    Builder.CreateStore(V, size_field);

    if (flags & BLOCK_HAS_COPY_DISPOSE) {
      SynthesizeCopyDisposeHelpers = true;
      llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
      Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag,
                                               Align.getQuantity(), &D),
                          copy_helper);

      llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
      Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
                                                  Align.getQuantity(), &D),
                          destroy_helper);
    }
  }

  if (SpecialInit) {
    SpecialInit(*this, D, DeclPtr);
  } else if (Init) {
    llvm::Value *Loc = DeclPtr;

    bool isVolatile = getContext().getCanonicalType(Ty).isVolatileQualified();

    // If the initializer was a simple constant initializer, we can optimize it
    // in various ways.
    if (IsSimpleConstantInitializer) {
      llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), Ty, this);
      assert(Init != 0 && "Wasn't a simple constant init?");

      llvm::Value *SizeVal =
        llvm::ConstantInt::get(IntPtrTy,
                               getContext().getTypeSizeInChars(Ty).getQuantity());

      const llvm::Type *BP = Builder.getInt8PtrTy();
      if (Loc->getType() != BP)
        Loc = Builder.CreateBitCast(Loc, BP, "tmp");

      // If the initializer is all or mostly zeros, codegen with memset then do
      // a few stores afterward.
      if (shouldUseMemSetPlusStoresToInitialize(Init,
                      CGM.getTargetData().getTypeAllocSize(Init->getType()))) {
        Builder.CreateMemSet(Loc, Builder.getInt8(0), SizeVal,
                             Align.getQuantity(), false);
        if (!Init->isNullValue()) {
          Loc = Builder.CreateBitCast(Loc, Init->getType()->getPointerTo());
          emitStoresForInitAfterMemset(Init, Loc, Builder);
        }

      } else {
        // Otherwise, create a temporary global with the initializer then
        // memcpy from the global to the alloca.
        std::string Name = GetStaticDeclName(*this, D, ".");
        llvm::GlobalVariable *GV =
          new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true,
                                   llvm::GlobalValue::InternalLinkage,
                                   Init, Name, 0, false, 0);
        GV->setAlignment(Align.getQuantity());

        llvm::Value *SrcPtr = GV;
        if (SrcPtr->getType() != BP)
          SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");

        Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, Align.getQuantity(), false);
      }
    } else if (Ty->isReferenceType()) {
      RValue RV = EmitReferenceBindingToExpr(Init, &D);
      if (isByRef)
        Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
                                      D.getNameAsString());
      EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Alignment, Ty);
    } else if (!hasAggregateLLVMType(Init->getType())) {
      llvm::Value *V = EmitScalarExpr(Init);
      if (isByRef) {
        // When the RHS has side effects, we must go through the 'forwarding'
        // field to get to the address of the __block variable descriptor.
        if (Init->HasSideEffects(getContext()))
          Loc = BuildBlockByrefAddress(DeclPtr, &D);
        else
          Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
                                        D.getNameAsString());
      }
      EmitStoreOfScalar(V, Loc, isVolatile, Alignment, Ty);
    } else if (Init->getType()->isAnyComplexType()) {
      if (isByRef)
        Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
                                      D.getNameAsString());
      EmitComplexExprIntoAddr(Init, Loc, isVolatile);
    } else {
      if (isByRef)
        Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
                                      D.getNameAsString());
      EmitAggExpr(Init, AggValueSlot::forAddr(Loc, isVolatile, true, false));
    }
  }

  // Handle CXX destruction of variables.
  QualType DtorTy(Ty);
  while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
    DtorTy = getContext().getBaseElementType(Array);
  if (const RecordType *RT = DtorTy->getAs<RecordType>())
    if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      if (!ClassDecl->hasTrivialDestructor()) {
        // Note: We suppress the destructor call when the corresponding NRVO
        // flag has been set.
        llvm::Value *Loc = DeclPtr;
        if (isByRef)
          Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
                                        D.getNameAsString());

        const CXXDestructorDecl *D = ClassDecl->getDestructor();
        assert(D && "EmitAutoVarDecl - destructor is null");

        if (const ConstantArrayType *Array =
              getContext().getAsConstantArrayType(Ty)) {
          EHStack.pushCleanup<CallArrayDtor>(NormalAndEHCleanup,
                                             D, Array, Loc);
        } else {
          EHStack.pushCleanup<CallVarDtor>(NormalAndEHCleanup,
                                           D, NRVOFlag, Loc);
        }
      }
    }

  // Handle the cleanup attribute
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant* F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);
    EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup,
                                             F, &Info, DeclPtr, &D);
  }

  // If this is a block variable, clean it up.
  if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly)
    EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup, DeclPtr);
}

/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
  // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
  assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
         "Invalid argument to EmitParmDecl");
  QualType Ty = D.getType();
  CanQualType CTy = getContext().getCanonicalType(Ty);

  llvm::Value *DeclPtr;
  // If this is an aggregate or variable sized value, reuse the input pointer.
  if (!Ty->isConstantSizeType() ||
      CodeGenFunction::hasAggregateLLVMType(Ty)) {
    DeclPtr = Arg;
  } else {
    // Otherwise, create a temporary to hold the value.
    DeclPtr = CreateMemTemp(Ty, D.getName() + ".addr");

    // Store the initial value into the alloca.
    unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
    EmitStoreOfScalar(Arg, DeclPtr, CTy.isVolatileQualified(), Alignment, Ty);
  }
  Arg->setName(D.getName());

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for param declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(D.getLocation());
    DI->EmitDeclareOfArgVariable(&D, DeclPtr, Builder);
  }
}