//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/CodeGen/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"

#include "ABIInfo.h"

using namespace clang;
using namespace CodeGen;

/***/

// FIXME: Use iterator and sidestep silly type array creation.

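/// Map a Clang calling convention onto the corresponding LLVM calling
/// convention, falling back to the C calling convention.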
static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
  return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                         llvm::SmallVector<CanQualType, 16>(),
                         FTNP->getExtInfo());
}

/// \param Args - contains any initial parameters besides those
///   in the formal type
static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
                                  llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                             CanQual<FunctionProtoType> FTP) {
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
  return CGT.getFunctionInfo(ResTy, ArgTys,
                             FTP->getExtInfo());
}

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  return ::getFunctionInfo(*this, ArgTys, FTP);
}

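/// Determine the calling convention requested by a declaration's attributes
/// (stdcall, fastcall, or thiscall), defaulting to the C calling convention.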
static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  return CC_C;
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
                                                 const FunctionProtoType *FTP) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, RD));

  return ::getFunctionInfo(*this, ArgTys,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer unless this is a static method.
  if (MD->isInstance())
    ArgTys.push_back(GetThisType(Context, MD->getParent()));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
                                                    CXXCtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, D->getParent()));

  // Check if we need to add a VTT parameter (which has type void **).
  if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
                                                    CXXDtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, D->getParent()));

  // Check if we need to add a VTT parameter (which has type void **).
  if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
  assert(isa<FunctionType>(FTy));
  if (isa<FunctionNoProtoType>(FTy))
    return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
  assert(isa<FunctionProtoType>(FTy));
  return getFunctionInfo(FTy.getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
  ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }
  return getFunctionInfo(GetReturnType(MD->getResultType()),
                         ArgTys,
                         FunctionType::ExtInfo(
                             /*NoReturn*/ false,
                             /*RegParm*/ 0,
                             getCallingConventionForDecl(MD)));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return getFunctionInfo(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return getFunctionInfo(DD, GD.getDtorType());

  return getFunctionInfo(FD);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType(i->second));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const FunctionArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType(i->second));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
                           const llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                            const FunctionType::ExtInfo &Info) {
#ifndef NDEBUG
  for (llvm::SmallVectorImpl<CanQualType>::const_iterator
         I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, Info, ResTy,
                          ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy,
                          ArgTys);
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext());

  return *FI;
}

CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
                               bool _NoReturn,
                               unsigned _RegParm,
                               CanQualType ResTy,
                               const llvm::SmallVectorImpl<CanQualType> &ArgTys)
  : CallingConvention(_CallingConvention),
    EffectiveCallingConvention(_CallingConvention),
    NoReturn(_NoReturn), RegParm(_RegParm)
{
  NumArgs = ArgTys.size();
  Args = new ArgInfo[1 + NumArgs];
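  // Slot 0 holds the return type; the argument types follow at indices
  // 1 through NumArgs.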
  Args[0].type = ResTy;
  for (unsigned i = 0; i < NumArgs; ++i)
    Args[1 + i].type = ArgTys[i];
}

/***/

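/// GetExpandedTypes - Append the LLVM types of the fields of a structure that
/// is passed with the Expand ABI convention, recursing into nested aggregates.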
void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      GetExpandedTypes(FT, ArgTys);
    } else {
      ArgTys.push_back(ConvertType(FT));
    }
  }
}

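/// ExpandTypeFromArgs - Reassemble a structure of type \arg Ty at the lvalue
/// \arg LV from consecutive scalar function arguments starting at \arg AI,
/// returning the iterator for the first unconsumed argument.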
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}

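/// ExpandTypeToArgs - Flatten the fields of the structure rvalue \arg RV into
/// individual scalar call arguments appended to \arg Args.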
void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.
    llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
    llvm::StoreInst *Store =
      CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
    // FIXME: Use better alignment / avoid requiring aligned store.
    Store->setAlignment(1);
    return CGF.Builder.CreateLoad(Tmp);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}

/***/

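/// ReturnTypeUsesSret - Return true if the function's result is returned
/// indirectly through a hidden sret pointer argument.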
bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = getFunctionInfo(GD);

  // For definition purposes, don't consider a K&R function variadic.
  bool Variadic = false;
  if (const FunctionProtoType *FPT =
        cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
    Variadic = FPT->isVariadic();

  return GetFunctionType(FI, Variadic);
}

const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    ResultType = ConvertType(RetTy);
    break;

  case ABIArgInfo::Indirect: {
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
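    // The result is returned through an implicit pointer parameter, which
    // becomes the first argument of the LLVM function type.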
    const llvm::Type *STy = ConvertType(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::Coerce:
    ResultType = RetAI.getCoerceToType();
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      ArgTys.push_back(AI.getCoerceToType());
      break;

    case ABIArgInfo::Indirect: {
      // indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      ArgTys.push_back(ConvertType(it->type));
      break;

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, ArgTys);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}

const llvm::Type *
CodeGenTypes::GetFunctionTypeForVTable(const CXXMethodDecl *MD) {
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!VerifyFuncTypeComplete(FPT))
    return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic());

  return llvm::OpaqueType::get(getLLVMContext());
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs |= llvm::Attribute::NoReturn;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs |= llvm::Attribute::NoAlias;
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs |= llvm::Attribute::OptimizeForSize;
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
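  // LLVM attribute index 0 refers to the return value and ~0U to the function
  // itself; formal parameters start at index 1.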
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
   if (RetTy->isSignedIntegerType()) {
     RetAttrs |= llvm::Attribute::SExt;
   } else if (RetTy->isUnsignedIntegerType()) {
     RetAttrs |= llvm::Attribute::ZExt;
   }
   // FALLTHROUGH
  case ABIArgInfo::Direct:
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: we need to honour command line settings also...
  // FIXME: RegParm should be reduced in case of nested functions and/or global
  // register variable.
  signed RegParm = FI.getRegParm();

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because we don't have the parameter variables yet.

    switch (AI.getKind()) {
    case ABIArgInfo::Coerce:
      break;

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attributes |= llvm::Attribute::ByVal;

      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Extend:
     if (ParamType->isSignedIntegerType()) {
       Attributes |= llvm::Attribute::SExt;
     } else if (ParamType->isUnsignedIntegerType()) {
       Attributes |= llvm::Attribute::ZExt;
     }
     // FALLS THROUGH
    case ABIArgInfo::Direct:
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing, aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Create a temporary alloca to hold the argument; the rest of
        // codegen expects to access aggregates & complex values by
        // reference.
        V = CreateMemTemp(Ty);
        Builder.CreateStore(AI, V);
      } else {
        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);

        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + llvm::Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateMemTemp(Ty));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }

      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Coerce: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
      // result in a new alloca anyway, so we could just store into that
      // directly if we broke the abstraction down more.
      llvm::Value *V = CreateMemTemp(Ty, "coerce");
      CreateCoercedStore(AI, V, /*DestIsVolatile=*/false, *this);
      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         llvm::Value *ReturnValue) {
  llvm::Value *RV = 0;

  // Functions with no result always return void.
  if (ReturnValue) {
    QualType RetTy = FI.getReturnType();
    const ABIArgInfo &RetAI = FI.getReturnInfo();

    switch (RetAI.getKind()) {
    case ABIArgInfo::Indirect:
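      // The sret pointer is the first LLVM argument of the current function.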
      if (RetTy->isAnyComplexType()) {
        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        // Do nothing; aggregates get evaluated directly into the destination.
      } else {
        EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                          false, RetTy);
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      // The internal return value temp always will have
      // pointer-to-return-type type.
      RV = Builder.CreateLoad(ReturnValue);
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
      break;

    case ABIArgInfo::Expand:
      assert(0 && "Invalid ABI kind for return argument");
    }
  }

  if (RV) {
    Builder.CreateRet(RV);
  } else {
    Builder.CreateRetVoid();
  }
}

RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *Local = GetAddrOfLocalVar(Param);

  QualType ArgType = Param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
  if (const ReferenceType *RefType = ArgType->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(RefType->getPointeeType()))
      return RValue::getAggregate(Local);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return RValue::get(Builder.CreateLoad(Local));
  }

  if (ArgType->isAnyComplexType())
    return RValue::getComplex(LoadComplexFromAddr(Local, /*volatile*/ false));

  if (hasAggregateLLVMType(ArgType))
    return RValue::getAggregate(Local);

  return RValue::get(EmitLoadOfScalar(Local, false, ArgType));
}

RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
  if (ArgType->isReferenceType())
    return EmitReferenceBindingToExpr(E);

  return EmitAnyExprToTemp(E);
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSret(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateMemTemp(I->second));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->second, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->second, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size()) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  llvm::BasicBlock *InvokeDest = getInvokeDest();
  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::CallSite CS;
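  // Emit a plain call if there is no invoke destination or the callee cannot
  // unwind; otherwise emit an invoke so that exceptions propagate to
  // InvokeDest.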
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }
  if (callOrInvoke) {
    *callOrInvoke = CS.getInstruction();
  }

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters generally
    // are not ready to handle emitting expressions at unreachable points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr) {
        DestPtr = CreateMemTemp(RetTy, "agg.tmp");
        DestIsVolatile = false;
      }
      Builder.CreateStore(CI, DestPtr, DestIsVolatile);
      return RValue::getAggregate(DestPtr);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}