1 //===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliancy.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "CGCall.h"
16 #include "CodeGenFunction.h"
17 #include "CodeGenModule.h"
18 #include "clang/Basic/TargetInfo.h"
19 #include "clang/AST/Decl.h"
20 #include "clang/AST/DeclCXX.h"
21 #include "clang/AST/DeclObjC.h"
22 #include "clang/CodeGen/CodeGenOptions.h"
23 #include "llvm/Attributes.h"
24 #include "llvm/Support/CallSite.h"
25 #include "llvm/Target/TargetData.h"
26 
27 #include "ABIInfo.h"
28 
29 using namespace clang;
30 using namespace CodeGen;
31 
32 /***/
33 
34 // FIXME: Use iterator and sidestep silly type array creation.
35 
36 static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
37   switch (CC) {
38   default: return llvm::CallingConv::C;
39   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
40   case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
41   }
42 }
43 
44 /// Derives the 'this' type for codegen purposes, i.e. ignoring method
45 /// qualification.
46 /// FIXME: address space qualification?
47 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
48   QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
49   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
50 }
51 
52 /// Returns the canonical formal type of the given C++ method.
53 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
54   return MD->getType()->getCanonicalTypeUnqualified()
55            .getAs<FunctionProtoType>();
56 }
57 
58 /// Returns the "extra-canonicalized" return type, which discards
59 /// qualifiers on the return type.  Codegen doesn't care about them,
60 /// and it makes ABI code a little easier to be able to assume that
61 /// all parameter and return types are top-level unqualified.
62 static CanQualType GetReturnType(QualType RetTy) {
63   return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
64 }
65 
66 const CGFunctionInfo &
67 CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
68   return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
69                          llvm::SmallVector<CanQualType, 16>(),
70                          FTNP->getExtInfo());
71 }
72 
73 /// \param Args - contains any initial parameters besides those
74 ///   in the formal type
75 static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
76                                   llvm::SmallVectorImpl<CanQualType> &ArgTys,
77                                              CanQual<FunctionProtoType> FTP) {
78   // FIXME: Kill copy.
79   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
80     ArgTys.push_back(FTP->getArgType(i));
81   CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
82   return CGT.getFunctionInfo(ResTy, ArgTys,
83                              FTP->getExtInfo());
84 }
85 
86 const CGFunctionInfo &
87 CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
88   llvm::SmallVector<CanQualType, 16> ArgTys;
89   return ::getFunctionInfo(*this, ArgTys, FTP);
90 }
91 
92 static CallingConv getCallingConventionForDecl(const Decl *D) {
93   // Set the appropriate calling convention for the Function.
94   if (D->hasAttr<StdCallAttr>())
95     return CC_X86StdCall;
96 
97   if (D->hasAttr<FastCallAttr>())
98     return CC_X86FastCall;
99 
100   return CC_C;
101 }
102 
103 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
104                                                  const FunctionProtoType *FTP) {
105   llvm::SmallVector<CanQualType, 16> ArgTys;
106 
107   // Add the 'this' pointer.
108   ArgTys.push_back(GetThisType(Context, RD));
109 
110   return ::getFunctionInfo(*this, ArgTys,
111               FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
112 }
113 
114 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
115   llvm::SmallVector<CanQualType, 16> ArgTys;
116 
117   // Add the 'this' pointer unless this is a static method.
118   if (MD->isInstance())
119     ArgTys.push_back(GetThisType(Context, MD->getParent()));
120 
121   return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
122 }
123 
124 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
125                                                     CXXCtorType Type) {
126   llvm::SmallVector<CanQualType, 16> ArgTys;
127 
128   // Add the 'this' pointer.
129   ArgTys.push_back(GetThisType(Context, D->getParent()));
130 
131   // Check if we need to add a VTT parameter (which has type void **).
132   if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0)
133     ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
134 
135   return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
136 }
137 
138 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
139                                                     CXXDtorType Type) {
140   llvm::SmallVector<CanQualType, 16> ArgTys;
141 
142   // Add the 'this' pointer.
143   ArgTys.push_back(GetThisType(Context, D->getParent()));
144 
145   // Check if we need to add a VTT parameter (which has type void **).
146   if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0)
147     ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
148 
149   return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
150 }
151 
152 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
153   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
154     if (MD->isInstance())
155       return getFunctionInfo(MD);
156 
157   CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
158   assert(isa<FunctionType>(FTy));
159   if (isa<FunctionNoProtoType>(FTy))
160     return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
161   assert(isa<FunctionProtoType>(FTy));
162   return getFunctionInfo(FTy.getAs<FunctionProtoType>());
163 }
164 
165 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
166   llvm::SmallVector<CanQualType, 16> ArgTys;
167   ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
168   ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
169   // FIXME: Kill copy?
170   for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
171          e = MD->param_end(); i != e; ++i) {
172     ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
173   }
174   return getFunctionInfo(GetReturnType(MD->getResultType()),
175                          ArgTys,
176                          FunctionType::ExtInfo(
177                              /*NoReturn*/ false,
178                              /*RegParm*/ 0,
179                              getCallingConventionForDecl(MD)));
180 }
181 
182 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
183   // FIXME: Do we need to handle ObjCMethodDecl?
184   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
185 
186   if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
187     return getFunctionInfo(CD, GD.getCtorType());
188 
189   if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
190     return getFunctionInfo(DD, GD.getDtorType());
191 
192   return getFunctionInfo(FD);
193 }
194 
195 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
196                                                     const CallArgList &Args,
197                                             const FunctionType::ExtInfo &Info) {
198   // FIXME: Kill copy.
199   llvm::SmallVector<CanQualType, 16> ArgTys;
200   for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
201        i != e; ++i)
202     ArgTys.push_back(Context.getCanonicalParamType(i->second));
203   return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
204 }
205 
206 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
207                                                     const FunctionArgList &Args,
208                                             const FunctionType::ExtInfo &Info) {
209   // FIXME: Kill copy.
210   llvm::SmallVector<CanQualType, 16> ArgTys;
211   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
212        i != e; ++i)
213     ArgTys.push_back(Context.getCanonicalParamType(i->second));
214   return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
215 }
216 
/// The master overload: look up or create the unique CGFunctionInfo for the
/// given return type, argument types, and extended info, computing ABI
/// lowering information for newly created entries.  All other
/// getFunctionInfo overloads funnel into this one.
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
                           const llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                            const FunctionType::ExtInfo &Info) {
#ifndef NDEBUG
  // All argument types must already be canonicalized as parameters
  // (callers use Context.getCanonicalParamType or equivalent).
  for (llvm::SmallVectorImpl<CanQualType>::const_iterator
         I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, Info, ResTy,
                          ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy, ArgTys);
  // NOTE(review): the node is inserted into the FoldingSet BEFORE ABI info
  // is computed — presumably so a recursive lookup during computeInfo finds
  // the partially-built entry instead of recursing forever; confirm before
  // reordering.
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext());

  return *FI;
}
247 
248 CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
249                                bool _NoReturn,
250                                unsigned _RegParm,
251                                CanQualType ResTy,
252                                const llvm::SmallVectorImpl<CanQualType> &ArgTys)
253   : CallingConvention(_CallingConvention),
254     EffectiveCallingConvention(_CallingConvention),
255     NoReturn(_NoReturn), RegParm(_RegParm)
256 {
257   NumArgs = ArgTys.size();
258   Args = new ArgInfo[1 + NumArgs];
259   Args[0].type = ResTy;
260   for (unsigned i = 0; i < NumArgs; ++i)
261     Args[1 + i].type = ArgTys[i];
262 }
263 
264 /***/
265 
266 void CodeGenTypes::GetExpandedTypes(QualType Ty,
267                                     std::vector<const llvm::Type*> &ArgTys) {
268   const RecordType *RT = Ty->getAsStructureType();
269   assert(RT && "Can only expand structure types.");
270   const RecordDecl *RD = RT->getDecl();
271   assert(!RD->hasFlexibleArrayMember() &&
272          "Cannot expand structure with flexible array.");
273 
274   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
275          i != e; ++i) {
276     const FieldDecl *FD = *i;
277     assert(!FD->isBitField() &&
278            "Cannot expand structure with bit-field members.");
279 
280     QualType FT = FD->getType();
281     if (CodeGenFunction::hasAggregateLLVMType(FT)) {
282       GetExpandedTypes(FT, ArgTys);
283     } else {
284       ArgTys.push_back(ConvertType(FT));
285     }
286   }
287 }
288 
289 llvm::Function::arg_iterator
290 CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
291                                     llvm::Function::arg_iterator AI) {
292   const RecordType *RT = Ty->getAsStructureType();
293   assert(RT && "Can only expand structure types.");
294 
295   RecordDecl *RD = RT->getDecl();
296   assert(LV.isSimple() &&
297          "Unexpected non-simple lvalue during struct expansion.");
298   llvm::Value *Addr = LV.getAddress();
299   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
300          i != e; ++i) {
301     FieldDecl *FD = *i;
302     QualType FT = FD->getType();
303 
304     // FIXME: What are the right qualifiers here?
305     LValue LV = EmitLValueForField(Addr, FD, 0);
306     if (CodeGenFunction::hasAggregateLLVMType(FT)) {
307       AI = ExpandTypeFromArgs(FT, LV, AI);
308     } else {
309       EmitStoreThroughLValue(RValue::get(AI), LV, FT);
310       ++AI;
311     }
312   }
313 
314   return AI;
315 }
316 
317 void
318 CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
319                                   llvm::SmallVector<llvm::Value*, 16> &Args) {
320   const RecordType *RT = Ty->getAsStructureType();
321   assert(RT && "Can only expand structure types.");
322 
323   RecordDecl *RD = RT->getDecl();
324   assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
325   llvm::Value *Addr = RV.getAggregateAddr();
326   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
327          i != e; ++i) {
328     FieldDecl *FD = *i;
329     QualType FT = FD->getType();
330 
331     // FIXME: What are the right qualifiers here?
332     LValue LV = EmitLValueForField(Addr, FD, 0);
333     if (CodeGenFunction::hasAggregateLLVMType(FT)) {
334       ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
335     } else {
336       RValue RV = EmitLoadOfLValue(LV, FT);
337       assert(RV.isScalar() &&
338              "Unexpected non-scalar rvalue during struct expansion.");
339       Args.push_back(RV.getScalarVal());
340     }
341   }
342 }
343 
344 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
345 /// a pointer to an object of type \arg Ty.
346 ///
347 /// This safely handles the case when the src type is smaller than the
348 /// destination type; in this situation the values of bits which not
349 /// present in the src are undefined.
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when have access
    // to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.  Allocate a temporary of the destination type, copy the
    // source value into its low bytes via a bitcast store, then load the
    // whole temporary; the bytes beyond SrcSize are left undefined.
    llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
    llvm::StoreInst *Store =
      CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
    // FIXME: Use better alignment / avoid requiring aligned store.
    Store->setAlignment(1);
    return CGF.Builder.CreateLoad(Tmp);
  }
}
385 
386 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
387 /// where the source and destination may have different types.
388 ///
389 /// This safely handles the case when the src type is larger than the
390 /// destination type; the upper bits of the src will be lost.
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.  Spill the source to a temporary of its own type, then
    // reload it as the (smaller) destination type, dropping the upper
    // bytes, and store the result to the real destination.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when have access
    // to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}
428 
429 /***/
430 
431 bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
432   return FI.getReturnInfo().isIndirect();
433 }
434 
435 const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
436   const CGFunctionInfo &FI = getFunctionInfo(GD);
437 
438   // For definition purposes, don't consider a K&R function variadic.
439   bool Variadic = false;
440   if (const FunctionProtoType *FPT =
441         cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
442     Variadic = FPT->isVariadic();
443 
444   return GetFunctionType(FI, Variadic);
445 }
446 
/// Build the LLVM function type corresponding to the ABI-lowered signature
/// in \arg FI.  The ABIArgInfo kind of the return and of each argument
/// determines how each maps onto LLVM-level results/parameters: Indirect
/// returns become a leading pointer parameter with a void result, Ignore
/// produces nothing, Coerce substitutes the coercion type, and Expand
/// flattens an aggregate into multiple scalar parameters.
const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    // Returned directly: use the natural LLVM type.
    ResultType = ConvertType(RetTy);
    break;

  case ABIArgInfo::Indirect: {
    // Returned in memory: the function returns void and takes a pointer
    // to the result storage as its first (sret) parameter.
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    const llvm::Type *STy = ConvertType(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::Coerce:
    // Returned as a different (coerced) LLVM type chosen by the ABI.
    ResultType = RetAI.getCoerceToType();
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      // No LLVM parameter at all for this argument.
      break;

    case ABIArgInfo::Coerce:
      ArgTys.push_back(AI.getCoerceToType());
      break;

    case ABIArgInfo::Indirect: {
      // indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      ArgTys.push_back(ConvertType(it->type));
      break;

    case ABIArgInfo::Expand:
      // One LLVM parameter per flattened scalar field of the aggregate.
      GetExpandedTypes(it->type, ArgTys);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}
513 
514 static bool HasIncompleteReturnTypeOrArgumentTypes(const FunctionProtoType *T) {
515   if (const TagType *TT = T->getResultType()->getAs<TagType>()) {
516     if (!TT->getDecl()->isDefinition())
517       return true;
518   }
519 
520   for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) {
521     if (const TagType *TT = T->getArgType(i)->getAs<TagType>()) {
522       if (!TT->getDecl()->isDefinition())
523         return true;
524     }
525   }
526 
527   return false;
528 }
529 
530 const llvm::Type *
531 CodeGenTypes::GetFunctionTypeForVTable(const CXXMethodDecl *MD) {
532   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
533 
534   if (!HasIncompleteReturnTypeOrArgumentTypes(FPT))
535     return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic());
536 
537   return llvm::OpaqueType::get(getLLVMContext());
538 }
539 
/// Build the LLVM attribute list for a function with info \arg FI declared
/// by \arg TargetDecl (may be null), writing per-index attributes into
/// \arg PAL and the effective calling convention into \arg CallingConv.
///
/// Attribute indices: 0 is the return value, 1 is the first LLVM parameter.
/// `Index` tracks the current LLVM parameter slot and must stay in sync
/// with how GetFunctionType maps ABIArgInfo kinds to parameters (Ignore
/// consumes no slot; Expand consumes one per flattened field; Indirect
/// return consumes slot 1 for sret).
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs |= llvm::Attribute::NoReturn;

  // Translate declaration attributes into LLVM function attributes.
  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    // 'const' implies 'pure', so check it first.
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs |= llvm::Attribute::NoAlias;
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs |= llvm::Attribute::OptimizeForSize;
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
   // Extended integer returns are sign- or zero-extended per signedness.
   if (RetTy->isSignedIntegerType()) {
     RetAttrs |= llvm::Attribute::SExt;
   } else if (RetTy->isUnsignedIntegerType()) {
     RetAttrs |= llvm::Attribute::ZExt;
   }
   // FALLTHROUGH
  case ABIArgInfo::Direct:
    break;

  case ABIArgInfo::Indirect:
    // The sret pointer occupies LLVM parameter slot 1.
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: we need to honour command line settings also...
  // FIXME: RegParm should be reduced in case of nested functions and/or global
  // register variable.
  // Signed so it can go negative as parameters consume the register budget.
  signed RegParm = FI.getRegParm();

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because the parameter mapping is not
    // one-to-one at this level.

    switch (AI.getKind()) {
    case ABIArgInfo::Coerce:
      break;

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attributes |= llvm::Attribute::ByVal;

      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Extend:
     if (ParamType->isSignedIntegerType()) {
       Attributes |= llvm::Attribute::SExt;
     } else if (ParamType->isUnsignedIntegerType()) {
       Attributes |= llvm::Attribute::ZExt;
     }
     // FALLS THROUGH
    case ABIArgInfo::Direct:
      // Consume register-parameter budget in pointer-width units; a
      // parameter gets 'inreg' only if the budget does not go negative.
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      // Advance Index past every LLVM parameter this argument expanded to;
      // no attributes are applied to the expanded parameters.
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}
678 
/// Emit the function prologue: bind each formal parameter in \arg Args to
/// an appropriate llvm::Value derived from the LLVM arguments of \arg Fn,
/// according to the ABIArgInfo kind recorded in \arg FI.  The LLVM argument
/// iterator AI must be advanced exactly in step with how GetFunctionType
/// laid out the parameters (note the `continue`s that skip the shared
/// `++AI` for Ignore/Expand).
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.  When the return is indirect, the
  // sret pointer is the first LLVM argument, before any real parameters.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      // The argument arrives as a pointer to its value.
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing, aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // The argument arrives by value in AI.
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Create a temporary alloca to hold the argument; the rest of
        // codegen expects to access aggregates & complex values by
        // reference.
        V = CreateMemTemp(Ty);
        Builder.CreateStore(AI, V);
      } else {
        // 'restrict' parameters become noalias on the LLVM argument.
        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);

        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + llvm::Twine(Index));
      // AI already points past the expanded arguments; skip the shared ++AI.
      continue;
    }

    case ABIArgInfo::Ignore:
      // No LLVM argument exists for this parameter; bind the parameter
      // variable to a dummy value so later references still work.
      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateMemTemp(Ty));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }

      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Coerce: {
      // The argument arrives as a different LLVM type; store it into a
      // temporary through a coercing store, then reload as the real type.
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
      // result in a new alloca anyway, so we could just store into that
      // directly if we broke the abstraction down more.
      llvm::Value *V = CreateMemTemp(Ty, "coerce");
      CreateCoercedStore(AI, V, /*DestIsVolatile=*/false, *this);
      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}
812 
813 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
814                                          llvm::Value *ReturnValue) {
815   llvm::Value *RV = 0;
816 
817   // Functions with no result always return void.
818   if (ReturnValue) {
819     QualType RetTy = FI.getReturnType();
820     const ABIArgInfo &RetAI = FI.getReturnInfo();
821 
822     switch (RetAI.getKind()) {
823     case ABIArgInfo::Indirect:
824       if (RetTy->isAnyComplexType()) {
825         ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
826         StoreComplexToAddr(RT, CurFn->arg_begin(), false);
827       } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
828         // Do nothing; aggregrates get evaluated directly into the destination.
829       } else {
830         EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
831                           false, RetTy);
832       }
833       break;
834 
835     case ABIArgInfo::Extend:
836     case ABIArgInfo::Direct:
837       // The internal return value temp always will have
838       // pointer-to-return-type type.
839       RV = Builder.CreateLoad(ReturnValue);
840       break;
841 
842     case ABIArgInfo::Ignore:
843       break;
844 
845     case ABIArgInfo::Coerce:
846       RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
847       break;
848 
849     case ABIArgInfo::Expand:
850       assert(0 && "Invalid ABI kind for return argument");
851     }
852   }
853 
854   if (RV) {
855     Builder.CreateRet(RV);
856   } else {
857     Builder.CreateRetVoid();
858   }
859 }
860 
861 RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
862   if (ArgType->isReferenceType())
863     return EmitReferenceBindingToExpr(E);
864 
865   return EmitAnyExprToTemp(E);
866 }
867 
/// EmitCall - Generate a call to \arg Callee, lowering the clang-level
/// arguments in \arg CallArgs according to the per-argument ABI
/// classification recorded in \arg CallInfo, and re-materializing the
/// ABI-lowered result back into an RValue of the clang return type.
/// \arg ReturnValue optionally supplies a destination slot for aggregate
/// results; if \arg callOrInvoke is non-null, *callOrInvoke receives the
/// emitted call or invoke instruction.
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();


  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  // The sret pointer, when present, is always the first IR argument.
  if (CGM.ReturnTypeUsesSret(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  // Walk the clang-level arguments and the ABI classifications in lock-step;
  // each I->first is the RValue and I->second its clang type.
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      // Pass in memory: scalars/complex values are spilled to a fresh
      // temporary; aggregates already live in memory, so pass their address.
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateMemTemp(I->second));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      // Pass the value in registers.  Complex values become a two-element
      // first-class aggregate; aggregates are loaded as a single value.
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      // No IR argument is emitted for this clang argument.
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      // Spill the value to memory (if not already there), then load it back
      // as the ABI-mandated coercion type.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->second, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->second, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      // One clang argument expands to multiple IR arguments.
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      // Only strip the cast when return type, arity, and every parameter
      // type line up exactly with the arguments we are about to pass.
      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size()) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }


  llvm::BasicBlock *InvokeDest = getInvokeDest();
  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  // Emit a plain call when there is no landing pad to unwind to, or when
  // the callee is known not to unwind; otherwise emit an invoke.
  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    // Continue emitting on the normal-return edge.
    EmitBlock(Cont);
  }
  if (callOrInvoke) {
    *callOrInvoke = CS.getInstruction();
  }

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Re-materialize the ABI-lowered return value as an RValue of the clang
  // return type.
  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    // The result was written through the sret pointer (Args[0]).
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      // Unpack the {real, imag} first-class aggregate.
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Store the directly-returned aggregate into the destination slot
      // (or a fresh temporary if the caller gave us none).
      llvm::Value *DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr) {
        DestPtr = CreateMemTemp(RetTy, "agg.tmp");
        DestIsVolatile = false;
      }
      Builder.CreateStore(CI, DestPtr, DestIsVolatile);
      return RValue::getAggregate(DestPtr);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    // Store the coerced IR value to memory, then reload it as the clang
    // return type.
    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}
1083 
1084 /* VarArg handling */
1085 
1086 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
1087   return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
1088 }
1089