//===----- CGCall.cpp - Encapsulate calling convention details --*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "CGCall.h"
16 #include "CodeGenFunction.h"
17 #include "CodeGenModule.h"
18 #include "clang/Basic/TargetInfo.h"
19 #include "clang/AST/Decl.h"
20 #include "clang/AST/DeclCXX.h"
21 #include "clang/AST/DeclObjC.h"
22 #include "clang/CodeGen/CodeGenOptions.h"
23 #include "llvm/Attributes.h"
24 #include "llvm/Support/CallSite.h"
25 #include "llvm/Target/TargetData.h"
26 
27 #include "ABIInfo.h"
28 
29 using namespace clang;
30 using namespace CodeGen;
31 
32 /***/
33 
34 // FIXME: Use iterator and sidestep silly type array creation.
35 
36 const
37 CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
  // FIXME: Set calling convention correctly; it needs to be associated with the
  // type somehow.
40   return getFunctionInfo(FTNP->getResultType(),
41                          llvm::SmallVector<QualType, 16>(), 0);
42 }
43 
44 const
45 CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
46   llvm::SmallVector<QualType, 16> ArgTys;
47   // FIXME: Kill copy.
48   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
49     ArgTys.push_back(FTP->getArgType(i));
  // FIXME: Set calling convention correctly; it needs to be associated with the
  // type somehow.
52   return getFunctionInfo(FTP->getResultType(), ArgTys, 0);
53 }
54 
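/// getCallingConventionForDecl - Return the LLVM calling convention to use for
/// a call to or definition of the given declaration, based on its stdcall or
/// fastcall attribute; defaults to the C calling convention.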
55 static unsigned getCallingConventionForDecl(const Decl *D) {
56   // Set the appropriate calling convention for the Function.
57   if (D->hasAttr<StdCallAttr>())
58     return llvm::CallingConv::X86_StdCall;
59 
60   if (D->hasAttr<FastCallAttr>())
61     return llvm::CallingConv::X86_FastCall;
62 
63   return llvm::CallingConv::C;
64 }
65 
66 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
67                                                  const FunctionProtoType *FTP) {
68   llvm::SmallVector<QualType, 16> ArgTys;
69 
70   // Add the 'this' pointer.
71   ArgTys.push_back(Context.getPointerType(Context.getTagDeclType(RD)));
72 
73   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
74     ArgTys.push_back(FTP->getArgType(i));
75 
  // FIXME: Set calling convention correctly; it needs to be associated with the
  // type somehow.
78   return getFunctionInfo(FTP->getResultType(), ArgTys, 0);
79 }
80 
81 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
82   llvm::SmallVector<QualType, 16> ArgTys;
83   // Add the 'this' pointer unless this is a static method.
84   if (MD->isInstance())
85     ArgTys.push_back(MD->getThisType(Context));
86 
87   const FunctionProtoType *FTP = MD->getType()->getAs<FunctionProtoType>();
88   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
89     ArgTys.push_back(FTP->getArgType(i));
90   return getFunctionInfo(FTP->getResultType(), ArgTys,
91                          getCallingConventionForDecl(MD));
92 }
93 
94 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
95                                                     CXXCtorType Type) {
96   llvm::SmallVector<QualType, 16> ArgTys;
97 
98   // Add the 'this' pointer.
99   ArgTys.push_back(D->getThisType(Context));
100 
101   // Check if we need to add a VTT parameter (which has type void **).
102   if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0)
103     ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
104 
105   const FunctionProtoType *FTP = D->getType()->getAs<FunctionProtoType>();
106   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
107     ArgTys.push_back(FTP->getArgType(i));
108   return getFunctionInfo(FTP->getResultType(), ArgTys,
109                          getCallingConventionForDecl(D));
110 }
111 
112 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
113                                                     CXXDtorType Type) {
114   llvm::SmallVector<QualType, 16> ArgTys;
115 
116   // Add the 'this' pointer.
117   ArgTys.push_back(D->getThisType(Context));
118 
119   // Check if we need to add a VTT parameter (which has type void **).
120   if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0)
121     ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
122 
123   const FunctionProtoType *FTP = D->getType()->getAs<FunctionProtoType>();
124   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
125     ArgTys.push_back(FTP->getArgType(i));
126   return getFunctionInfo(FTP->getResultType(), ArgTys,
127                          getCallingConventionForDecl(D));
128 }
129 
130 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
131   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
132     if (MD->isInstance())
133       return getFunctionInfo(MD);
134 
135   unsigned CallingConvention = getCallingConventionForDecl(FD);
136   const FunctionType *FTy = FD->getType()->getAs<FunctionType>();
137   if (const FunctionNoProtoType *FNTP = dyn_cast<FunctionNoProtoType>(FTy))
138     return getFunctionInfo(FNTP->getResultType(),
139                            llvm::SmallVector<QualType, 16>(),
140                            CallingConvention);
141 
142   const FunctionProtoType *FPT = cast<FunctionProtoType>(FTy);
143   llvm::SmallVector<QualType, 16> ArgTys;
144   // FIXME: Kill copy.
145   for (unsigned i = 0, e = FPT->getNumArgs(); i != e; ++i)
146     ArgTys.push_back(FPT->getArgType(i));
147   return getFunctionInfo(FPT->getResultType(), ArgTys, CallingConvention);
148 }
149 
150 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
151   llvm::SmallVector<QualType, 16> ArgTys;
152   ArgTys.push_back(MD->getSelfDecl()->getType());
153   ArgTys.push_back(Context.getObjCSelType());
154   // FIXME: Kill copy?
155   for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
156          e = MD->param_end(); i != e; ++i)
157     ArgTys.push_back((*i)->getType());
158   return getFunctionInfo(MD->getResultType(), ArgTys,
159                          getCallingConventionForDecl(MD));
160 }
161 
162 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
163                                                     const CallArgList &Args,
164                                                     unsigned CallingConvention){
165   // FIXME: Kill copy.
166   llvm::SmallVector<QualType, 16> ArgTys;
167   for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
168        i != e; ++i)
169     ArgTys.push_back(i->second);
170   return getFunctionInfo(ResTy, ArgTys, CallingConvention);
171 }
172 
173 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
174                                                     const FunctionArgList &Args,
175                                                     unsigned CallingConvention){
176   // FIXME: Kill copy.
177   llvm::SmallVector<QualType, 16> ArgTys;
178   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
179        i != e; ++i)
180     ArgTys.push_back(i->second);
181   return getFunctionInfo(ResTy, ArgTys, CallingConvention);
182 }
183 
184 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
185                                   const llvm::SmallVector<QualType, 16> &ArgTys,
186                                                     unsigned CallingConvention){
187   // Lookup or create unique function info.
188   llvm::FoldingSetNodeID ID;
189   CGFunctionInfo::Profile(ID, CallingConvention, ResTy,
190                           ArgTys.begin(), ArgTys.end());
191 
192   void *InsertPos = 0;
193   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
194   if (FI)
195     return *FI;
196 
197   // Construct the function info.
198   FI = new CGFunctionInfo(CallingConvention, ResTy, ArgTys);
199   FunctionInfos.InsertNode(FI, InsertPos);
200 
201   // Compute ABI information.
202   getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext());
203 
204   return *FI;
205 }
206 
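// CGFunctionInfo stores the return type in Args[0] and the argument types in
// Args[1..NumArgs]; the per-slot ABIArgInfo is filled in afterwards by the
// target ABIInfo in CodeGenTypes::getFunctionInfo.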
207 CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
208                                QualType ResTy,
209                                const llvm::SmallVector<QualType, 16> &ArgTys)
210   : CallingConvention(_CallingConvention),
211     EffectiveCallingConvention(_CallingConvention)
212 {
213   NumArgs = ArgTys.size();
214   Args = new ArgInfo[1 + NumArgs];
215   Args[0].type = ResTy;
216   for (unsigned i = 0; i < NumArgs; ++i)
217     Args[1 + i].type = ArgTys[i];
218 }
219 
220 /***/
221 
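/// GetExpandedTypes - Append the converted LLVM types of the scalar fields of
/// the structure type Ty to ArgTys, recursing into nested aggregate fields.
/// For example, 'struct { int a; struct { float x, y; } b; }' expands to
/// (i32, float, float).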
222 void CodeGenTypes::GetExpandedTypes(QualType Ty,
223                                     std::vector<const llvm::Type*> &ArgTys) {
224   const RecordType *RT = Ty->getAsStructureType();
225   assert(RT && "Can only expand structure types.");
226   const RecordDecl *RD = RT->getDecl();
227   assert(!RD->hasFlexibleArrayMember() &&
228          "Cannot expand structure with flexible array.");
229 
230   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
231          i != e; ++i) {
232     const FieldDecl *FD = *i;
233     assert(!FD->isBitField() &&
234            "Cannot expand structure with bit-field members.");
235 
236     QualType FT = FD->getType();
237     if (CodeGenFunction::hasAggregateLLVMType(FT)) {
238       GetExpandedTypes(FT, ArgTys);
239     } else {
240       ArgTys.push_back(ConvertType(FT));
241     }
242   }
243 }
244 
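/// ExpandTypeFromArgs - Reconstruct a structure of type Ty at the address
/// described by LV from the sequence of expanded scalar arguments starting at
/// AI; returns an iterator one past the last argument consumed.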
245 llvm::Function::arg_iterator
246 CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
247                                     llvm::Function::arg_iterator AI) {
248   const RecordType *RT = Ty->getAsStructureType();
249   assert(RT && "Can only expand structure types.");
250 
251   RecordDecl *RD = RT->getDecl();
252   assert(LV.isSimple() &&
253          "Unexpected non-simple lvalue during struct expansion.");
254   llvm::Value *Addr = LV.getAddress();
255   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
256          i != e; ++i) {
257     FieldDecl *FD = *i;
258     QualType FT = FD->getType();
259 
260     // FIXME: What are the right qualifiers here?
261     LValue LV = EmitLValueForField(Addr, FD, 0);
262     if (CodeGenFunction::hasAggregateLLVMType(FT)) {
263       AI = ExpandTypeFromArgs(FT, LV, AI);
264     } else {
265       EmitStoreThroughLValue(RValue::get(AI), LV, FT);
266       ++AI;
267     }
268   }
269 
270   return AI;
271 }
272 
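/// ExpandTypeToArgs - Flatten the structure-typed rvalue RV into its scalar
/// fields and append them to Args; this is the inverse of ExpandTypeFromArgs.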
273 void
274 CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
275                                   llvm::SmallVector<llvm::Value*, 16> &Args) {
276   const RecordType *RT = Ty->getAsStructureType();
277   assert(RT && "Can only expand structure types.");
278 
279   RecordDecl *RD = RT->getDecl();
280   assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
281   llvm::Value *Addr = RV.getAggregateAddr();
282   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
283          i != e; ++i) {
284     FieldDecl *FD = *i;
285     QualType FT = FD->getType();
286 
287     // FIXME: What are the right qualifiers here?
288     LValue LV = EmitLValueForField(Addr, FD, 0);
289     if (CodeGenFunction::hasAggregateLLVMType(FT)) {
290       ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
291     } else {
292       RValue RV = EmitLoadOfLValue(LV, FT);
293       assert(RV.isScalar() &&
294              "Unexpected non-scalar rvalue during struct expansion.");
295       Args.push_back(RV.getScalarVal());
296     }
297   }
298 }
299 
300 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
301 /// a pointer to an object of type \arg Ty.
302 ///
303 /// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
306 static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
307                                       const llvm::Type *Ty,
308                                       CodeGenFunction &CGF) {
309   const llvm::Type *SrcTy =
310     cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
311   uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
312   uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
313 
314   // If load is legal, just bitcast the src pointer.
315   if (SrcSize >= DstSize) {
316     // Generally SrcSize is never greater than DstSize, since this means we are
317     // losing bits. However, this can happen in cases where the structure has
318     // additional padding, for example due to a user specified alignment.
319     //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
322     llvm::Value *Casted =
323       CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
324     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
325     // FIXME: Use better alignment / avoid requiring aligned load.
326     Load->setAlignment(1);
327     return Load;
328   } else {
329     // Otherwise do coercion through memory. This is stupid, but
330     // simple.
331     llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
332     llvm::Value *Casted =
333       CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
334     llvm::StoreInst *Store =
335       CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
336     // FIXME: Use better alignment / avoid requiring aligned store.
337     Store->setAlignment(1);
338     return CGF.Builder.CreateLoad(Tmp);
339   }
340 }
341 
342 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
343 /// where the source and destination may have different types.
344 ///
345 /// This safely handles the case when the src type is larger than the
346 /// destination type; the upper bits of the src will be lost.
347 static void CreateCoercedStore(llvm::Value *Src,
348                                llvm::Value *DstPtr,
349                                bool DstIsVolatile,
350                                CodeGenFunction &CGF) {
351   const llvm::Type *SrcTy = Src->getType();
352   const llvm::Type *DstTy =
353     cast<llvm::PointerType>(DstPtr->getType())->getElementType();
354 
355   uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
356   uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
357 
358   // If store is legal, just bitcast the src pointer.
359   if (SrcSize <= DstSize) {
360     llvm::Value *Casted =
361       CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
362     // FIXME: Use better alignment / avoid requiring aligned store.
363     CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
364   } else {
365     // Otherwise do coercion through memory. This is stupid, but
366     // simple.
367 
368     // Generally SrcSize is never greater than DstSize, since this means we are
369     // losing bits. However, this can happen in cases where the structure has
370     // additional padding, for example due to a user specified alignment.
371     //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
374     llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
375     CGF.Builder.CreateStore(Src, Tmp);
376     llvm::Value *Casted =
377       CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
378     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
379     // FIXME: Use better alignment / avoid requiring aligned load.
380     Load->setAlignment(1);
381     CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
382   }
383 }
384 
385 /***/
386 
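/// ReturnTypeUsesSret - Return true if the function described by FI returns
/// its result indirectly through a hidden sret pointer argument.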
387 bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
388   return FI.getReturnInfo().isIndirect();
389 }
390 
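/// GetFunctionType - Build the LLVM function type for FI. Indirect return
/// values become a void result plus a leading pointer parameter, Coerce uses
/// the coercion type, Expand flattens structure arguments into their scalar
/// fields, and Ignore contributes no parameter at all.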
391 const llvm::FunctionType *
392 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
393   std::vector<const llvm::Type*> ArgTys;
394 
395   const llvm::Type *ResultType = 0;
396 
397   QualType RetTy = FI.getReturnType();
398   const ABIArgInfo &RetAI = FI.getReturnInfo();
399   switch (RetAI.getKind()) {
400   case ABIArgInfo::Expand:
401     assert(0 && "Invalid ABI kind for return argument");
402 
403   case ABIArgInfo::Extend:
404   case ABIArgInfo::Direct:
405     ResultType = ConvertType(RetTy);
406     break;
407 
408   case ABIArgInfo::Indirect: {
409     assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
410     ResultType = llvm::Type::getVoidTy(getLLVMContext());
411     const llvm::Type *STy = ConvertType(RetTy);
412     ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
413     break;
414   }
415 
416   case ABIArgInfo::Ignore:
417     ResultType = llvm::Type::getVoidTy(getLLVMContext());
418     break;
419 
420   case ABIArgInfo::Coerce:
421     ResultType = RetAI.getCoerceToType();
422     break;
423   }
424 
425   for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
426          ie = FI.arg_end(); it != ie; ++it) {
427     const ABIArgInfo &AI = it->info;
428 
429     switch (AI.getKind()) {
430     case ABIArgInfo::Ignore:
431       break;
432 
433     case ABIArgInfo::Coerce:
434       ArgTys.push_back(AI.getCoerceToType());
435       break;
436 
437     case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
439       const llvm::Type *LTy = ConvertTypeForMem(it->type);
440       ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
441       break;
442     }
443 
444     case ABIArgInfo::Extend:
445     case ABIArgInfo::Direct:
446       ArgTys.push_back(ConvertType(it->type));
447       break;
448 
449     case ABIArgInfo::Expand:
450       GetExpandedTypes(it->type, ArgTys);
451       break;
452     }
453   }
454 
455   return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
456 }
457 
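/// HasIncompleteReturnTypeOrArgumentTypes - Return true if the prototype has a
/// return or argument type whose tag type is not yet defined, and which
/// therefore cannot be converted to a concrete LLVM type.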
458 static bool HasIncompleteReturnTypeOrArgumentTypes(const FunctionProtoType *T) {
459   if (const TagType *TT = T->getResultType()->getAs<TagType>()) {
460     if (!TT->getDecl()->isDefinition())
461       return true;
462   }
463 
464   for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) {
465     if (const TagType *TT = T->getArgType(i)->getAs<TagType>()) {
466       if (!TT->getDecl()->isDefinition())
467         return true;
468     }
469   }
470 
471   return false;
472 }
473 
474 const llvm::Type *
475 CodeGenTypes::GetFunctionTypeForVtable(const CXXMethodDecl *MD) {
476   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
477 
478   if (!HasIncompleteReturnTypeOrArgumentTypes(FPT))
479     return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic());
480 
481   return llvm::OpaqueType::get(getLLVMContext());
482 }
483 
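/// ConstructAttributeList - Build the LLVM attribute list and compute the
/// calling convention for a definition of, or call to, a function with the
/// given CGFunctionInfo. Attributes implied by TargetDecl (nounwind, noreturn,
/// readnone/readonly, noalias on the result) are merged with those required by
/// the ABI treatment of the return value and arguments (sret, byval, sext/zext,
/// inreg).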
484 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
485                                            const Decl *TargetDecl,
486                                            AttributeListType &PAL,
487                                            unsigned &CallingConv) {
488   unsigned FuncAttrs = 0;
489   unsigned RetAttrs = 0;
490 
491   CallingConv = FI.getEffectiveCallingConvention();
492 
493   // FIXME: handle sseregparm someday...
494   if (TargetDecl) {
495     if (TargetDecl->hasAttr<NoThrowAttr>())
496       FuncAttrs |= llvm::Attribute::NoUnwind;
497     if (TargetDecl->hasAttr<NoReturnAttr>())
498       FuncAttrs |= llvm::Attribute::NoReturn;
499     if (TargetDecl->hasAttr<ConstAttr>())
500       FuncAttrs |= llvm::Attribute::ReadNone;
501     else if (TargetDecl->hasAttr<PureAttr>())
502       FuncAttrs |= llvm::Attribute::ReadOnly;
503     if (TargetDecl->hasAttr<MallocAttr>())
504       RetAttrs |= llvm::Attribute::NoAlias;
505   }
506 
507   if (CodeGenOpts.OptimizeSize)
508     FuncAttrs |= llvm::Attribute::OptimizeForSize;
509   if (CodeGenOpts.DisableRedZone)
510     FuncAttrs |= llvm::Attribute::NoRedZone;
511   if (CodeGenOpts.NoImplicitFloat)
512     FuncAttrs |= llvm::Attribute::NoImplicitFloat;
513 
514   QualType RetTy = FI.getReturnType();
515   unsigned Index = 1;
516   const ABIArgInfo &RetAI = FI.getReturnInfo();
517   switch (RetAI.getKind()) {
518   case ABIArgInfo::Extend:
519    if (RetTy->isSignedIntegerType()) {
520      RetAttrs |= llvm::Attribute::SExt;
521    } else if (RetTy->isUnsignedIntegerType()) {
522      RetAttrs |= llvm::Attribute::ZExt;
523    }
524    // FALLTHROUGH
525   case ABIArgInfo::Direct:
526     break;
527 
528   case ABIArgInfo::Indirect:
529     PAL.push_back(llvm::AttributeWithIndex::get(Index,
530                                                 llvm::Attribute::StructRet |
531                                                 llvm::Attribute::NoAlias));
532     ++Index;
    // sret disables readnone and readonly.
534     FuncAttrs &= ~(llvm::Attribute::ReadOnly |
535                    llvm::Attribute::ReadNone);
536     break;
537 
538   case ABIArgInfo::Ignore:
539   case ABIArgInfo::Coerce:
540     break;
541 
542   case ABIArgInfo::Expand:
543     assert(0 && "Invalid ABI kind for return argument");
544   }
545 
546   if (RetAttrs)
547     PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
548 
549   // FIXME: we need to honour command line settings also...
  // FIXME: RegParm should be reduced in case of nested functions and/or global
  // register variables.
552   signed RegParm = 0;
553   if (TargetDecl)
554     if (const RegparmAttr *RegParmAttr
555           = TargetDecl->getAttr<RegparmAttr>())
556       RegParm = RegParmAttr->getNumParams();
557 
558   unsigned PointerWidth = getContext().Target.getPointerWidth(0);
559   for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
560          ie = FI.arg_end(); it != ie; ++it) {
561     QualType ParamType = it->type;
562     const ABIArgInfo &AI = it->info;
563     unsigned Attributes = 0;
564 
565     if (ParamType.isRestrictQualified())
566       Attributes |= llvm::Attribute::NoAlias;
567 
568     switch (AI.getKind()) {
569     case ABIArgInfo::Coerce:
570       break;
571 
572     case ABIArgInfo::Indirect:
573       if (AI.getIndirectByVal())
574         Attributes |= llvm::Attribute::ByVal;
575 
576       Attributes |=
577         llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
578       // byval disables readnone and readonly.
579       FuncAttrs &= ~(llvm::Attribute::ReadOnly |
580                      llvm::Attribute::ReadNone);
581       break;
582 
583     case ABIArgInfo::Extend:
584      if (ParamType->isSignedIntegerType()) {
585        Attributes |= llvm::Attribute::SExt;
586      } else if (ParamType->isUnsignedIntegerType()) {
587        Attributes |= llvm::Attribute::ZExt;
588      }
     // FALLTHROUGH
590     case ABIArgInfo::Direct:
591       if (RegParm > 0 &&
592           (ParamType->isIntegerType() || ParamType->isPointerType())) {
593         RegParm -=
594           (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
595         if (RegParm >= 0)
596           Attributes |= llvm::Attribute::InReg;
597       }
598       // FIXME: handle sseregparm someday...
599       break;
600 
601     case ABIArgInfo::Ignore:
602       // Skip increment, no matching LLVM parameter.
603       continue;
604 
605     case ABIArgInfo::Expand: {
606       std::vector<const llvm::Type*> Tys;
607       // FIXME: This is rather inefficient. Do we ever actually need to do
608       // anything here? The result should be just reconstructed on the other
609       // side, so extension should be a non-issue.
610       getTypes().GetExpandedTypes(ParamType, Tys);
611       Index += Tys.size();
612       continue;
613     }
614     }
615 
616     if (Attributes)
617       PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
618     ++Index;
619   }
620   if (FuncAttrs)
621     PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
622 }
623 
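/// EmitFunctionProlog - Emit code at the start of a function to move the
/// incoming LLVM arguments into the locations where the body expects to find
/// its parameters, according to each argument's ABI classification (direct,
/// indirect, expanded, coerced, or ignored).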
624 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
625                                          llvm::Function *Fn,
626                                          const FunctionArgList &Args) {
627   // If this is an implicit-return-zero function, go ahead and
628   // initialize the return value.  TODO: it might be nice to have
629   // a more general mechanism for this that didn't require synthesized
630   // return statements.
631   if (const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
632     if (FD->hasImplicitReturnZero()) {
633       QualType RetTy = FD->getResultType().getUnqualifiedType();
634       const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
635       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
636       Builder.CreateStore(Zero, ReturnValue);
637     }
638   }
639 
640   // FIXME: We no longer need the types from FunctionArgList; lift up and
641   // simplify.
642 
643   // Emit allocs for param decls.  Give the LLVM Argument nodes names.
644   llvm::Function::arg_iterator AI = Fn->arg_begin();
645 
646   // Name the struct return argument.
647   if (CGM.ReturnTypeUsesSret(FI)) {
648     AI->setName("agg.result");
649     ++AI;
650   }
651 
652   assert(FI.arg_size() == Args.size() &&
653          "Mismatch between function signature & arguments.");
654   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
655   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
656        i != e; ++i, ++info_it) {
657     const VarDecl *Arg = i->first;
658     QualType Ty = info_it->type;
659     const ABIArgInfo &ArgI = info_it->info;
660 
661     switch (ArgI.getKind()) {
662     case ABIArgInfo::Indirect: {
663       llvm::Value* V = AI;
664       if (hasAggregateLLVMType(Ty)) {
        // Do nothing; aggregates and complex variables are accessed by
        // reference.
667       } else {
668         // Load scalar value from indirect argument.
669         V = EmitLoadOfScalar(V, false, Ty);
670         if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
671           // This must be a promotion, for something like
672           // "void a(x) short x; {..."
673           V = EmitScalarConversion(V, Ty, Arg->getType());
674         }
675       }
676       EmitParmDecl(*Arg, V);
677       break;
678     }
679 
680     case ABIArgInfo::Extend:
681     case ABIArgInfo::Direct: {
682       assert(AI != Fn->arg_end() && "Argument mismatch!");
683       llvm::Value* V = AI;
684       if (hasAggregateLLVMType(Ty)) {
685         // Create a temporary alloca to hold the argument; the rest of
686         // codegen expects to access aggregates & complex values by
687         // reference.
688         V = CreateTempAlloca(ConvertTypeForMem(Ty));
689         Builder.CreateStore(AI, V);
690       } else {
691         if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
692           // This must be a promotion, for something like
693           // "void a(x) short x; {..."
694           V = EmitScalarConversion(V, Ty, Arg->getType());
695         }
696       }
697       EmitParmDecl(*Arg, V);
698       break;
699     }
700 
701     case ABIArgInfo::Expand: {
702       // If this structure was expanded into multiple arguments then
703       // we need to create a temporary and reconstruct it from the
704       // arguments.
705       llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
706                                            Arg->getName() + ".addr");
707       // FIXME: What are the right qualifiers here?
708       llvm::Function::arg_iterator End =
709         ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
710       EmitParmDecl(*Arg, Temp);
711 
712       // Name the arguments used in expansion and increment AI.
713       unsigned Index = 0;
714       for (; AI != End; ++AI, ++Index)
715         AI->setName(Arg->getName() + "." + llvm::Twine(Index));
716       continue;
717     }
718 
719     case ABIArgInfo::Ignore:
720       // Initialize the local variable appropriately.
721       if (hasAggregateLLVMType(Ty)) {
722         EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
723       } else {
724         EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
725       }
726 
727       // Skip increment, no matching LLVM parameter.
728       continue;
729 
730     case ABIArgInfo::Coerce: {
731       assert(AI != Fn->arg_end() && "Argument mismatch!");
732       // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
733       // result in a new alloca anyway, so we could just store into that
734       // directly if we broke the abstraction down more.
735       llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce");
736       CreateCoercedStore(AI, V, /*DestIsVolatile=*/false, *this);
737       // Match to what EmitParmDecl is expecting for this type.
738       if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
739         V = EmitLoadOfScalar(V, false, Ty);
740         if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
741           // This must be a promotion, for something like
742           // "void a(x) short x; {..."
743           V = EmitScalarConversion(V, Ty, Arg->getType());
744         }
745       }
746       EmitParmDecl(*Arg, V);
747       break;
748     }
749     }
750 
751     ++AI;
752   }
753   assert(AI == Fn->arg_end() && "Argument mismatch!");
754 }
755 
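/// EmitFunctionEpilog - Emit the function return sequence: the value stored in
/// ReturnValue is either returned directly, coerced to the ABI return type, or
/// copied through the implicit sret argument for indirect returns.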
756 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
757                                          llvm::Value *ReturnValue) {
758   llvm::Value *RV = 0;
759 
760   // Functions with no result always return void.
761   if (ReturnValue) {
762     QualType RetTy = FI.getReturnType();
763     const ABIArgInfo &RetAI = FI.getReturnInfo();
764 
765     switch (RetAI.getKind()) {
766     case ABIArgInfo::Indirect:
767       if (RetTy->isAnyComplexType()) {
768         ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
769         StoreComplexToAddr(RT, CurFn->arg_begin(), false);
770       } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        // Do nothing; aggregates get evaluated directly into the destination.
772       } else {
773         EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
774                           false, RetTy);
775       }
776       break;
777 
778     case ABIArgInfo::Extend:
779     case ABIArgInfo::Direct:
      // The internal return value temp will always have
      // pointer-to-return-type type.
782       RV = Builder.CreateLoad(ReturnValue);
783       break;
784 
785     case ABIArgInfo::Ignore:
786       break;
787 
788     case ABIArgInfo::Coerce:
789       RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
790       break;
791 
792     case ABIArgInfo::Expand:
793       assert(0 && "Invalid ABI kind for return argument");
794     }
795   }
796 
797   if (RV) {
798     Builder.CreateRet(RV);
799   } else {
800     Builder.CreateRetVoid();
801   }
802 }
803 
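/// EmitCallArg - Emit a single call argument; arguments of reference type are
/// bound to the referenced object, everything else is evaluated with
/// EmitAnyExprToTemp.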
804 RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
805   if (ArgType->isReferenceType())
806     return EmitReferenceBindingToExpr(E, ArgType);
807 
808   return EmitAnyExprToTemp(E);
809 }
810 
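/// EmitCall - Emit a call to Callee with the given argument list, lowering the
/// arguments and the return value according to CallInfo. The call is emitted
/// as an invoke when an exception landing pad is active and the callee may
/// unwind, and the result is rebuilt into an RValue according to the return
/// ABI classification.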
811 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
812                                  llvm::Value *Callee,
813                                  ReturnValueSlot ReturnValue,
814                                  const CallArgList &CallArgs,
815                                  const Decl *TargetDecl) {
816   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
817   llvm::SmallVector<llvm::Value*, 16> Args;
818 
819   // Handle struct-return functions by passing a pointer to the
820   // location that we would like to return into.
821   QualType RetTy = CallInfo.getReturnType();
822   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
823 
824 
825   // If the call returns a temporary with struct return, create a temporary
826   // alloca to hold the result, unless one is given to us.
827   if (CGM.ReturnTypeUsesSret(CallInfo)) {
828     llvm::Value *Value = ReturnValue.getValue();
829     if (!Value)
830       Value = CreateTempAlloca(ConvertTypeForMem(RetTy));
831     Args.push_back(Value);
832   }
833 
834   assert(CallInfo.arg_size() == CallArgs.size() &&
835          "Mismatch between function signature & arguments.");
836   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
837   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
838        I != E; ++I, ++info_it) {
839     const ABIArgInfo &ArgInfo = info_it->info;
840     RValue RV = I->first;
841 
842     switch (ArgInfo.getKind()) {
843     case ABIArgInfo::Indirect:
844       if (RV.isScalar() || RV.isComplex()) {
845         // Make a temporary alloca to pass the argument.
846         Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
847         if (RV.isScalar())
848           EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
849         else
850           StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
851       } else {
852         Args.push_back(RV.getAggregateAddr());
853       }
854       break;
855 
856     case ABIArgInfo::Extend:
857     case ABIArgInfo::Direct:
858       if (RV.isScalar()) {
859         Args.push_back(RV.getScalarVal());
860       } else if (RV.isComplex()) {
861         llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
862         Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
863         Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
864         Args.push_back(Tmp);
865       } else {
866         Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
867       }
868       break;
869 
870     case ABIArgInfo::Ignore:
871       break;
872 
873     case ABIArgInfo::Coerce: {
874       // FIXME: Avoid the conversion through memory if possible.
875       llvm::Value *SrcPtr;
876       if (RV.isScalar()) {
877         SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
878         EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
879       } else if (RV.isComplex()) {
880         SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
881         StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
882       } else
883         SrcPtr = RV.getAggregateAddr();
884       Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
885                                        *this));
886       break;
887     }
888 
889     case ABIArgInfo::Expand:
890       ExpandTypeToArgs(I->second, RV, Args);
891       break;
892     }
893   }
894 
895   // If the callee is a bitcast of a function to a varargs pointer to function
896   // type, check to see if we can remove the bitcast.  This handles some cases
897   // with unprototyped functions.
898   if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
899     if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
900       const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
901       const llvm::FunctionType *CurFT =
902         cast<llvm::FunctionType>(CurPT->getElementType());
903       const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
904 
905       if (CE->getOpcode() == llvm::Instruction::BitCast &&
906           ActualFT->getReturnType() == CurFT->getReturnType() &&
907           ActualFT->getNumParams() == CurFT->getNumParams() &&
908           ActualFT->getNumParams() == Args.size()) {
909         bool ArgsMatch = true;
910         for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
911           if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
912             ArgsMatch = false;
913             break;
914           }
915 
916         // Strip the cast if we can get away with it.  This is a nice cleanup,
917         // but also allows us to inline the function at -O0 if it is marked
918         // always_inline.
919         if (ArgsMatch)
920           Callee = CalleeF;
921       }
922     }
923 
924 
925   llvm::BasicBlock *InvokeDest = getInvokeDest();
926   unsigned CallingConv;
927   CodeGen::AttributeListType AttributeList;
928   CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
929   llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
930                                                    AttributeList.end());
931 
932   llvm::CallSite CS;
933   if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
934     CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
935   } else {
936     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
937     CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
938                               Args.data(), Args.data()+Args.size());
939     EmitBlock(Cont);
940   }
941 
942   CS.setAttributes(Attrs);
943   CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
944 
945   // If the call doesn't return, finish the basic block and clear the
946   // insertion point; this allows the rest of IRgen to discard
947   // unreachable code.
948   if (CS.doesNotReturn()) {
949     Builder.CreateUnreachable();
950     Builder.ClearInsertionPoint();
951 
    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
955     EnsureInsertPoint();
956 
957     // Return a reasonable RValue.
958     return GetUndefRValue(RetTy);
959   }
960 
961   llvm::Instruction *CI = CS.getInstruction();
962   if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
963     CI->setName("call");
964 
965   switch (RetAI.getKind()) {
966   case ABIArgInfo::Indirect:
967     if (RetTy->isAnyComplexType())
968       return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
969     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
970       return RValue::getAggregate(Args[0]);
971     return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));
972 
973   case ABIArgInfo::Extend:
974   case ABIArgInfo::Direct:
975     if (RetTy->isAnyComplexType()) {
976       llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
977       llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
978       return RValue::getComplex(std::make_pair(Real, Imag));
979     }
980     if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
981       llvm::Value *DestPtr = ReturnValue.getValue();
982       bool DestIsVolatile = ReturnValue.isVolatile();
983 
984       if (!DestPtr) {
985         DestPtr = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
986         DestIsVolatile = false;
987       }
988       Builder.CreateStore(CI, DestPtr, DestIsVolatile);
989       return RValue::getAggregate(DestPtr);
990     }
991     return RValue::get(CI);
992 
993   case ABIArgInfo::Ignore:
994     // If we are ignoring an argument that had a result, make sure to
995     // construct the appropriate return value for our caller.
996     return GetUndefRValue(RetTy);
997 
998   case ABIArgInfo::Coerce: {
999     llvm::Value *DestPtr = ReturnValue.getValue();
1000     bool DestIsVolatile = ReturnValue.isVolatile();
1001 
1002     if (!DestPtr) {
1003       DestPtr = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
1004       DestIsVolatile = false;
1005     }
1006 
1007     CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
1008     if (RetTy->isAnyComplexType())
1009       return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
1010     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
1011       return RValue::getAggregate(DestPtr);
1012     return RValue::get(EmitLoadOfScalar(DestPtr, false, RetTy));
1013   }
1014 
1015   case ABIArgInfo::Expand:
1016     assert(0 && "Invalid ABI kind for return argument");
1017   }
1018 
1019   assert(0 && "Unhandled ABIArgInfo::Kind");
1020   return RValue::get(0);
1021 }
1022 
1023 /* VarArg handling */
1024 
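/// EmitVAArg - Emit a va_arg of type Ty from the va_list at VAListAddr by
/// delegating to the target's ABIInfo implementation.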
1025 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
1026   return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
1027 }
1028