//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/CodeGen/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"

#include "ABIInfo.h"

using namespace clang;
using namespace CodeGen;

/***/

// FIXME: Use iterator and sidestep silly type array creation.

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  }
}

const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
  return getFunctionInfo(FTNP->getResultType(),
                         llvm::SmallVector<QualType, 16>(),
                         FTNP->getCallConv(), FTNP->getNoReturnAttr());
}

const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
  llvm::SmallVector<QualType, 16> ArgTys;
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys,
                         FTP->getCallConv(), FTP->getNoReturnAttr());
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  return CC_C;
}
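
// For example (an illustrative sketch, not an exhaustive list):
//
//   void f(void) __attribute__((stdcall));   // -> CC_X86StdCall
//   void g(void) __attribute__((fastcall));  // -> CC_X86FastCall
//   void h(void);                            // -> CC_C (the default)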

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
                                                 const FunctionProtoType *FTP) {
  llvm::SmallVector<QualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(Context.getPointerType(Context.getTagDeclType(RD)));

  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));

  // FIXME: Set calling convention correctly, it needs to be associated with the
  // type somehow.
  return getFunctionInfo(FTP->getResultType(), ArgTys,
                         FTP->getCallConv(), FTP->getNoReturnAttr());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<QualType, 16> ArgTys;
  // Add the 'this' pointer unless this is a static method.
  if (MD->isInstance())
    ArgTys.push_back(MD->getThisType(Context));

  const FunctionProtoType *FTP = MD->getType()->getAs<FunctionProtoType>();
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys, FTP->getCallConv(),
                         FTP->getNoReturnAttr());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
                                                    CXXCtorType Type) {
  llvm::SmallVector<QualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(D->getThisType(Context));

  // Check if we need to add a VTT parameter (which has type void **).
  if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));

  const FunctionProtoType *FTP = D->getType()->getAs<FunctionProtoType>();
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys, FTP->getCallConv(),
                         FTP->getNoReturnAttr());
}
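
// For example (a sketch; 'B_base' is an illustrative name, not the real
// mangling): given
//
//   struct A { };
//   struct B : virtual A { B(); };
//
// the base-object constructor (Ctor_Base) of B carries a VTT parameter, as
// if it were declared 'void B_base(B *this, void **vtt)'.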

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
                                                    CXXDtorType Type) {
  llvm::SmallVector<QualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(D->getThisType(Context));

  // Check if we need to add a VTT parameter (which has type void **).
  if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));

  const FunctionProtoType *FTP = D->getType()->getAs<FunctionProtoType>();
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys, FTP->getCallConv(),
                         FTP->getNoReturnAttr());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  const FunctionType *FTy = FD->getType()->getAs<FunctionType>();
  if (const FunctionNoProtoType *FNTP = dyn_cast<FunctionNoProtoType>(FTy))
    return getFunctionInfo(FNTP->getResultType(),
                           llvm::SmallVector<QualType, 16>(),
                           FNTP->getCallConv(), FNTP->getNoReturnAttr());

  const FunctionProtoType *FPT = cast<FunctionProtoType>(FTy);
  llvm::SmallVector<QualType, 16> ArgTys;
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FPT->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FPT->getArgType(i));
  return getFunctionInfo(FPT->getResultType(), ArgTys,
                         FPT->getCallConv(), FPT->getNoReturnAttr());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<QualType, 16> ArgTys;
  ArgTys.push_back(MD->getSelfDecl()->getType());
  ArgTys.push_back(Context.getObjCSelType());
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i)
    ArgTys.push_back((*i)->getType());
  return getFunctionInfo(MD->getResultType(), ArgTys,
                         getCallingConventionForDecl(MD),
                         /*NoReturn*/ false);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return getFunctionInfo(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return getFunctionInfo(DD, GD.getDtorType());

  return getFunctionInfo(FD);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args,
                                                    CallingConv CC,
                                                    bool NoReturn) {
  // FIXME: Kill copy.
  llvm::SmallVector<QualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(i->second);
  return getFunctionInfo(ResTy, ArgTys, CC, NoReturn);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const FunctionArgList &Args,
                                                    CallingConv CC,
                                                    bool NoReturn) {
  // FIXME: Kill copy.
  llvm::SmallVector<QualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(i->second);
  return getFunctionInfo(ResTy, ArgTys, CC, NoReturn);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                  const llvm::SmallVector<QualType, 16> &ArgTys,
                                                    CallingConv CallConv,
                                                    bool NoReturn) {
  unsigned CC = ClangCallConvToLLVMCallConv(CallConv);

  // Look up or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, CC, NoReturn, ResTy,
                          ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(CC, NoReturn, ResTy, ArgTys);
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext());

  return *FI;
}

CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
                               bool _NoReturn,
                               QualType ResTy,
                               const llvm::SmallVector<QualType, 16> &ArgTys)
  : CallingConvention(_CallingConvention),
    EffectiveCallingConvention(_CallingConvention),
    NoReturn(_NoReturn)
{
  NumArgs = ArgTys.size();
  Args = new ArgInfo[1 + NumArgs];
  Args[0].type = ResTy;
  for (unsigned i = 0; i < NumArgs; ++i)
    Args[1 + i].type = ArgTys[i];
}
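
// For example (a sketch): for 'int f(float, char *)' the resulting layout is
//
//   Args[0].type = int       // return type
//   Args[1].type = float     // first argument
//   Args[2].type = char *    // second argument
//
// with each ArgInfo's ABI details filled in afterwards by computeInfo().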

/***/

void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      GetExpandedTypes(FT, ArgTys);
    } else {
      ArgTys.push_back(ConvertType(FT));
    }
  }
}
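
// For example (a sketch): expanding
//
//   struct S { int a; struct { float b; float c; } inner; };
//
// appends { i32, float, float } to ArgTys; nested aggregates are flattened
// recursively, one LLVM argument per scalar field.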

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}

void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.
    llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
    llvm::StoreInst *Store =
      CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
    // FIXME: Use better alignment / avoid requiring aligned store.
    Store->setAlignment(1);
    return CGF.Builder.CreateLoad(Tmp);
  }
}
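
// For example (a sketch of the coercion-through-memory path, assuming
// SrcTy = { i32 } and Ty = i64, so SrcSize < DstSize):
//
//   %tmp = alloca i64
//   %casted = bitcast i64* %tmp to { i32 }*
//   %v = load { i32 }* %src
//   store { i32 } %v, { i32 }* %casted
//   %result = load i64* %tmp      ; upper 32 bits are undefined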

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}
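
// For example (a sketch of the direct path, assuming SrcTy = i64 and
// DstTy = { i32, i32 }, so SrcSize <= DstSize):
//
//   %casted = bitcast { i32, i32 }* %dst to i64*
//   store i64 %src, i64* %casted, align 1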

/***/

bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    ResultType = ConvertType(RetTy);
    break;

  case ABIArgInfo::Indirect: {
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    const llvm::Type *STy = ConvertType(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::Coerce:
    ResultType = RetAI.getCoerceToType();
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      ArgTys.push_back(AI.getCoerceToType());
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      ArgTys.push_back(ConvertType(it->type));
      break;

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, ArgTys);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}
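
// For example (a sketch, assuming the target ABI returns S indirectly):
//
//   struct S { int x[8]; };
//   struct S f(int);
//
// lowers to 'void (%struct.S*, i32)'; the hidden first parameter points at
// the return slot, and the sret attribute is attached separately by
// ConstructAttributeList.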

static bool HasIncompleteReturnTypeOrArgumentTypes(const FunctionProtoType *T) {
  if (const TagType *TT = T->getResultType()->getAs<TagType>()) {
    if (!TT->getDecl()->isDefinition())
      return true;
  }

  for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) {
    if (const TagType *TT = T->getArgType(i)->getAs<TagType>()) {
      if (!TT->getDecl()->isDefinition())
        return true;
    }
  }

  return false;
}

const llvm::Type *
CodeGenTypes::GetFunctionTypeForVtable(const CXXMethodDecl *MD) {
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!HasIncompleteReturnTypeOrArgumentTypes(FPT))
    return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic());

  return llvm::OpaqueType::get(getLLVMContext());
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs |= llvm::Attribute::NoReturn;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs |= llvm::Attribute::NoAlias;
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs |= llvm::Attribute::OptimizeForSize;
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->isSignedIntegerType()) {
      RetAttrs |= llvm::Attribute::SExt;
    } else if (RetTy->isUnsignedIntegerType()) {
      RetAttrs |= llvm::Attribute::ZExt;
    }
    // FALLTHROUGH
  case ABIArgInfo::Direct:
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet |
                                                llvm::Attribute::NoAlias));
    ++Index;
    // sret disables readnone and readonly.
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: we need to honour command line settings also...
  // FIXME: RegParm should be reduced in case of nested functions and/or global
  // register variable.
  signed RegParm = 0;
  if (TargetDecl)
    if (const RegparmAttr *RegParmAttr
          = TargetDecl->getAttr<RegparmAttr>())
      RegParm = RegParmAttr->getNumParams();

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    if (ParamType.isRestrictQualified())
      Attributes |= llvm::Attribute::NoAlias;

    switch (AI.getKind()) {
    case ABIArgInfo::Coerce:
      break;

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attributes |= llvm::Attribute::ByVal;

      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerType()) {
        Attributes |= llvm::Attribute::SExt;
      } else if (ParamType->isUnsignedIntegerType()) {
        Attributes |= llvm::Attribute::ZExt;
      }
      // FALLTHROUGH
    case ABIArgInfo::Direct:
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}
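
// For example (a sketch, assuming an indirect return): for
//
//   struct S f(int) __attribute__((noreturn));
//
// this yields roughly the attribute list
//
//   { (1, sret|noalias), (~0, noreturn) }
//
// where index 1 is the hidden return-slot parameter and index ~0 carries the
// function-level attributes.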

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing; aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Create a temporary alloca to hold the argument; the rest of
        // codegen expects to access aggregates & complex values by
        // reference.
        V = CreateMemTemp(Ty);
        Builder.CreateStore(AI, V);
      } else {
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + llvm::Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateMemTemp(Ty));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }

      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Coerce: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
      // result in a new alloca anyway, so we could just store into that
      // directly if we broke the abstraction down more.
      llvm::Value *V = CreateMemTemp(Ty, "coerce");
      CreateCoercedStore(AI, V, /*DestIsVolatile=*/false, *this);
      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         llvm::Value *ReturnValue) {
  llvm::Value *RV = 0;

  // Functions with no result always return void.
  if (ReturnValue) {
    QualType RetTy = FI.getReturnType();
    const ABIArgInfo &RetAI = FI.getReturnInfo();

    switch (RetAI.getKind()) {
    case ABIArgInfo::Indirect:
      if (RetTy->isAnyComplexType()) {
        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        // Do nothing; aggregates get evaluated directly into the destination.
      } else {
        EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                          false, RetTy);
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      // The internal return value temp will always have
      // pointer-to-return-type type.
      RV = Builder.CreateLoad(ReturnValue);
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
      break;

    case ABIArgInfo::Expand:
      assert(0 && "Invalid ABI kind for return argument");
    }
  }

  if (RV) {
    Builder.CreateRet(RV);
  } else {
    Builder.CreateRetVoid();
  }
}

RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
  if (ArgType->isReferenceType())
    return EmitReferenceBindingToExpr(E);

  return EmitAnyExprToTemp(E);
}
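
// For example (a sketch): for 'void f(const int &r)' called as 'f(x)', the
// parameter is a reference type, so the argument is emitted as an address
// via EmitReferenceBindingToExpr rather than as a loaded scalar.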

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSret(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateMemTemp(I->second));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->second, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->second, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
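  // For example (a sketch): in C, 'void f();' defined elsewhere as
  // 'void f(int x) { ... }' and called as 'f(1)' yields a call through a
  // bitcast of @f; when @f's actual parameter types match the arguments we
  // emitted, the cast is stripped and @f is called directly.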
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT =
        cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size()) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  llvm::BasicBlock *InvokeDest = getInvokeDest();
  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr) {
        DestPtr = CreateMemTemp(RetTy, "agg.tmp");
        DestIsVolatile = false;
      }
      Builder.CreateStore(CI, DestPtr, DestIsVolatile);
      return RValue::getAggregate(DestPtr);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}
1058