//===--- CGCall.cpp - Encapsulate calling convention details ---*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "CGCall.h"
16 #include "CodeGenFunction.h"
17 #include "CodeGenModule.h"
18 #include "clang/Basic/TargetInfo.h"
19 #include "clang/AST/Decl.h"
20 #include "clang/AST/DeclCXX.h"
21 #include "clang/AST/DeclObjC.h"
22 #include "clang/CodeGen/CodeGenOptions.h"
23 #include "llvm/Attributes.h"
24 #include "llvm/Support/CallSite.h"
25 #include "llvm/Target/TargetData.h"
26 
27 #include "ABIInfo.h"
28 
29 using namespace clang;
30 using namespace CodeGen;
31 
32 /***/
33 
34 // FIXME: Use iterator and sidestep silly type array creation.
35 
36 const
37 CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
  // FIXME: Set the calling convention correctly; it needs to be associated
  // with the type somehow.
40   return getFunctionInfo(FTNP->getResultType(),
41                          llvm::SmallVector<QualType, 16>(), 0);
42 }
43 
44 const
45 CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
46   llvm::SmallVector<QualType, 16> ArgTys;
47   // FIXME: Kill copy.
48   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
49     ArgTys.push_back(FTP->getArgType(i));
  // FIXME: Set the calling convention correctly; it needs to be associated
  // with the type somehow.
52   return getFunctionInfo(FTP->getResultType(), ArgTys, 0);
53 }
54 
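/// getCallingConventionForDecl - Return the LLVM calling convention implied by
/// a declaration's calling-convention attributes (stdcall, fastcall),
/// defaulting to the C calling convention.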
55 static unsigned getCallingConventionForDecl(const Decl *D) {
56   // Set the appropriate calling convention for the Function.
57   if (D->hasAttr<StdCallAttr>())
58     return llvm::CallingConv::X86_StdCall;
59 
60   if (D->hasAttr<FastCallAttr>())
61     return llvm::CallingConv::X86_FastCall;
62 
63   return llvm::CallingConv::C;
64 }
65 
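/// getFunctionInfo - Compute the function info for a member function of the
/// record RD with the given prototype; an implicit 'this' pointer to RD is
/// prepended to the argument list.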
66 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
67                                                  const FunctionProtoType *FTP) {
68   llvm::SmallVector<QualType, 16> ArgTys;
69 
70   // Add the 'this' pointer.
71   ArgTys.push_back(Context.getPointerType(Context.getTagDeclType(RD)));
72 
73   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
74     ArgTys.push_back(FTP->getArgType(i));
75 
  // FIXME: Set the calling convention correctly; it needs to be associated
  // with the type somehow.
78   return getFunctionInfo(FTP->getResultType(), ArgTys, 0);
79 }
80 
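/// getFunctionInfo - Compute the function info for a C++ method, prepending
/// the 'this' pointer for instance methods.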
81 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
82   llvm::SmallVector<QualType, 16> ArgTys;
83   // Add the 'this' pointer unless this is a static method.
84   if (MD->isInstance())
85     ArgTys.push_back(MD->getThisType(Context));
86 
87   const FunctionProtoType *FTP = MD->getType()->getAs<FunctionProtoType>();
88   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
89     ArgTys.push_back(FTP->getArgType(i));
90   return getFunctionInfo(FTP->getResultType(), ArgTys,
91                          getCallingConventionForDecl(MD));
92 }
93 
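/// getFunctionInfo - Compute the function info for a C++ constructor of the
/// given ctor type; base-object constructors of classes with virtual bases
/// take an extra VTT parameter after 'this'.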
94 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
95                                                     CXXCtorType Type) {
96   llvm::SmallVector<QualType, 16> ArgTys;
97 
98   // Add the 'this' pointer.
99   ArgTys.push_back(D->getThisType(Context));
100 
101   // Check if we need to add a VTT parameter (which has type void **).
102   if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0)
103     ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
104 
105   const FunctionProtoType *FTP = D->getType()->getAs<FunctionProtoType>();
106   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
107     ArgTys.push_back(FTP->getArgType(i));
108   return getFunctionInfo(FTP->getResultType(), ArgTys,
109                          getCallingConventionForDecl(D));
110 }
111 
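/// getFunctionInfo - Compute the function info for a C++ destructor of the
/// given dtor type; base-object destructors of classes with virtual bases
/// take an extra VTT parameter after 'this'.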
112 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
113                                                     CXXDtorType Type) {
114   llvm::SmallVector<QualType, 16> ArgTys;
115 
116   // Add the 'this' pointer.
117   ArgTys.push_back(D->getThisType(Context));
118 
119   // Check if we need to add a VTT parameter (which has type void **).
120   if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0)
121     ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
122 
123   const FunctionProtoType *FTP = D->getType()->getAs<FunctionProtoType>();
124   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
125     ArgTys.push_back(FTP->getArgType(i));
126   return getFunctionInfo(FTP->getResultType(), ArgTys,
127                          getCallingConventionForDecl(D));
128 }
129 
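/// getFunctionInfo - Compute the function info for an arbitrary function
/// declaration, dispatching to the instance-method path for non-static C++
/// methods and handling both prototyped and unprototyped functions.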
130 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
131   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
132     if (MD->isInstance())
133       return getFunctionInfo(MD);
134 
135   unsigned CallingConvention = getCallingConventionForDecl(FD);
136   const FunctionType *FTy = FD->getType()->getAs<FunctionType>();
137   if (const FunctionNoProtoType *FNTP = dyn_cast<FunctionNoProtoType>(FTy))
138     return getFunctionInfo(FNTP->getResultType(),
139                            llvm::SmallVector<QualType, 16>(),
140                            CallingConvention);
141 
142   const FunctionProtoType *FPT = cast<FunctionProtoType>(FTy);
143   llvm::SmallVector<QualType, 16> ArgTys;
144   // FIXME: Kill copy.
145   for (unsigned i = 0, e = FPT->getNumArgs(); i != e; ++i)
146     ArgTys.push_back(FPT->getArgType(i));
147   return getFunctionInfo(FPT->getResultType(), ArgTys, CallingConvention);
148 }
149 
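/// getFunctionInfo - Compute the function info for an Objective-C method; the
/// implicit 'self' and '_cmd' arguments are prepended to the declared
/// parameters.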
150 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
151   llvm::SmallVector<QualType, 16> ArgTys;
152   ArgTys.push_back(MD->getSelfDecl()->getType());
153   ArgTys.push_back(Context.getObjCSelType());
154   // FIXME: Kill copy?
155   for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
156          e = MD->param_end(); i != e; ++i)
157     ArgTys.push_back((*i)->getType());
158   return getFunctionInfo(MD->getResultType(), ArgTys,
159                          getCallingConventionForDecl(MD));
160 }
161 
162 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
163                                                     const CallArgList &Args,
164                                                     unsigned CallingConvention){
165   // FIXME: Kill copy.
166   llvm::SmallVector<QualType, 16> ArgTys;
167   for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
168        i != e; ++i)
169     ArgTys.push_back(i->second);
170   return getFunctionInfo(ResTy, ArgTys, CallingConvention);
171 }
172 
173 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
174                                                     const FunctionArgList &Args,
175                                                     unsigned CallingConvention){
176   // FIXME: Kill copy.
177   llvm::SmallVector<QualType, 16> ArgTys;
178   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
179        i != e; ++i)
180     ArgTys.push_back(i->second);
181   return getFunctionInfo(ResTy, ArgTys, CallingConvention);
182 }
183 
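/// getFunctionInfo - Return the uniqued CGFunctionInfo for the given result
/// type, argument types, and calling convention, creating it and computing its
/// ABI information on first use.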
184 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
185                                   const llvm::SmallVector<QualType, 16> &ArgTys,
186                                                     unsigned CallingConvention){
187   // Lookup or create unique function info.
188   llvm::FoldingSetNodeID ID;
189   CGFunctionInfo::Profile(ID, CallingConvention, ResTy,
190                           ArgTys.begin(), ArgTys.end());
191 
192   void *InsertPos = 0;
193   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
194   if (FI)
195     return *FI;
196 
197   // Construct the function info.
198   FI = new CGFunctionInfo(CallingConvention, ResTy, ArgTys);
199   FunctionInfos.InsertNode(FI, InsertPos);
200 
201   // Compute ABI information.
202   getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext());
203 
204   return *FI;
205 }
206 
207 CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
208                                QualType ResTy,
209                                const llvm::SmallVector<QualType, 16> &ArgTys)
210   : CallingConvention(_CallingConvention),
211     EffectiveCallingConvention(_CallingConvention)
212 {
213   NumArgs = ArgTys.size();
214   Args = new ArgInfo[1 + NumArgs];
215   Args[0].type = ResTy;
216   for (unsigned i = 0; i < NumArgs; ++i)
217     Args[1 + i].type = ArgTys[i];
218 }
219 
220 /***/
221 
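/// GetExpandedTypes - Append to ArgTys the LLVM types used to pass the fields
/// of the structure type Ty when it is expanded into separate arguments;
/// nested aggregates are expanded recursively.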
222 void CodeGenTypes::GetExpandedTypes(QualType Ty,
223                                     std::vector<const llvm::Type*> &ArgTys) {
224   const RecordType *RT = Ty->getAsStructureType();
225   assert(RT && "Can only expand structure types.");
226   const RecordDecl *RD = RT->getDecl();
227   assert(!RD->hasFlexibleArrayMember() &&
228          "Cannot expand structure with flexible array.");
229 
230   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
231          i != e; ++i) {
232     const FieldDecl *FD = *i;
233     assert(!FD->isBitField() &&
234            "Cannot expand structure with bit-field members.");
235 
236     QualType FT = FD->getType();
237     if (CodeGenFunction::hasAggregateLLVMType(FT)) {
238       GetExpandedTypes(FT, ArgTys);
239     } else {
240       ArgTys.push_back(ConvertType(FT));
241     }
242   }
243 }
244 
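/// ExpandTypeFromArgs - Reconstruct a structure of type Ty at the lvalue LV
/// from a sequence of expanded function arguments starting at AI, returning
/// the iterator past the last argument consumed.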
245 llvm::Function::arg_iterator
246 CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
247                                     llvm::Function::arg_iterator AI) {
248   const RecordType *RT = Ty->getAsStructureType();
249   assert(RT && "Can only expand structure types.");
250 
251   RecordDecl *RD = RT->getDecl();
252   assert(LV.isSimple() &&
253          "Unexpected non-simple lvalue during struct expansion.");
254   llvm::Value *Addr = LV.getAddress();
255   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
256          i != e; ++i) {
257     FieldDecl *FD = *i;
258     QualType FT = FD->getType();
259 
260     // FIXME: What are the right qualifiers here?
261     LValue LV = EmitLValueForField(Addr, FD, false, 0);
262     if (CodeGenFunction::hasAggregateLLVMType(FT)) {
263       AI = ExpandTypeFromArgs(FT, LV, AI);
264     } else {
265       EmitStoreThroughLValue(RValue::get(AI), LV, FT);
266       ++AI;
267     }
268   }
269 
270   return AI;
271 }
272 
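/// ExpandTypeToArgs - Flatten the structure rvalue RV of type Ty into a list
/// of scalar call arguments, expanding nested aggregates field by field.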
273 void
274 CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
275                                   llvm::SmallVector<llvm::Value*, 16> &Args) {
276   const RecordType *RT = Ty->getAsStructureType();
277   assert(RT && "Can only expand structure types.");
278 
279   RecordDecl *RD = RT->getDecl();
280   assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
281   llvm::Value *Addr = RV.getAggregateAddr();
282   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
283          i != e; ++i) {
284     FieldDecl *FD = *i;
285     QualType FT = FD->getType();
286 
287     // FIXME: What are the right qualifiers here?
288     LValue LV = EmitLValueForField(Addr, FD, false, 0);
289     if (CodeGenFunction::hasAggregateLLVMType(FT)) {
290       ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
291     } else {
292       RValue RV = EmitLoadOfLValue(LV, FT);
293       assert(RV.isScalar() &&
294              "Unexpected non-scalar rvalue during struct expansion.");
295       Args.push_back(RV.getScalarVal());
296     }
297   }
298 }
299 
300 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
301 /// a pointer to an object of type \arg Ty.
302 ///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits that are not
/// present in the src are undefined.
306 static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
307                                       const llvm::Type *Ty,
308                                       CodeGenFunction &CGF) {
309   const llvm::Type *SrcTy =
310     cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
311   uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
312   uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
313 
314   // If load is legal, just bitcast the src pointer.
315   if (SrcSize >= DstSize) {
316     // Generally SrcSize is never greater than DstSize, since this means we are
317     // losing bits. However, this can happen in cases where the structure has
318     // additional padding, for example due to a user specified alignment.
319     //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
322     llvm::Value *Casted =
323       CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
324     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
325     // FIXME: Use better alignment / avoid requiring aligned load.
326     Load->setAlignment(1);
327     return Load;
328   } else {
329     // Otherwise do coercion through memory. This is stupid, but
330     // simple.
331     llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
332     llvm::Value *Casted =
333       CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
334     llvm::StoreInst *Store =
335       CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
336     // FIXME: Use better alignment / avoid requiring aligned store.
337     Store->setAlignment(1);
338     return CGF.Builder.CreateLoad(Tmp);
339   }
340 }
341 
342 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
343 /// where the source and destination may have different types.
344 ///
345 /// This safely handles the case when the src type is larger than the
346 /// destination type; the upper bits of the src will be lost.
347 static void CreateCoercedStore(llvm::Value *Src,
348                                llvm::Value *DstPtr,
349                                CodeGenFunction &CGF) {
350   const llvm::Type *SrcTy = Src->getType();
351   const llvm::Type *DstTy =
352     cast<llvm::PointerType>(DstPtr->getType())->getElementType();
353 
354   uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
355   uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
356 
357   // If store is legal, just bitcast the src pointer.
358   if (SrcSize <= DstSize) {
359     llvm::Value *Casted =
360       CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
361     // FIXME: Use better alignment / avoid requiring aligned store.
362     CGF.Builder.CreateStore(Src, Casted)->setAlignment(1);
363   } else {
364     // Otherwise do coercion through memory. This is stupid, but
365     // simple.
366 
367     // Generally SrcSize is never greater than DstSize, since this means we are
368     // losing bits. However, this can happen in cases where the structure has
369     // additional padding, for example due to a user specified alignment.
370     //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
373     llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
374     CGF.Builder.CreateStore(Src, Tmp);
375     llvm::Value *Casted =
376       CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
377     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
378     // FIXME: Use better alignment / avoid requiring aligned load.
379     Load->setAlignment(1);
380     CGF.Builder.CreateStore(Load, DstPtr);
381   }
382 }
383 
384 /***/
385 
386 bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
387   return FI.getReturnInfo().isIndirect();
388 }
389 
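/// GetFunctionType - Build the LLVM function type for the given function info,
/// mapping the return value and each argument according to its ABI
/// classification (direct, extend, indirect, ignore, coerce, or expand).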
390 const llvm::FunctionType *
391 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
392   std::vector<const llvm::Type*> ArgTys;
393 
394   const llvm::Type *ResultType = 0;
395 
396   QualType RetTy = FI.getReturnType();
397   const ABIArgInfo &RetAI = FI.getReturnInfo();
398   switch (RetAI.getKind()) {
399   case ABIArgInfo::Expand:
400     assert(0 && "Invalid ABI kind for return argument");
401 
402   case ABIArgInfo::Extend:
403   case ABIArgInfo::Direct:
404     ResultType = ConvertType(RetTy);
405     break;
406 
407   case ABIArgInfo::Indirect: {
408     assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
409     ResultType = llvm::Type::getVoidTy(getLLVMContext());
410     const llvm::Type *STy = ConvertType(RetTy);
411     ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
412     break;
413   }
414 
415   case ABIArgInfo::Ignore:
416     ResultType = llvm::Type::getVoidTy(getLLVMContext());
417     break;
418 
419   case ABIArgInfo::Coerce:
420     ResultType = RetAI.getCoerceToType();
421     break;
422   }
423 
424   for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
425          ie = FI.arg_end(); it != ie; ++it) {
426     const ABIArgInfo &AI = it->info;
427 
428     switch (AI.getKind()) {
429     case ABIArgInfo::Ignore:
430       break;
431 
432     case ABIArgInfo::Coerce:
433       ArgTys.push_back(AI.getCoerceToType());
434       break;
435 
436     case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
438       const llvm::Type *LTy = ConvertTypeForMem(it->type);
439       ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
440       break;
441     }
442 
443     case ABIArgInfo::Extend:
444     case ABIArgInfo::Direct:
445       ArgTys.push_back(ConvertType(it->type));
446       break;
447 
448     case ABIArgInfo::Expand:
449       GetExpandedTypes(it->type, ArgTys);
450       break;
451     }
452   }
453 
454   return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
455 }
456 
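/// HasIncompleteReturnTypeOrArgumentTypes - Return true if the result type or
/// any argument type of the prototype is a tag type that has not yet been
/// defined.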
457 static bool HasIncompleteReturnTypeOrArgumentTypes(const FunctionProtoType *T) {
458   if (const TagType *TT = T->getResultType()->getAs<TagType>()) {
459     if (!TT->getDecl()->isDefinition())
460       return true;
461   }
462 
463   for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) {
464     if (const TagType *TT = T->getArgType(i)->getAs<TagType>()) {
465       if (!TT->getDecl()->isDefinition())
466         return true;
467     }
468   }
469 
470   return false;
471 }
472 
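/// GetFunctionTypeForVtable - Return the LLVM type to use for a method entry
/// in a vtable; when the return or argument types are still incomplete, an
/// opaque type is returned instead.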
473 const llvm::Type *
474 CodeGenTypes::GetFunctionTypeForVtable(const CXXMethodDecl *MD) {
475   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
476 
477   if (!HasIncompleteReturnTypeOrArgumentTypes(FPT))
478     return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic());
479 
480   return llvm::OpaqueType::get(getLLVMContext());
481 }
482 
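/// ConstructAttributeList - Translate the declaration attributes and ABI
/// information in FI into the LLVM attribute list PAL and the calling
/// convention CallingConv for a function or call site.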
483 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
484                                            const Decl *TargetDecl,
485                                            AttributeListType &PAL,
486                                            unsigned &CallingConv) {
487   unsigned FuncAttrs = 0;
488   unsigned RetAttrs = 0;
489 
490   CallingConv = FI.getEffectiveCallingConvention();
491 
492   // FIXME: handle sseregparm someday...
493   if (TargetDecl) {
494     if (TargetDecl->hasAttr<NoThrowAttr>())
495       FuncAttrs |= llvm::Attribute::NoUnwind;
496     if (TargetDecl->hasAttr<NoReturnAttr>())
497       FuncAttrs |= llvm::Attribute::NoReturn;
498     if (TargetDecl->hasAttr<ConstAttr>())
499       FuncAttrs |= llvm::Attribute::ReadNone;
500     else if (TargetDecl->hasAttr<PureAttr>())
501       FuncAttrs |= llvm::Attribute::ReadOnly;
502     if (TargetDecl->hasAttr<MallocAttr>())
503       RetAttrs |= llvm::Attribute::NoAlias;
504   }
505 
506   if (CodeGenOpts.OptimizeSize)
507     FuncAttrs |= llvm::Attribute::OptimizeForSize;
508   if (CodeGenOpts.DisableRedZone)
509     FuncAttrs |= llvm::Attribute::NoRedZone;
510   if (CodeGenOpts.NoImplicitFloat)
511     FuncAttrs |= llvm::Attribute::NoImplicitFloat;
512 
513   QualType RetTy = FI.getReturnType();
514   unsigned Index = 1;
515   const ABIArgInfo &RetAI = FI.getReturnInfo();
516   switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->isSignedIntegerType()) {
      RetAttrs |= llvm::Attribute::SExt;
    } else if (RetTy->isUnsignedIntegerType()) {
      RetAttrs |= llvm::Attribute::ZExt;
    }
    // FALLTHROUGH
524   case ABIArgInfo::Direct:
525     break;
526 
527   case ABIArgInfo::Indirect:
528     PAL.push_back(llvm::AttributeWithIndex::get(Index,
529                                                 llvm::Attribute::StructRet |
530                                                 llvm::Attribute::NoAlias));
531     ++Index;
532     // sret disables readnone and readonly
533     FuncAttrs &= ~(llvm::Attribute::ReadOnly |
534                    llvm::Attribute::ReadNone);
535     break;
536 
537   case ABIArgInfo::Ignore:
538   case ABIArgInfo::Coerce:
539     break;
540 
541   case ABIArgInfo::Expand:
542     assert(0 && "Invalid ABI kind for return argument");
543   }
544 
545   if (RetAttrs)
546     PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
547 
548   // FIXME: we need to honour command line settings also...
549   // FIXME: RegParm should be reduced in case of nested functions and/or global
550   // register variable.
551   signed RegParm = 0;
552   if (TargetDecl)
553     if (const RegparmAttr *RegParmAttr
554           = TargetDecl->getAttr<RegparmAttr>())
555       RegParm = RegParmAttr->getNumParams();
556 
557   unsigned PointerWidth = getContext().Target.getPointerWidth(0);
558   for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
559          ie = FI.arg_end(); it != ie; ++it) {
560     QualType ParamType = it->type;
561     const ABIArgInfo &AI = it->info;
562     unsigned Attributes = 0;
563 
564     if (ParamType.isRestrictQualified())
565       Attributes |= llvm::Attribute::NoAlias;
566 
567     switch (AI.getKind()) {
568     case ABIArgInfo::Coerce:
569       break;
570 
571     case ABIArgInfo::Indirect:
572       if (AI.getIndirectByVal())
573         Attributes |= llvm::Attribute::ByVal;
574 
575       Attributes |=
576         llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
577       // byval disables readnone and readonly.
578       FuncAttrs &= ~(llvm::Attribute::ReadOnly |
579                      llvm::Attribute::ReadNone);
580       break;
581 
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerType()) {
        Attributes |= llvm::Attribute::SExt;
      } else if (ParamType->isUnsignedIntegerType()) {
        Attributes |= llvm::Attribute::ZExt;
      }
      // FALLTHROUGH
589     case ABIArgInfo::Direct:
590       if (RegParm > 0 &&
591           (ParamType->isIntegerType() || ParamType->isPointerType())) {
592         RegParm -=
593           (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
594         if (RegParm >= 0)
595           Attributes |= llvm::Attribute::InReg;
596       }
597       // FIXME: handle sseregparm someday...
598       break;
599 
600     case ABIArgInfo::Ignore:
601       // Skip increment, no matching LLVM parameter.
602       continue;
603 
604     case ABIArgInfo::Expand: {
605       std::vector<const llvm::Type*> Tys;
606       // FIXME: This is rather inefficient. Do we ever actually need to do
607       // anything here? The result should be just reconstructed on the other
608       // side, so extension should be a non-issue.
609       getTypes().GetExpandedTypes(ParamType, Tys);
610       Index += Tys.size();
611       continue;
612     }
613     }
614 
615     if (Attributes)
616       PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
617     ++Index;
618   }
619   if (FuncAttrs)
620     PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
621 }
622 
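/// EmitFunctionProlog - Emit the function prologue, binding the incoming LLVM
/// arguments of Fn to the Clang parameter declarations in Args according to
/// each argument's ABI classification.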
623 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
624                                          llvm::Function *Fn,
625                                          const FunctionArgList &Args) {
626   // If this is an implicit-return-zero function, go ahead and
627   // initialize the return value.  TODO: it might be nice to have
628   // a more general mechanism for this that didn't require synthesized
629   // return statements.
630   if (const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
631     if (FD->hasImplicitReturnZero()) {
632       QualType RetTy = FD->getResultType().getUnqualifiedType();
633       const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
634       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
635       Builder.CreateStore(Zero, ReturnValue);
636     }
637   }
638 
639   // FIXME: We no longer need the types from FunctionArgList; lift up and
640   // simplify.
641 
642   // Emit allocs for param decls.  Give the LLVM Argument nodes names.
643   llvm::Function::arg_iterator AI = Fn->arg_begin();
644 
645   // Name the struct return argument.
646   if (CGM.ReturnTypeUsesSret(FI)) {
647     AI->setName("agg.result");
648     ++AI;
649   }
650 
651   assert(FI.arg_size() == Args.size() &&
652          "Mismatch between function signature & arguments.");
653   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
654   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
655        i != e; ++i, ++info_it) {
656     const VarDecl *Arg = i->first;
657     QualType Ty = info_it->type;
658     const ABIArgInfo &ArgI = info_it->info;
659 
660     switch (ArgI.getKind()) {
661     case ABIArgInfo::Indirect: {
662       llvm::Value* V = AI;
663       if (hasAggregateLLVMType(Ty)) {
        // Do nothing; aggregates and complex variables are accessed by
        // reference.
666       } else {
667         // Load scalar value from indirect argument.
668         V = EmitLoadOfScalar(V, false, Ty);
669         if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
670           // This must be a promotion, for something like
671           // "void a(x) short x; {..."
672           V = EmitScalarConversion(V, Ty, Arg->getType());
673         }
674       }
675       EmitParmDecl(*Arg, V);
676       break;
677     }
678 
679     case ABIArgInfo::Extend:
680     case ABIArgInfo::Direct: {
681       assert(AI != Fn->arg_end() && "Argument mismatch!");
682       llvm::Value* V = AI;
683       if (hasAggregateLLVMType(Ty)) {
684         // Create a temporary alloca to hold the argument; the rest of
685         // codegen expects to access aggregates & complex values by
686         // reference.
687         V = CreateTempAlloca(ConvertTypeForMem(Ty));
688         Builder.CreateStore(AI, V);
689       } else {
690         if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
691           // This must be a promotion, for something like
692           // "void a(x) short x; {..."
693           V = EmitScalarConversion(V, Ty, Arg->getType());
694         }
695       }
696       EmitParmDecl(*Arg, V);
697       break;
698     }
699 
700     case ABIArgInfo::Expand: {
701       // If this structure was expanded into multiple arguments then
702       // we need to create a temporary and reconstruct it from the
703       // arguments.
704       llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
705                                            Arg->getName() + ".addr");
706       // FIXME: What are the right qualifiers here?
707       llvm::Function::arg_iterator End =
708         ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
709       EmitParmDecl(*Arg, Temp);
710 
711       // Name the arguments used in expansion and increment AI.
712       unsigned Index = 0;
713       for (; AI != End; ++AI, ++Index)
714         AI->setName(Arg->getName() + "." + llvm::Twine(Index));
715       continue;
716     }
717 
718     case ABIArgInfo::Ignore:
719       // Initialize the local variable appropriately.
720       if (hasAggregateLLVMType(Ty)) {
721         EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
722       } else {
723         EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
724       }
725 
726       // Skip increment, no matching LLVM parameter.
727       continue;
728 
729     case ABIArgInfo::Coerce: {
730       assert(AI != Fn->arg_end() && "Argument mismatch!");
731       // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
732       // result in a new alloca anyway, so we could just store into that
733       // directly if we broke the abstraction down more.
734       llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce");
735       CreateCoercedStore(AI, V, *this);
736       // Match to what EmitParmDecl is expecting for this type.
737       if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
738         V = EmitLoadOfScalar(V, false, Ty);
739         if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
740           // This must be a promotion, for something like
741           // "void a(x) short x; {..."
742           V = EmitScalarConversion(V, Ty, Arg->getType());
743         }
744       }
745       EmitParmDecl(*Arg, V);
746       break;
747     }
748     }
749 
750     ++AI;
751   }
752   assert(AI == Fn->arg_end() && "Argument mismatch!");
753 }
754 
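/// EmitFunctionEpilog - Emit the return sequence for the current function,
/// loading or coercing the value stored in ReturnValue as required by the
/// return ABI classification.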
755 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
756                                          llvm::Value *ReturnValue) {
757   llvm::Value *RV = 0;
758 
759   // Functions with no result always return void.
760   if (ReturnValue) {
761     QualType RetTy = FI.getReturnType();
762     const ABIArgInfo &RetAI = FI.getReturnInfo();
763 
764     switch (RetAI.getKind()) {
765     case ABIArgInfo::Indirect:
766       if (RetTy->isAnyComplexType()) {
767         ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
768         StoreComplexToAddr(RT, CurFn->arg_begin(), false);
769       } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        // Do nothing; aggregates get evaluated directly into the destination.
771       } else {
772         EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
773                           false, RetTy);
774       }
775       break;
776 
777     case ABIArgInfo::Extend:
778     case ABIArgInfo::Direct:
779       // The internal return value temp always will have
780       // pointer-to-return-type type.
781       RV = Builder.CreateLoad(ReturnValue);
782       break;
783 
784     case ABIArgInfo::Ignore:
785       break;
786 
787     case ABIArgInfo::Coerce:
788       RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
789       break;
790 
791     case ABIArgInfo::Expand:
792       assert(0 && "Invalid ABI kind for return argument");
793     }
794   }
795 
796   if (RV) {
797     Builder.CreateRet(RV);
798   } else {
799     Builder.CreateRetVoid();
800   }
801 }
802 
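/// EmitCallArg - Emit a single call argument; reference-type arguments are
/// emitted as reference bindings to the expression, all others via
/// EmitAnyExprToTemp.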
803 RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
804   if (ArgType->isReferenceType())
805     return EmitReferenceBindingToExpr(E, ArgType);
806 
807   return EmitAnyExprToTemp(E);
808 }
809 
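/// EmitCall - Emit a call or invoke to Callee, lowering each argument in
/// CallArgs according to its ABI classification and converting the call result
/// back into an RValue of the declared return type.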
810 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
811                                  llvm::Value *Callee,
812                                  const CallArgList &CallArgs,
813                                  const Decl *TargetDecl) {
814   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
815   llvm::SmallVector<llvm::Value*, 16> Args;
816 
817   // Handle struct-return functions by passing a pointer to the
818   // location that we would like to return into.
819   QualType RetTy = CallInfo.getReturnType();
820   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
821 
822 
823   // If the call returns a temporary with struct return, create a temporary
824   // alloca to hold the result.
825   if (CGM.ReturnTypeUsesSret(CallInfo))
826     Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));
827 
828   assert(CallInfo.arg_size() == CallArgs.size() &&
829          "Mismatch between function signature & arguments.");
830   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
831   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
832        I != E; ++I, ++info_it) {
833     const ABIArgInfo &ArgInfo = info_it->info;
834     RValue RV = I->first;
835 
836     switch (ArgInfo.getKind()) {
837     case ABIArgInfo::Indirect:
838       if (RV.isScalar() || RV.isComplex()) {
839         // Make a temporary alloca to pass the argument.
840         Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
841         if (RV.isScalar())
842           EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
843         else
844           StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
845       } else {
846         Args.push_back(RV.getAggregateAddr());
847       }
848       break;
849 
850     case ABIArgInfo::Extend:
851     case ABIArgInfo::Direct:
852       if (RV.isScalar()) {
853         Args.push_back(RV.getScalarVal());
854       } else if (RV.isComplex()) {
855         llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
856         Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
857         Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
858         Args.push_back(Tmp);
859       } else {
860         Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
861       }
862       break;
863 
864     case ABIArgInfo::Ignore:
865       break;
866 
867     case ABIArgInfo::Coerce: {
868       // FIXME: Avoid the conversion through memory if possible.
869       llvm::Value *SrcPtr;
870       if (RV.isScalar()) {
871         SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
872         EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
873       } else if (RV.isComplex()) {
874         SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
875         StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
876       } else
877         SrcPtr = RV.getAggregateAddr();
878       Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
879                                        *this));
880       break;
881     }
882 
883     case ABIArgInfo::Expand:
884       ExpandTypeToArgs(I->second, RV, Args);
885       break;
886     }
887   }
888 
889   // If the callee is a bitcast of a function to a varargs pointer to function
890   // type, check to see if we can remove the bitcast.  This handles some cases
891   // with unprototyped functions.
892   if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
893     if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT =
        cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
897       const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
898 
899       if (CE->getOpcode() == llvm::Instruction::BitCast &&
900           ActualFT->getReturnType() == CurFT->getReturnType() &&
901           ActualFT->getNumParams() == CurFT->getNumParams() &&
902           ActualFT->getNumParams() == Args.size()) {
903         bool ArgsMatch = true;
904         for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
905           if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
906             ArgsMatch = false;
907             break;
908           }
909 
910         // Strip the cast if we can get away with it.  This is a nice cleanup,
911         // but also allows us to inline the function at -O0 if it is marked
912         // always_inline.
913         if (ArgsMatch)
914           Callee = CalleeF;
915       }
916     }
917 
918 
919   llvm::BasicBlock *InvokeDest = getInvokeDest();
920   unsigned CallingConv;
921   CodeGen::AttributeListType AttributeList;
922   CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
923   llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
924                                                    AttributeList.end());
925 
926   llvm::CallSite CS;
927   if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
928     CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
929   } else {
930     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
931     CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
932                               Args.data(), Args.data()+Args.size());
933     EmitBlock(Cont);
934   }
935 
936   CS.setAttributes(Attrs);
937   CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
938 
939   // If the call doesn't return, finish the basic block and clear the
940   // insertion point; this allows the rest of IRgen to discard
941   // unreachable code.
942   if (CS.doesNotReturn()) {
943     Builder.CreateUnreachable();
944     Builder.ClearInsertionPoint();
945 
    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
949     EnsureInsertPoint();
950 
951     // Return a reasonable RValue.
952     return GetUndefRValue(RetTy);
953   }
954 
955   llvm::Instruction *CI = CS.getInstruction();
956   if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
957     CI->setName("call");
958 
959   switch (RetAI.getKind()) {
960   case ABIArgInfo::Indirect:
961     if (RetTy->isAnyComplexType())
962       return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
963     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
964       return RValue::getAggregate(Args[0]);
965     return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));
966 
967   case ABIArgInfo::Extend:
968   case ABIArgInfo::Direct:
969     if (RetTy->isAnyComplexType()) {
970       llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
971       llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
972       return RValue::getComplex(std::make_pair(Real, Imag));
973     }
974     if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
975       llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
976       Builder.CreateStore(CI, V);
977       return RValue::getAggregate(V);
978     }
979     return RValue::get(CI);
980 
981   case ABIArgInfo::Ignore:
982     // If we are ignoring an argument that had a result, make sure to
983     // construct the appropriate return value for our caller.
984     return GetUndefRValue(RetTy);
985 
986   case ABIArgInfo::Coerce: {
987     // FIXME: Avoid the conversion through memory if possible.
988     llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
989     CreateCoercedStore(CI, V, *this);
990     if (RetTy->isAnyComplexType())
991       return RValue::getComplex(LoadComplexFromAddr(V, false));
992     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
993       return RValue::getAggregate(V);
994     return RValue::get(EmitLoadOfScalar(V, false, RetTy));
995   }
996 
997   case ABIArgInfo::Expand:
998     assert(0 && "Invalid ABI kind for return argument");
999   }
1000 
1001   assert(0 && "Unhandled ABIArgInfo::Kind");
1002   return RValue::get(0);
1003 }
1004 
1005 /* VarArg handling */
1006 
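/// EmitVAArg - Lower a va_arg of type Ty against the va_list at VAListAddr by
/// delegating to the target's ABIInfo implementation.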
1007 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
1008   return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
1009 }
1010