//===--- CGCall.cpp - Encapsulate calling convention details ---*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "CGCall.h"
16 #include "CGCXXABI.h"
17 #include "ABIInfo.h"
18 #include "CodeGenFunction.h"
19 #include "CodeGenModule.h"
20 #include "clang/Basic/TargetInfo.h"
21 #include "clang/AST/Decl.h"
22 #include "clang/AST/DeclCXX.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/Frontend/CodeGenOptions.h"
25 #include "llvm/Attributes.h"
26 #include "llvm/Support/CallSite.h"
27 #include "llvm/Target/TargetData.h"
28 using namespace clang;
29 using namespace CodeGen;
30 
31 /***/
32 
33 static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
34   switch (CC) {
35   default: return llvm::CallingConv::C;
36   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
37   case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
38   case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
39   // TODO: add support for CC_X86Pascal to llvm
40   }
41 }
42 
43 /// Derives the 'this' type for codegen purposes, i.e. ignoring method
44 /// qualification.
45 /// FIXME: address space qualification?
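///
/// For example, 'void A::f() const' is still given a 'this' type of 'A*'
/// here, not 'const A*'.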
46 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
47   QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
48   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
49 }
50 
51 /// Returns the canonical formal type of the given C++ method.
52 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
53   return MD->getType()->getCanonicalTypeUnqualified()
54            .getAs<FunctionProtoType>();
55 }
56 
57 /// Returns the "extra-canonicalized" return type, which discards
58 /// qualifiers on the return type.  Codegen doesn't care about them,
59 /// and it makes ABI code a little easier to be able to assume that
60 /// all parameter and return types are top-level unqualified.
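///
/// For example, a function declared as returning 'const int' is treated
/// here as if it returned plain 'int'.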
61 static CanQualType GetReturnType(QualType RetTy) {
62   return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
63 }
64 
65 const CGFunctionInfo &
66 CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP,
67                               bool IsRecursive) {
68   return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
69                          llvm::SmallVector<CanQualType, 16>(),
70                          FTNP->getExtInfo(), IsRecursive);
71 }
72 
/// \param ArgTys - contains any initial parameters besides those
///   in the formal type
75 static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
76                                   llvm::SmallVectorImpl<CanQualType> &ArgTys,
77                                              CanQual<FunctionProtoType> FTP,
78                                              bool IsRecursive = false) {
79   // FIXME: Kill copy.
80   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
81     ArgTys.push_back(FTP->getArgType(i));
82   CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
83   return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo(), IsRecursive);
84 }
85 
86 const CGFunctionInfo &
87 CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP,
88                               bool IsRecursive) {
89   llvm::SmallVector<CanQualType, 16> ArgTys;
90   return ::getFunctionInfo(*this, ArgTys, FTP, IsRecursive);
91 }
92 
93 static CallingConv getCallingConventionForDecl(const Decl *D) {
94   // Set the appropriate calling convention for the Function.
95   if (D->hasAttr<StdCallAttr>())
96     return CC_X86StdCall;
97 
98   if (D->hasAttr<FastCallAttr>())
99     return CC_X86FastCall;
100 
101   if (D->hasAttr<ThisCallAttr>())
102     return CC_X86ThisCall;
103 
104   if (D->hasAttr<PascalAttr>())
105     return CC_X86Pascal;
106 
107   return CC_C;
108 }
109 
110 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
111                                                  const FunctionProtoType *FTP) {
112   llvm::SmallVector<CanQualType, 16> ArgTys;
113 
114   // Add the 'this' pointer.
115   ArgTys.push_back(GetThisType(Context, RD));
116 
117   return ::getFunctionInfo(*this, ArgTys,
118               FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
119 }
120 
121 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
122   llvm::SmallVector<CanQualType, 16> ArgTys;
123 
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
125   assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
126 
127   // Add the 'this' pointer unless this is a static method.
128   if (MD->isInstance())
129     ArgTys.push_back(GetThisType(Context, MD->getParent()));
130 
131   return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
132 }
133 
134 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
135                                                     CXXCtorType Type) {
136   llvm::SmallVector<CanQualType, 16> ArgTys;
137   ArgTys.push_back(GetThisType(Context, D->getParent()));
138   CanQualType ResTy = Context.VoidTy;
139 
140   TheCXXABI.BuildConstructorSignature(D, Type, ResTy, ArgTys);
141 
142   CanQual<FunctionProtoType> FTP = GetFormalType(D);
143 
144   // Add the formal parameters.
145   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
146     ArgTys.push_back(FTP->getArgType(i));
147 
148   return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
149 }
150 
151 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
152                                                     CXXDtorType Type) {
153   llvm::SmallVector<CanQualType, 2> ArgTys;
154   ArgTys.push_back(GetThisType(Context, D->getParent()));
155   CanQualType ResTy = Context.VoidTy;
156 
157   TheCXXABI.BuildDestructorSignature(D, Type, ResTy, ArgTys);
158 
159   CanQual<FunctionProtoType> FTP = GetFormalType(D);
160   assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
161 
162   return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
163 }
164 
165 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
166   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
167     if (MD->isInstance())
168       return getFunctionInfo(MD);
169 
170   CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
171   assert(isa<FunctionType>(FTy));
172   if (isa<FunctionNoProtoType>(FTy))
173     return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
174   assert(isa<FunctionProtoType>(FTy));
175   return getFunctionInfo(FTy.getAs<FunctionProtoType>());
176 }
177 
178 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
179   llvm::SmallVector<CanQualType, 16> ArgTys;
180   ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
181   ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
182   // FIXME: Kill copy?
183   for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
184          e = MD->param_end(); i != e; ++i) {
185     ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
186   }
187   return getFunctionInfo(GetReturnType(MD->getResultType()),
188                          ArgTys,
189                          FunctionType::ExtInfo(
190                              /*NoReturn*/ false,
191                              /*RegParm*/ 0,
192                              getCallingConventionForDecl(MD)));
193 }
194 
195 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
196   // FIXME: Do we need to handle ObjCMethodDecl?
197   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
198 
199   if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
200     return getFunctionInfo(CD, GD.getCtorType());
201 
202   if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
203     return getFunctionInfo(DD, GD.getDtorType());
204 
205   return getFunctionInfo(FD);
206 }
207 
208 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
209                                                     const CallArgList &Args,
210                                             const FunctionType::ExtInfo &Info) {
211   // FIXME: Kill copy.
212   llvm::SmallVector<CanQualType, 16> ArgTys;
213   for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
214        i != e; ++i)
215     ArgTys.push_back(Context.getCanonicalParamType(i->second));
216   return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
217 }
218 
219 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
220                                                     const FunctionArgList &Args,
221                                             const FunctionType::ExtInfo &Info) {
222   // FIXME: Kill copy.
223   llvm::SmallVector<CanQualType, 16> ArgTys;
224   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
225        i != e; ++i)
226     ArgTys.push_back(Context.getCanonicalParamType(i->second));
227   return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
228 }
229 
230 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
231                            const llvm::SmallVectorImpl<CanQualType> &ArgTys,
232                                             const FunctionType::ExtInfo &Info,
233                                                     bool IsRecursive) {
234 #ifndef NDEBUG
235   for (llvm::SmallVectorImpl<CanQualType>::const_iterator
236          I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
237     assert(I->isCanonicalAsParam());
238 #endif
239 
240   unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());
241 
242   // Lookup or create unique function info.
243   llvm::FoldingSetNodeID ID;
244   CGFunctionInfo::Profile(ID, Info, ResTy,
245                           ArgTys.begin(), ArgTys.end());
246 
247   void *InsertPos = 0;
248   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
249   if (FI)
250     return *FI;
251 
252   // Construct the function info.
253   FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy,
254                           ArgTys.data(), ArgTys.size());
255   FunctionInfos.InsertNode(FI, InsertPos);
256 
257   // Compute ABI information.
258   getABIInfo().computeInfo(*FI);
259 
260   // Loop over all of the computed argument and return value info.  If any of
261   // them are direct or extend without a specified coerce type, specify the
262   // default now.
263   ABIArgInfo &RetInfo = FI->getReturnInfo();
264   if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
265     RetInfo.setCoerceToType(ConvertTypeRecursive(FI->getReturnType()));
266 
267   for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
268        I != E; ++I)
269     if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
270       I->info.setCoerceToType(ConvertTypeRecursive(I->type));
271 
272   // If this is a top-level call and ConvertTypeRecursive hit unresolved pointer
273   // types, resolve them now.  These pointers may point to this function, which
274   // we *just* filled in the FunctionInfo for.
275   if (!IsRecursive && !PointersToResolve.empty())
276     HandleLateResolvedPointers();
277 
278   return *FI;
279 }
280 
281 CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
282                                bool _NoReturn, unsigned _RegParm,
283                                CanQualType ResTy,
284                                const CanQualType *ArgTys,
285                                unsigned NumArgTys)
286   : CallingConvention(_CallingConvention),
287     EffectiveCallingConvention(_CallingConvention),
288     NoReturn(_NoReturn), RegParm(_RegParm)
289 {
290   NumArgs = NumArgTys;
291 
  // FIXME: Co-allocate with the CGFunctionInfo object.
293   Args = new ArgInfo[1 + NumArgTys];
294   Args[0].type = ResTy;
295   for (unsigned i = 0; i != NumArgTys; ++i)
296     Args[1 + i].type = ArgTys[i];
297 }
298 
299 /***/
300 
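/// GetExpandedTypes - Recursively compute the flattened LLVM scalar types
/// for a structure passed with the Expand ABI kind.  For example (roughly,
/// on typical targets), 'struct S { int a; struct { float b; } inner; };'
/// expands to the LLVM types [i32, float].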
301 void CodeGenTypes::GetExpandedTypes(QualType Ty,
302                                     std::vector<const llvm::Type*> &ArgTys,
303                                     bool IsRecursive) {
304   const RecordType *RT = Ty->getAsStructureType();
305   assert(RT && "Can only expand structure types.");
306   const RecordDecl *RD = RT->getDecl();
307   assert(!RD->hasFlexibleArrayMember() &&
308          "Cannot expand structure with flexible array.");
309 
310   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
311          i != e; ++i) {
312     const FieldDecl *FD = *i;
313     assert(!FD->isBitField() &&
314            "Cannot expand structure with bit-field members.");
315 
316     QualType FT = FD->getType();
317     if (CodeGenFunction::hasAggregateLLVMType(FT))
318       GetExpandedTypes(FT, ArgTys, IsRecursive);
319     else
320       ArgTys.push_back(ConvertType(FT, IsRecursive));
321   }
322 }
323 
324 llvm::Function::arg_iterator
325 CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
326                                     llvm::Function::arg_iterator AI) {
327   const RecordType *RT = Ty->getAsStructureType();
328   assert(RT && "Can only expand structure types.");
329 
330   RecordDecl *RD = RT->getDecl();
331   assert(LV.isSimple() &&
332          "Unexpected non-simple lvalue during struct expansion.");
333   llvm::Value *Addr = LV.getAddress();
334   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
335          i != e; ++i) {
336     FieldDecl *FD = *i;
337     QualType FT = FD->getType();
338 
339     // FIXME: What are the right qualifiers here?
340     LValue LV = EmitLValueForField(Addr, FD, 0);
341     if (CodeGenFunction::hasAggregateLLVMType(FT)) {
342       AI = ExpandTypeFromArgs(FT, LV, AI);
343     } else {
344       EmitStoreThroughLValue(RValue::get(AI), LV, FT);
345       ++AI;
346     }
347   }
348 
349   return AI;
350 }
351 
352 void
353 CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
354                                   llvm::SmallVector<llvm::Value*, 16> &Args) {
355   const RecordType *RT = Ty->getAsStructureType();
356   assert(RT && "Can only expand structure types.");
357 
358   RecordDecl *RD = RT->getDecl();
359   assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
360   llvm::Value *Addr = RV.getAggregateAddr();
361   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
362          i != e; ++i) {
363     FieldDecl *FD = *i;
364     QualType FT = FD->getType();
365 
366     // FIXME: What are the right qualifiers here?
367     LValue LV = EmitLValueForField(Addr, FD, 0);
368     if (CodeGenFunction::hasAggregateLLVMType(FT)) {
369       ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
370     } else {
371       RValue RV = EmitLoadOfLValue(LV, FT);
372       assert(RV.isScalar() &&
373              "Unexpected non-scalar rvalue during struct expansion.");
374       Args.push_back(RV.getScalarVal());
375     }
376   }
377 }
378 
379 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
380 /// accessing some number of bytes out of it, try to gep into the struct to get
381 /// at its inner goodness.  Dive as deep as possible without entering an element
382 /// with an in-memory size smaller than DstSize.
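///
/// For example, given an SrcPtr of type {{i32, i32}, i16}* and a DstSize of
/// 8, this emits a "coerce.dive" GEP into the outer struct and returns a
/// pointer to the inner {i32, i32}, whose 8-byte alloc size still covers
/// the access.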
383 static llvm::Value *
384 EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
385                                    const llvm::StructType *SrcSTy,
386                                    uint64_t DstSize, CodeGenFunction &CGF) {
387   // We can't dive into a zero-element struct.
388   if (SrcSTy->getNumElements() == 0) return SrcPtr;
389 
390   const llvm::Type *FirstElt = SrcSTy->getElementType(0);
391 
392   // If the first elt is at least as large as what we're looking for, or if the
393   // first element is the same size as the whole struct, we can enter it.
394   uint64_t FirstEltSize =
395     CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
396   if (FirstEltSize < DstSize &&
397       FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
398     return SrcPtr;
399 
400   // GEP into the first element.
401   SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
402 
403   // If the first element is a struct, recurse.
404   const llvm::Type *SrcTy =
405     cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
406   if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
407     return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
408 
409   return SrcPtr;
410 }
411 
412 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
413 /// are either integers or pointers.  This does a truncation of the value if it
414 /// is too large or a zero extension if it is too small.
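///
/// For example, on a target with 32-bit pointers, coercing an i8* value to
/// i64 emits roughly:
///   %pi = ptrtoint i8* %val to i32       ; "coerce.val.pi"
///   %ii = zext i32 %pi to i64            ; "coerce.val.ii"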
415 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
416                                              const llvm::Type *Ty,
417                                              CodeGenFunction &CGF) {
418   if (Val->getType() == Ty)
419     return Val;
420 
421   if (isa<llvm::PointerType>(Val->getType())) {
422     // If this is Pointer->Pointer avoid conversion to and from int.
423     if (isa<llvm::PointerType>(Ty))
424       return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
425 
426     // Convert the pointer to an integer so we can play with its width.
427     Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
428   }
429 
430   const llvm::Type *DestIntTy = Ty;
431   if (isa<llvm::PointerType>(DestIntTy))
432     DestIntTy = CGF.IntPtrTy;
433 
434   if (Val->getType() != DestIntTy)
435     Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
436 
437   if (isa<llvm::PointerType>(Ty))
438     Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
439   return Val;
440 }
441 
444 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
445 /// a pointer to an object of type \arg Ty.
446 ///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
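///
/// For example (a rough sketch), loading a {i32, i32} source as i64 just
/// bitcasts SrcPtr to i64* and loads it directly, while loading a 4-byte
/// float source as an 8-byte double goes through a temporary alloca,
/// leaving the extra bytes undefined.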
450 static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
451                                       const llvm::Type *Ty,
452                                       CodeGenFunction &CGF) {
453   const llvm::Type *SrcTy =
454     cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
455 
456   // If SrcTy and Ty are the same, just do a load.
457   if (SrcTy == Ty)
458     return CGF.Builder.CreateLoad(SrcPtr);
459 
460   uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
461 
462   if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
463     SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
464     SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
465   }
466 
467   uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
468 
469   // If the source and destination are integer or pointer types, just do an
470   // extension or truncation to the desired type.
471   if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
472       (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
473     llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
474     return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
475   }
476 
477   // If load is legal, just bitcast the src pointer.
478   if (SrcSize >= DstSize) {
479     // Generally SrcSize is never greater than DstSize, since this means we are
480     // losing bits. However, this can happen in cases where the structure has
481     // additional padding, for example due to a user specified alignment.
482     //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
485     llvm::Value *Casted =
486       CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
487     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
488     // FIXME: Use better alignment / avoid requiring aligned load.
489     Load->setAlignment(1);
490     return Load;
491   }
492 
493   // Otherwise do coercion through memory. This is stupid, but
494   // simple.
495   llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
496   llvm::Value *Casted =
497     CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
498   llvm::StoreInst *Store =
499     CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
500   // FIXME: Use better alignment / avoid requiring aligned store.
501   Store->setAlignment(1);
502   return CGF.Builder.CreateLoad(Tmp);
503 }
504 
505 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
506 /// where the source and destination may have different types.
507 ///
508 /// This safely handles the case when the src type is larger than the
509 /// destination type; the upper bits of the src will be lost.
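///
/// For example (a rough sketch), storing an i64 into a {i32, i32} slot
/// bitcasts DstPtr to i64* and stores directly, while storing an i64 into
/// a 4-byte slot goes through a temporary alloca, so only the first 4
/// bytes of the source value reach the destination.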
510 static void CreateCoercedStore(llvm::Value *Src,
511                                llvm::Value *DstPtr,
512                                bool DstIsVolatile,
513                                CodeGenFunction &CGF) {
514   const llvm::Type *SrcTy = Src->getType();
515   const llvm::Type *DstTy =
516     cast<llvm::PointerType>(DstPtr->getType())->getElementType();
517   if (SrcTy == DstTy) {
518     CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
519     return;
520   }
521 
522   uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
523 
524   if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
525     DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
526     DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
527   }
528 
529   // If the source and destination are integer or pointer types, just do an
530   // extension or truncation to the desired type.
531   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
532       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
533     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
534     CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
535     return;
536   }
537 
538   uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
539 
540   // If store is legal, just bitcast the src pointer.
541   if (SrcSize <= DstSize) {
542     llvm::Value *Casted =
543       CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
544     // FIXME: Use better alignment / avoid requiring aligned store.
545     CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
546   } else {
547     // Otherwise do coercion through memory. This is stupid, but
548     // simple.
549 
550     // Generally SrcSize is never greater than DstSize, since this means we are
551     // losing bits. However, this can happen in cases where the structure has
552     // additional padding, for example due to a user specified alignment.
553     //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
556     llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
557     CGF.Builder.CreateStore(Src, Tmp);
558     llvm::Value *Casted =
559       CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
560     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
561     // FIXME: Use better alignment / avoid requiring aligned load.
562     Load->setAlignment(1);
563     CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
564   }
565 }
566 
567 /***/
568 
569 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
570   return FI.getReturnInfo().isIndirect();
571 }
572 
573 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
574   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
575     switch (BT->getKind()) {
576     default:
577       return false;
578     case BuiltinType::Float:
579       return getContext().Target.useObjCFPRetForRealType(TargetInfo::Float);
580     case BuiltinType::Double:
581       return getContext().Target.useObjCFPRetForRealType(TargetInfo::Double);
582     case BuiltinType::LongDouble:
583       return getContext().Target.useObjCFPRetForRealType(
584         TargetInfo::LongDouble);
585     }
586   }
587 
588   return false;
589 }
590 
591 const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
592   const CGFunctionInfo &FI = getFunctionInfo(GD);
593 
594   // For definition purposes, don't consider a K&R function variadic.
595   bool Variadic = false;
596   if (const FunctionProtoType *FPT =
597         cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
598     Variadic = FPT->isVariadic();
599 
600   return GetFunctionType(FI, Variadic, false);
601 }
602 
603 const llvm::FunctionType *
604 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
605                               bool IsRecursive) {
606   std::vector<const llvm::Type*> ArgTys;
607 
608   const llvm::Type *ResultType = 0;
609 
610   QualType RetTy = FI.getReturnType();
611   const ABIArgInfo &RetAI = FI.getReturnInfo();
612   switch (RetAI.getKind()) {
613   case ABIArgInfo::Expand:
614     assert(0 && "Invalid ABI kind for return argument");
615 
616   case ABIArgInfo::Extend:
617   case ABIArgInfo::Direct:
618     ResultType = RetAI.getCoerceToType();
619     break;
620 
621   case ABIArgInfo::Indirect: {
622     assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
623     ResultType = llvm::Type::getVoidTy(getLLVMContext());
624     const llvm::Type *STy = ConvertType(RetTy, IsRecursive);
625     ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
626     break;
627   }
628 
629   case ABIArgInfo::Ignore:
630     ResultType = llvm::Type::getVoidTy(getLLVMContext());
631     break;
632   }
633 
634   for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
635          ie = FI.arg_end(); it != ie; ++it) {
636     const ABIArgInfo &AI = it->info;
637 
638     switch (AI.getKind()) {
639     case ABIArgInfo::Ignore:
640       break;
641 
642     case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
644       const llvm::Type *LTy = ConvertTypeForMem(it->type, IsRecursive);
645       ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
646       break;
647     }
648 
649     case ABIArgInfo::Extend:
650     case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
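      // For example, a coerce-to type of {i32, i32} contributes two i32
      // parameters to the LLVM signature instead of a single FCA argument.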
654       const llvm::Type *ArgTy = AI.getCoerceToType();
655       if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgTy)) {
656         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
657           ArgTys.push_back(STy->getElementType(i));
658       } else {
659         ArgTys.push_back(ArgTy);
660       }
661       break;
662     }
663 
664     case ABIArgInfo::Expand:
665       GetExpandedTypes(it->type, ArgTys, IsRecursive);
666       break;
667     }
668   }
669 
670   return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
671 }
672 
673 const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
674   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
675   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
676 
677   if (!VerifyFuncTypeComplete(FPT)) {
678     const CGFunctionInfo *Info;
679     if (isa<CXXDestructorDecl>(MD))
680       Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
681     else
682       Info = &getFunctionInfo(MD);
683     return GetFunctionType(*Info, FPT->isVariadic(), false);
684   }
685 
686   return llvm::OpaqueType::get(getLLVMContext());
687 }
688 
689 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
690                                            const Decl *TargetDecl,
691                                            AttributeListType &PAL,
692                                            unsigned &CallingConv) {
693   unsigned FuncAttrs = 0;
694   unsigned RetAttrs = 0;
695 
696   CallingConv = FI.getEffectiveCallingConvention();
697 
698   if (FI.isNoReturn())
699     FuncAttrs |= llvm::Attribute::NoReturn;
700 
701   // FIXME: handle sseregparm someday...
702   if (TargetDecl) {
703     if (TargetDecl->hasAttr<NoThrowAttr>())
704       FuncAttrs |= llvm::Attribute::NoUnwind;
705     else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
706       const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
707       if (FPT && FPT->hasEmptyExceptionSpec())
708         FuncAttrs |= llvm::Attribute::NoUnwind;
709     }
710 
711     if (TargetDecl->hasAttr<NoReturnAttr>())
712       FuncAttrs |= llvm::Attribute::NoReturn;
713     if (TargetDecl->hasAttr<ConstAttr>())
714       FuncAttrs |= llvm::Attribute::ReadNone;
715     else if (TargetDecl->hasAttr<PureAttr>())
716       FuncAttrs |= llvm::Attribute::ReadOnly;
717     if (TargetDecl->hasAttr<MallocAttr>())
718       RetAttrs |= llvm::Attribute::NoAlias;
719   }
720 
721   if (CodeGenOpts.OptimizeSize)
722     FuncAttrs |= llvm::Attribute::OptimizeForSize;
723   if (CodeGenOpts.DisableRedZone)
724     FuncAttrs |= llvm::Attribute::NoRedZone;
725   if (CodeGenOpts.NoImplicitFloat)
726     FuncAttrs |= llvm::Attribute::NoImplicitFloat;
727 
728   QualType RetTy = FI.getReturnType();
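  // LLVM attribute indices are 1-based for parameters; index 0 carries the
  // return-value attributes and ~0U carries the function-level attributes.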
729   unsigned Index = 1;
730   const ABIArgInfo &RetAI = FI.getReturnInfo();
731   switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::SExt;
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::ZExt;
    break;
738   case ABIArgInfo::Direct:
739   case ABIArgInfo::Ignore:
740     break;
741 
742   case ABIArgInfo::Indirect:
743     PAL.push_back(llvm::AttributeWithIndex::get(Index,
744                                                 llvm::Attribute::StructRet));
745     ++Index;
746     // sret disables readnone and readonly
747     FuncAttrs &= ~(llvm::Attribute::ReadOnly |
748                    llvm::Attribute::ReadNone);
749     break;
750 
751   case ABIArgInfo::Expand:
752     assert(0 && "Invalid ABI kind for return argument");
753   }
754 
755   if (RetAttrs)
756     PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
757 
758   // FIXME: we need to honor command line settings also.
759   // FIXME: RegParm should be reduced in case of nested functions and/or global
760   // register variable.
  int RegParm = FI.getRegParm();
762 
763   unsigned PointerWidth = getContext().Target.getPointerWidth(0);
764   for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
765          ie = FI.arg_end(); it != ie; ++it) {
766     QualType ParamType = it->type;
767     const ABIArgInfo &AI = it->info;
768     unsigned Attributes = 0;
769 
770     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
771     // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
773     switch (AI.getKind()) {
774     case ABIArgInfo::Extend:
775       if (ParamType->isSignedIntegerType())
776         Attributes |= llvm::Attribute::SExt;
777       else if (ParamType->isUnsignedIntegerType())
778         Attributes |= llvm::Attribute::ZExt;
779       // FALL THROUGH
780     case ABIArgInfo::Direct:
781       if (RegParm > 0 &&
782           (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
785         if (RegParm >= 0)
786           Attributes |= llvm::Attribute::InReg;
787       }
788       // FIXME: handle sseregparm someday...
789 
790       if (const llvm::StructType *STy =
791             dyn_cast<llvm::StructType>(AI.getCoerceToType()))
792         Index += STy->getNumElements()-1;  // 1 will be added below.
793       break;
794 
795     case ABIArgInfo::Indirect:
796       if (AI.getIndirectByVal())
797         Attributes |= llvm::Attribute::ByVal;
798 
799       Attributes |=
800         llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
801       // byval disables readnone and readonly.
802       FuncAttrs &= ~(llvm::Attribute::ReadOnly |
803                      llvm::Attribute::ReadNone);
804       break;
805 
806     case ABIArgInfo::Ignore:
807       // Skip increment, no matching LLVM parameter.
808       continue;
809 
810     case ABIArgInfo::Expand: {
811       std::vector<const llvm::Type*> Tys;
812       // FIXME: This is rather inefficient. Do we ever actually need to do
813       // anything here? The result should be just reconstructed on the other
814       // side, so extension should be a non-issue.
815       getTypes().GetExpandedTypes(ParamType, Tys, false);
816       Index += Tys.size();
817       continue;
818     }
819     }
820 
821     if (Attributes)
822       PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
823     ++Index;
824   }
825   if (FuncAttrs)
826     PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
827 }
828 
829 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
830                                          llvm::Function *Fn,
831                                          const FunctionArgList &Args) {
832   // If this is an implicit-return-zero function, go ahead and
833   // initialize the return value.  TODO: it might be nice to have
834   // a more general mechanism for this that didn't require synthesized
835   // return statements.
836   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
837     if (FD->hasImplicitReturnZero()) {
838       QualType RetTy = FD->getResultType().getUnqualifiedType();
839       const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
840       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
841       Builder.CreateStore(Zero, ReturnValue);
842     }
843   }
844 
845   // FIXME: We no longer need the types from FunctionArgList; lift up and
846   // simplify.
847 
848   // Emit allocs for param decls.  Give the LLVM Argument nodes names.
849   llvm::Function::arg_iterator AI = Fn->arg_begin();
850 
851   // Name the struct return argument.
852   if (CGM.ReturnTypeUsesSRet(FI)) {
853     AI->setName("agg.result");
854     ++AI;
855   }
856 
857   assert(FI.arg_size() == Args.size() &&
858          "Mismatch between function signature & arguments.");
859   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
860   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
861        i != e; ++i, ++info_it) {
862     const VarDecl *Arg = i->first;
863     QualType Ty = info_it->type;
864     const ABIArgInfo &ArgI = info_it->info;
865 
866     switch (ArgI.getKind()) {
867     case ABIArgInfo::Indirect: {
868       llvm::Value *V = AI;
869 
870       if (hasAggregateLLVMType(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested.
873         if (ArgI.getIndirectRealign()) {
874           llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
875 
876           // Copy from the incoming argument pointer to the temporary with the
877           // appropriate alignment.
878           //
879           // FIXME: We should have a common utility for generating an aggregate
880           // copy.
881           const llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
882           CharUnits Size = getContext().getTypeSizeInChars(Ty);
883           Builder.CreateMemCpy(Builder.CreateBitCast(AlignedTemp, I8PtrTy),
884                                Builder.CreateBitCast(V, I8PtrTy),
885                                llvm::ConstantInt::get(IntPtrTy,
886                                                       Size.getQuantity()),
887                                ArgI.getIndirectAlign(),
888                                false);
889           V = AlignedTemp;
890         }
891       } else {
892         // Load scalar value from indirect argument.
893         CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
894         V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);
895         if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
896           // This must be a promotion, for something like
897           // "void a(x) short x; {..."
898           V = EmitScalarConversion(V, Ty, Arg->getType());
899         }
900       }
901       EmitParmDecl(*Arg, V);
902       break;
903     }
904 
905     case ABIArgInfo::Extend:
906     case ABIArgInfo::Direct: {
907       // If we have the trivial case, handle it with no muss and fuss.
908       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
909           ArgI.getCoerceToType() == ConvertType(Ty) &&
910           ArgI.getDirectOffset() == 0) {
911         assert(AI != Fn->arg_end() && "Argument mismatch!");
912         llvm::Value *V = AI;
913 
914         if (Arg->getType().isRestrictQualified())
915           AI->addAttr(llvm::Attribute::NoAlias);
916 
917         if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
918           // This must be a promotion, for something like
919           // "void a(x) short x; {..."
920           V = EmitScalarConversion(V, Ty, Arg->getType());
921         }
922         EmitParmDecl(*Arg, V);
923         break;
924       }
925 
926       llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");
927 
928       // The alignment we need to use is the max of the requested alignment for
929       // the argument plus the alignment required by our access code below.
930       unsigned AlignmentToUse =
        CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
932       AlignmentToUse = std::max(AlignmentToUse,
933                         (unsigned)getContext().getDeclAlign(Arg).getQuantity());
934 
935       Alloca->setAlignment(AlignmentToUse);
936       llvm::Value *V = Alloca;
937       llvm::Value *Ptr = V;    // Pointer to store into.
938 
939       // If the value is offset in memory, apply the offset now.
940       if (unsigned Offs = ArgI.getDirectOffset()) {
941         Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
942         Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
943         Ptr = Builder.CreateBitCast(Ptr,
944                           llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
945       }
946 
      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
950       if (const llvm::StructType *STy =
951             dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
952         Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
953 
954         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
955           assert(AI != Fn->arg_end() && "Argument mismatch!");
956           AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
957           llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
958           Builder.CreateStore(AI++, EltPtr);
959         }
960       } else {
961         // Simple case, just do a coerced store of the argument into the alloca.
962         assert(AI != Fn->arg_end() && "Argument mismatch!");
963         AI->setName(Arg->getName() + ".coerce");
964         CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
965       }
966 
968       // Match to what EmitParmDecl is expecting for this type.
969       if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
970         V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
971         if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
972           // This must be a promotion, for something like
973           // "void a(x) short x; {..."
974           V = EmitScalarConversion(V, Ty, Arg->getType());
975         }
976       }
977       EmitParmDecl(*Arg, V);
978       continue;  // Skip ++AI increment, already done.
979     }
980 
981     case ABIArgInfo::Expand: {
982       // If this structure was expanded into multiple arguments then
983       // we need to create a temporary and reconstruct it from the
984       // arguments.
985       llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
986       llvm::Function::arg_iterator End =
987         ExpandTypeFromArgs(Ty, MakeAddrLValue(Temp, Ty), AI);
988       EmitParmDecl(*Arg, Temp);
989 
990       // Name the arguments used in expansion and increment AI.
991       unsigned Index = 0;
992       for (; AI != End; ++AI, ++Index)
993         AI->setName(Arg->getName() + "." + llvm::Twine(Index));
994       continue;
995     }
996 
997     case ABIArgInfo::Ignore:
998       // Initialize the local variable appropriately.
999       if (hasAggregateLLVMType(Ty))
1000         EmitParmDecl(*Arg, CreateMemTemp(Ty));
1001       else
1002         EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
1003 
1004       // Skip increment, no matching LLVM parameter.
1005       continue;
1006     }
1007 
1008     ++AI;
1009   }
1010   assert(AI == Fn->arg_end() && "Argument mismatch!");
1011 }
1012 
1013 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
1014   // Functions with no result always return void.
1015   if (ReturnValue == 0) {
1016     Builder.CreateRetVoid();
1017     return;
1018   }
1019 
1020   llvm::DebugLoc RetDbgLoc;
1021   llvm::Value *RV = 0;
1022   QualType RetTy = FI.getReturnType();
1023   const ABIArgInfo &RetAI = FI.getReturnInfo();
1024 
1025   switch (RetAI.getKind()) {
1026   case ABIArgInfo::Indirect: {
1027     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
1028     if (RetTy->isAnyComplexType()) {
1029       ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
1030       StoreComplexToAddr(RT, CurFn->arg_begin(), false);
1031     } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
1033     } else {
1034       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
1035                         false, Alignment, RetTy);
1036     }
1037     break;
1038   }
1039 
1040   case ABIArgInfo::Extend:
1041   case ABIArgInfo::Direct:
1042     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
1043         RetAI.getDirectOffset() == 0) {
      // The internal return value temp will always have pointer-to-return-type
      // type; just do a load.
1046 
1047       // If the instruction right before the insertion point is a store to the
1048       // return value, we can elide the load, zap the store, and usually zap the
1049       // alloca.
1050       llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
1051       llvm::StoreInst *SI = 0;
1052       if (InsertBB->empty() ||
1053           !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
1054           SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
1055         RV = Builder.CreateLoad(ReturnValue);
1056       } else {
1057         // Get the stored value and nuke the now-dead store.
1058         RetDbgLoc = SI->getDebugLoc();
1059         RV = SI->getValueOperand();
1060         SI->eraseFromParent();
1061 
1062         // If that was the only use of the return value, nuke it as well now.
1063         if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
1064           cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
1065           ReturnValue = 0;
1066         }
1067       }
1068     } else {
1069       llvm::Value *V = ReturnValue;
1070       // If the value is offset in memory, apply the offset now.
1071       if (unsigned Offs = RetAI.getDirectOffset()) {
1072         V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
1073         V = Builder.CreateConstGEP1_32(V, Offs);
1074         V = Builder.CreateBitCast(V,
1075                          llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
1076       }
1077 
1078       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
1079     }
1080     break;
1081 
1082   case ABIArgInfo::Ignore:
1083     break;
1084 
1085   case ABIArgInfo::Expand:
1086     assert(0 && "Invalid ABI kind for return argument");
1087   }
1088 
1089   llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
1090   if (!RetDbgLoc.isUnknown())
1091     Ret->setDebugLoc(RetDbgLoc);
1092 }
1093 
1094 RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
1095   // StartFunction converted the ABI-lowered parameter(s) into a
1096   // local alloca.  We need to turn that into an r-value suitable
1097   // for EmitCall.
1098   llvm::Value *Local = GetAddrOfLocalVar(Param);
1099 
1100   QualType ArgType = Param->getType();
1101 
1102   // For the most part, we just need to load the alloca, except:
1103   // 1) aggregate r-values are actually pointers to temporaries, and
1104   // 2) references to aggregates are pointers directly to the aggregate.
1105   // I don't know why references to non-aggregates are different here.
1106   if (const ReferenceType *RefType = ArgType->getAs<ReferenceType>()) {
1107     if (hasAggregateLLVMType(RefType->getPointeeType()))
1108       return RValue::getAggregate(Local);
1109 
1110     // Locals which are references to scalars are represented
1111     // with allocas holding the pointer.
1112     return RValue::get(Builder.CreateLoad(Local));
1113   }
1114 
1115   if (ArgType->isAnyComplexType())
1116     return RValue::getComplex(LoadComplexFromAddr(Local, /*volatile*/ false));
1117 
1118   if (hasAggregateLLVMType(ArgType))
1119     return RValue::getAggregate(Local);
1120 
1121   unsigned Alignment = getContext().getDeclAlign(Param).getQuantity();
1122   return RValue::get(EmitLoadOfScalar(Local, false, Alignment, ArgType));
1123 }
1124 
1125 RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
1126   if (ArgType->isReferenceType())
1127     return EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
1128 
1129   return EmitAnyExprToTemp(E);
1130 }
1131 
1132 /// Emits a call or invoke instruction to the given function, depending
1133 /// on the current state of the EH stack.
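///
/// In the invoke case, a fresh "invoke.cont" block receives the normal
/// return edge and becomes the new insertion point.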
1134 llvm::CallSite
1135 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
1136                                   llvm::Value * const *ArgBegin,
1137                                   llvm::Value * const *ArgEnd,
1138                                   const llvm::Twine &Name) {
1139   llvm::BasicBlock *InvokeDest = getInvokeDest();
1140   if (!InvokeDest)
1141     return Builder.CreateCall(Callee, ArgBegin, ArgEnd, Name);
1142 
1143   llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
1144   llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
1145                                                   ArgBegin, ArgEnd, Name);
1146   EmitBlock(ContBB);
1147   return Invoke;
1148 }
1149 
1150 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
1151                                  llvm::Value *Callee,
1152                                  ReturnValueSlot ReturnValue,
1153                                  const CallArgList &CallArgs,
1154                                  const Decl *TargetDecl,
1155                                  llvm::Instruction **callOrInvoke) {
1156   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
1157   llvm::SmallVector<llvm::Value*, 16> Args;
1158 
1159   // Handle struct-return functions by passing a pointer to the
1160   // location that we would like to return into.
1161   QualType RetTy = CallInfo.getReturnType();
1162   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
1163 
1165   // If the call returns a temporary with struct return, create a temporary
1166   // alloca to hold the result, unless one is given to us.
1167   if (CGM.ReturnTypeUsesSRet(CallInfo)) {
1168     llvm::Value *Value = ReturnValue.getValue();
1169     if (!Value)
1170       Value = CreateMemTemp(RetTy);
1171     Args.push_back(Value);
1172   }
1173 
1174   assert(CallInfo.arg_size() == CallArgs.size() &&
1175          "Mismatch between function signature & arguments.");
1176   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
1177   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
1178        I != E; ++I, ++info_it) {
1179     const ABIArgInfo &ArgInfo = info_it->info;
1180     RValue RV = I->first;
1181 
1182     unsigned Alignment =
1183       getContext().getTypeAlignInChars(I->second).getQuantity();
1184     switch (ArgInfo.getKind()) {
1185     case ABIArgInfo::Indirect: {
1186       if (RV.isScalar() || RV.isComplex()) {
1187         // Make a temporary alloca to pass the argument.
1188         Args.push_back(CreateMemTemp(I->second));
1189         if (RV.isScalar())
1190           EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
1191                             Alignment, I->second);
1192         else
1193           StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
1194       } else {
1195         Args.push_back(RV.getAggregateAddr());
1196       }
1197       break;
1198     }
1199 
1200     case ABIArgInfo::Ignore:
1201       break;
1202 
1203     case ABIArgInfo::Extend:
1204     case ABIArgInfo::Direct: {
1205       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
1206           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
1207           ArgInfo.getDirectOffset() == 0) {
1208         if (RV.isScalar())
1209           Args.push_back(RV.getScalarVal());
1210         else
1211           Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
1212         break;
1213       }
1214 
1215       // FIXME: Avoid the conversion through memory if possible.
1216       llvm::Value *SrcPtr;
1217       if (RV.isScalar()) {
1218         SrcPtr = CreateMemTemp(I->second, "coerce");
1219         EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, Alignment,
1220                           I->second);
1221       } else if (RV.isComplex()) {
1222         SrcPtr = CreateMemTemp(I->second, "coerce");
1223         StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
1224       } else
1225         SrcPtr = RV.getAggregateAddr();
1226 
1227       // If the value is offset in memory, apply the offset now.
1228       if (unsigned Offs = ArgInfo.getDirectOffset()) {
1229         SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
1230         SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
1231         SrcPtr = Builder.CreateBitCast(SrcPtr,
1232                        llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
1233 
1234       }
1235 
      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
1239       if (const llvm::StructType *STy =
1240             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
1241         SrcPtr = Builder.CreateBitCast(SrcPtr,
1242                                        llvm::PointerType::getUnqual(STy));
1243         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1244           llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
1245           llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
1246           // We don't know what we're loading from.
1247           LI->setAlignment(1);
1248           Args.push_back(LI);
1249         }
1250       } else {
1251         // In the simple case, just pass the coerced loaded value.
1252         Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
1253                                          *this));
1254       }
1255 
1256       break;
1257     }
1258 
1259     case ABIArgInfo::Expand:
1260       ExpandTypeToArgs(I->second, RV, Args);
1261       break;
1262     }
1263   }
1264 
  // If the callee is a bitcast of a function to a variadic function pointer
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
1268   if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
1269     if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
1270       const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
1271       const llvm::FunctionType *CurFT =
1272         cast<llvm::FunctionType>(CurPT->getElementType());
1273       const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
1274 
1275       if (CE->getOpcode() == llvm::Instruction::BitCast &&
1276           ActualFT->getReturnType() == CurFT->getReturnType() &&
1277           ActualFT->getNumParams() == CurFT->getNumParams() &&
1278           ActualFT->getNumParams() == Args.size()) {
1279         bool ArgsMatch = true;
1280         for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
1281           if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
1282             ArgsMatch = false;
1283             break;
1284           }
1285 
1286         // Strip the cast if we can get away with it.  This is a nice cleanup,
1287         // but also allows us to inline the function at -O0 if it is marked
1288         // always_inline.
1289         if (ArgsMatch)
1290           Callee = CalleeF;
1291       }
1292     }
1293 
1295   unsigned CallingConv;
1296   CodeGen::AttributeListType AttributeList;
1297   CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
1298   llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
1299                                                    AttributeList.end());
1300 
1301   llvm::BasicBlock *InvokeDest = 0;
1302   if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
1303     InvokeDest = getInvokeDest();
1304 
1305   llvm::CallSite CS;
1306   if (!InvokeDest) {
1307     CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
1308   } else {
1309     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
1310     CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
1311                               Args.data(), Args.data()+Args.size());
1312     EmitBlock(Cont);
1313   }
1314   if (callOrInvoke)
1315     *callOrInvoke = CS.getInstruction();
1316 
1317   CS.setAttributes(Attrs);
1318   CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
1319 
1320   // If the call doesn't return, finish the basic block and clear the
1321   // insertion point; this allows the rest of IRgen to discard
1322   // unreachable code.
1323   if (CS.doesNotReturn()) {
1324     Builder.CreateUnreachable();
1325     Builder.ClearInsertionPoint();
1326 
1327     // FIXME: For now, emit a dummy basic block because expr emitters in
1328     // generally are not ready to handle emitting expressions at unreachable
1329     // points.
1330     EnsureInsertPoint();
1331 
1332     // Return a reasonable RValue.
1333     return GetUndefRValue(RetTy);
1334   }
1335 
1336   llvm::Instruction *CI = CS.getInstruction();
1337   if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
1338     CI->setName("call");
1339 
1340   switch (RetAI.getKind()) {
1341   case ABIArgInfo::Indirect: {
1342     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
1343     if (RetTy->isAnyComplexType())
1344       return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
1345     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
1346       return RValue::getAggregate(Args[0]);
1347     return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
1348   }
1349 
1350   case ABIArgInfo::Ignore:
1351     // If we are ignoring an argument that had a result, make sure to
1352     // construct the appropriate return value for our caller.
1353     return GetUndefRValue(RetTy);
1354 
1355   case ABIArgInfo::Extend:
1356   case ABIArgInfo::Direct: {
1357     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
1358         RetAI.getDirectOffset() == 0) {
1359       if (RetTy->isAnyComplexType()) {
1360         llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
1361         llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
1362         return RValue::getComplex(std::make_pair(Real, Imag));
1363       }
1364       if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1365         llvm::Value *DestPtr = ReturnValue.getValue();
1366         bool DestIsVolatile = ReturnValue.isVolatile();
1367 
1368         if (!DestPtr) {
1369           DestPtr = CreateMemTemp(RetTy, "agg.tmp");
1370           DestIsVolatile = false;
1371         }
1372         Builder.CreateStore(CI, DestPtr, DestIsVolatile);
1373         return RValue::getAggregate(DestPtr);
1374       }
1375       return RValue::get(CI);
1376     }
1377 
1378     llvm::Value *DestPtr = ReturnValue.getValue();
1379     bool DestIsVolatile = ReturnValue.isVolatile();
1380 
1381     if (!DestPtr) {
1382       DestPtr = CreateMemTemp(RetTy, "coerce");
1383       DestIsVolatile = false;
1384     }
1385 
1386     // If the value is offset in memory, apply the offset now.
1387     llvm::Value *StorePtr = DestPtr;
1388     if (unsigned Offs = RetAI.getDirectOffset()) {
1389       StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
1390       StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
1391       StorePtr = Builder.CreateBitCast(StorePtr,
1392                          llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
1393     }
1394     CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
1395 
1396     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
1397     if (RetTy->isAnyComplexType())
1398       return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
1399     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
1400       return RValue::getAggregate(DestPtr);
1401     return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
1402   }
1403 
1404   case ABIArgInfo::Expand:
1405     assert(0 && "Invalid ABI kind for return argument");
1406   }
1407 
1408   assert(0 && "Unhandled ABIArgInfo::Kind");
1409   return RValue::get(0);
1410 }
1411 
1412 /* VarArg handling */
1413 
1414 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
1415   return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
1416 }
1417