//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

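/// Compute the function info for a function type with no prototype; the
/// signature records only the (unqualified) result type and ExtInfo, with no
/// formal parameter types.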
const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP,
                              bool IsRecursive) {
  return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                         llvm::SmallVector<CanQualType, 16>(),
                         FTNP->getExtInfo(), IsRecursive);
}

/// \param ArgTys - contains any initial parameters besides those
///   in the formal type
static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
                                  llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                             CanQual<FunctionProtoType> FTP,
                                             bool IsRecursive = false) {
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
  return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo(), IsRecursive);
}

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP,
                              bool IsRecursive) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  return ::getFunctionInfo(*this, ArgTys, FTP, IsRecursive);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  return CC_C;
}

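/// Compute the function info for a method of class RD whose type is FTP,
/// prepending the implicit 'this' pointer for RD as the first argument.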
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
                                                 const FunctionProtoType *FTP) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, RD));

  return ::getFunctionInfo(*this, ArgTys,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

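/// Compute the function info for a C++ method, adding the implicit 'this'
/// pointer unless the method is static.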
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer unless this is a static method.
  if (MD->isInstance())
    ArgTys.push_back(GetThisType(Context, MD->getParent()));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
}

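/// Compute the function info for a constructor variant, adding the implicit
/// 'this' pointer and, for base-object constructors of classes with virtual
/// bases, the VTT parameter.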
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
                                                    CXXCtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, D->getParent()));

  // Check if we need to add a VTT parameter (which has type void **).
  if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
}

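/// Compute the function info for a destructor variant; like constructors,
/// base-object destructors of classes with virtual bases take a VTT
/// parameter.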
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
                                                    CXXDtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, D->getParent()));

  // Check if we need to add a VTT parameter (which has type void **).
  if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0)
    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
  assert(isa<FunctionType>(FTy));
  if (isa<FunctionNoProtoType>(FTy))
    return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
  assert(isa<FunctionProtoType>(FTy));
  return getFunctionInfo(FTy.getAs<FunctionProtoType>());
}

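/// Compute the function info for an Objective-C method.  The implicit 'self'
/// and '_cmd' arguments are prepended before the method's formal parameters.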
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
  ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }
  return getFunctionInfo(GetReturnType(MD->getResultType()),
                         ArgTys,
                         FunctionType::ExtInfo(
                             /*NoReturn*/ false,
                             /*RegParm*/ 0,
                             getCallingConventionForDecl(MD)));
}

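/// Compute the function info for a global declaration, dispatching to the
/// constructor/destructor overloads when the declaration names a special
/// member so that the correct variant (complete vs. base) is lowered.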
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return getFunctionInfo(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return getFunctionInfo(DD, GD.getDtorType());

  return getFunctionInfo(FD);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType(i->second));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const FunctionArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType(i->second));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

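/// This overload is the one that actually builds CGFunctionInfo objects.  All
/// of the overloads above funnel into it; results are uniqued in a FoldingSet
/// so that each distinct (convention, result, argument) signature is lowered
/// by the ABI only once.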
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
                           const llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                            const FunctionType::ExtInfo &Info,
                                                    bool IsRecursive) {
#ifndef NDEBUG
  for (llvm::SmallVectorImpl<CanQualType>::const_iterator
         I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, Info, ResTy,
                          ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy,
                          ArgTys.data(), ArgTys.size());
  FunctionInfos.InsertNode(FI, InsertPos);

  // ABI lowering wants to know what our preferred type for the argument is in
  // various situations, so pass it in.
  llvm::SmallVector<const llvm::Type *, 8> PreferredArgTypes;
  for (llvm::SmallVectorImpl<CanQualType>::const_iterator
       I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I) {
    // If this is being called from the guts of the ConvertType loop, make sure
    // to call ConvertTypeRecursive so we don't get into issues with cyclic
    // pointer type structures.
    const llvm::Type *ArgType;
    if (IsRecursive)
      ArgType = ConvertTypeRecursive(*I);
    else
      ArgType = ConvertType(*I);
    PreferredArgTypes.push_back(ArgType);
  }

  // Compute ABI information.
  getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext(),
                           PreferredArgTypes.data(), PreferredArgTypes.size());

  return *FI;
}

CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
                               bool _NoReturn, unsigned _RegParm,
                               CanQualType ResTy,
                               const CanQualType *ArgTys,
                               unsigned NumArgTys)
  : CallingConvention(_CallingConvention),
    EffectiveCallingConvention(_CallingConvention),
    NoReturn(_NoReturn), RegParm(_RegParm)
{
  NumArgs = NumArgTys;

  // FIXME: Coallocate with the CGFunctionInfo object.
  Args = new ArgInfo[1 + NumArgTys];
  Args[0].type = ResTy;
  for (unsigned i = 0; i != NumArgTys; ++i)
    Args[1 + i].type = ArgTys[i];
}

/***/

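/// GetExpandedTypes - Recursively append the LLVM types of the fields of the
/// structure type Ty to ArgTys.  This is the type-level counterpart of the
/// Expand ABI kind, which passes a struct as a flattened sequence of scalar
/// arguments.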
void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys,
                                    bool IsRecursive) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      GetExpandedTypes(FT, ArgTys, IsRecursive);
    } else {
      ArgTys.push_back(ConvertType(FT, IsRecursive));
    }
  }
}

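/// ExpandTypeFromArgs - Reconstruct a struct of type Ty in the memory named
/// by LV from a run of expanded LLVM function arguments, storing each scalar
/// argument into the corresponding field.  Returns an iterator past the last
/// argument consumed.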
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}

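/// ExpandTypeToArgs - The inverse of ExpandTypeFromArgs: flatten the
/// aggregate rvalue RV of type Ty into a sequence of scalar call arguments,
/// appending one value per (recursively expanded) field to Args.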
void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of, try to gep into the struct to get
/// at its inner goodness.  Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   const llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  const llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specified type Ty,
/// where both are either integers or pointers.  This does a truncation of the
/// value if it is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             const llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  const llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Value *Casted =
    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  llvm::StoreInst *Store =
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  // FIXME: Use better alignment / avoid requiring aligned store.
  Store->setAlignment(1);
  return CGF.Builder.CreateLoad(Tmp);
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}

/***/

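/// ReturnTypeUsesSret - Return true if the ABI lowering requires the return
/// value to be passed indirectly, i.e. through a hidden sret pointer
/// argument.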
bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = getFunctionInfo(GD);

  // For definition purposes, don't consider a K&R function variadic.
  bool Variadic = false;
  if (const FunctionProtoType *FPT =
        cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
    Variadic = FPT->isVariadic();

  return GetFunctionType(FI, Variadic, false);
}

const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
                              bool IsRecursive) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    ResultType = ConvertType(RetTy, IsRecursive);
    break;

  case ABIArgInfo::Indirect: {
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    const llvm::Type *STy = ConvertType(RetTy, IsRecursive);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::Coerce:
    ResultType = RetAI.getCoerceToType();
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      const llvm::Type *ArgTy = AI.getCoerceToType();
      if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgTy)) {
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
          ArgTys.push_back(STy->getElementType(i));
      } else {
        ArgTys.push_back(ArgTy);
      }
      break;
    }

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type, IsRecursive);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      ArgTys.push_back(ConvertType(it->type, IsRecursive));
      break;

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, ArgTys, IsRecursive);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}

const llvm::Type *
CodeGenTypes::GetFunctionTypeForVTable(const CXXMethodDecl *MD) {
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!VerifyFuncTypeComplete(FPT))
    return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic(), false);

  return llvm::OpaqueType::get(getLLVMContext());
}

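/// ConstructAttributeList - Build the LLVM attribute list (and report the
/// effective calling convention) for a function or call site described by
/// FI, folding in declaration attributes such as noreturn, const, pure and
/// malloc as well as the per-argument sext/zext, sret, byval and inreg
/// attributes implied by the ABI lowering.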
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs |= llvm::Attribute::NoReturn;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs |= llvm::Attribute::NoAlias;
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs |= llvm::Attribute::OptimizeForSize;
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
   if (RetTy->isSignedIntegerType()) {
     RetAttrs |= llvm::Attribute::SExt;
   } else if (RetTy->isUnsignedIntegerType()) {
     RetAttrs |= llvm::Attribute::ZExt;
   }
   // FALLTHROUGH
  case ABIArgInfo::Direct:
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet));
    ++Index;
    // sret disables readnone and readonly.
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: we need to honour command line settings also...
  // FIXME: RegParm should be reduced in case of nested functions and/or global
  // register variable.
  signed RegParm = FI.getRegParm();

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.

    switch (AI.getKind()) {
    case ABIArgInfo::Coerce:
      if (const llvm::StructType *STy =
          dyn_cast<llvm::StructType>(AI.getCoerceToType()))
        Index += STy->getNumElements();
      else
        ++Index;
      continue;  // Skip index increment.

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attributes |= llvm::Attribute::ByVal;

      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Extend:
     if (ParamType->isSignedIntegerType()) {
       Attributes |= llvm::Attribute::SExt;
     } else if (ParamType->isUnsignedIntegerType()) {
       Attributes |= llvm::Attribute::ZExt;
     }
     // FALLTHROUGH
    case ABIArgInfo::Direct:
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys, false);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}

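/// EmitFunctionProlog - Emit code at the start of a function body that claims
/// the incoming LLVM arguments, undoes the ABI lowering applied to them
/// (sret, coercion, expansion, indirection), and binds the results to the
/// semantic parameter declarations.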
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing, aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value *V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Create a temporary alloca to hold the argument; the rest of
        // codegen expects to access aggregates & complex values by
        // reference.
        V = CreateMemTemp(Ty);
        Builder.CreateStore(AI, V);
      } else {
        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);

        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + llvm::Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateMemTemp(Ty));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }

      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Coerce: {
      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
      // result in a new alloca anyway, so we could just store into that
      // directly if we broke the abstraction down more.
      llvm::Value *V = CreateMemTemp(Ty, "coerce");

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
        // If the argument and alloca types match up, we don't have to build the
        // FCA at all, emit a series of GEPs and stores, which is better for
        // fast isel.
        if (STy == cast<llvm::PointerType>(V->getType())->getElementType()) {
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(V, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }
        } else {
          // Reconstruct the FCA here so we can do a coerced store.
          llvm::Value *FormalArg = llvm::UndefValue::get(STy);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
            FormalArg = Builder.CreateInsertValue(FormalArg, AI++, i);
          }
          CreateCoercedStore(FormalArg, V, /*DestIsVolatile=*/false, *this);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, V, /*DestIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      continue;  // Skip ++AI increment, already done.
    }
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

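/// EmitFunctionEpilog - Emit the function return.  This loads the return
/// value out of the return-value slot (or stores it through the sret pointer
/// for indirect returns), applying the inverse of the return-value ABI
/// lowering before emitting the ret instruction.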
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::MDNode *RetDbgInfo = 0;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, RetTy);
    }
    break;

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    // The internal return value temp always will have pointer-to-return-type
    // type, so just do a load.

    // If the instruction right before the insertion point is a store to the
    // return value, we can elide the load, zap the store, and usually zap the
    // alloca.
    llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
    llvm::StoreInst *SI = 0;
    if (InsertBB->empty() ||
        !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
        SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
      RV = Builder.CreateLoad(ReturnValue);
    } else {
      // Get the stored value and nuke the now-dead store.
      RetDbgInfo = SI->getDbgMetadata();
      RV = SI->getValueOperand();
      SI->eraseFromParent();

      // If that was the only use of the return value, nuke it as well now.
      if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
        cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
        ReturnValue = 0;
      }
    }
    break;
  }
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Coerce:
    RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (RetDbgInfo)
    Ret->setDbgMetadata(RetDbgInfo);
}

RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *Local = GetAddrOfLocalVar(Param);

  QualType ArgType = Param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
  if (const ReferenceType *RefType = ArgType->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(RefType->getPointeeType()))
      return RValue::getAggregate(Local);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return RValue::get(Builder.CreateLoad(Local));
  }

  if (ArgType->isAnyComplexType())
    return RValue::getComplex(LoadComplexFromAddr(Local, /*volatile*/ false));

  if (hasAggregateLLVMType(ArgType))
    return RValue::getAggregate(Local);

  return RValue::get(EmitLoadOfScalar(Local, false, ArgType));
}

RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
  if (ArgType->isReferenceType())
    return EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);

  return EmitAnyExprToTemp(E);
}

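/// EmitCall - Emit a call or invoke to Callee.  Each argument in CallArgs is
/// lowered according to the corresponding ABIArgInfo in CallInfo, the call
/// attributes and calling convention are applied, and the result is
/// reconstructed into an RValue (using ReturnValue as the destination for
/// indirect or coerced aggregate returns when one is provided).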
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSret(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateMemTemp(I->second));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->second, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->second, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        // If the argument and alloca types match up, we don't have to build the
        // FCA at all, emit a series of GEPs and loads, which is better for
        // fast isel.
        if (STy ==
              cast<llvm::PointerType>(SrcPtr->getType())->getElementType()) {
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
            Args.push_back(Builder.CreateLoad(EltPtr));
          }
        } else {
          // Otherwise, do a coerced load of the entire FCA and handle the
          // pieces.
          llvm::Value *SrcVal =
            CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), *this);

          // Extract the elements of the value to pass in.
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
            Args.push_back(Builder.CreateExtractValue(SrcVal, i));
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT =
        cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size()) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  llvm::BasicBlock *InvokeDest = getInvokeDest();
  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr) {
        DestPtr = CreateMemTemp(RetTy, "agg.tmp");
        DestIsVolatile = false;
      }
      Builder.CreateStore(CI, DestPtr, DestIsVolatile);
      return RValue::getAggregate(DestPtr);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}

/* VarArg handling */

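/// EmitVAArg delegates to the target ABIInfo, which knows the platform's
/// va_list layout and argument-passing rules.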
llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}