1 //===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliancy.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "CGCall.h"
16 #include "ABIInfo.h"
17 #include "CGCXXABI.h"
18 #include "CodeGenFunction.h"
19 #include "CodeGenModule.h"
20 #include "TargetInfo.h"
21 #include "clang/AST/Decl.h"
22 #include "clang/AST/DeclCXX.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/Basic/TargetInfo.h"
25 #include "clang/Frontend/CodeGenOptions.h"
26 #include "llvm/Attributes.h"
27 #include "llvm/DataLayout.h"
28 #include "llvm/InlineAsm.h"
29 #include "llvm/Support/CallSite.h"
30 #include "llvm/Transforms/Utils/Local.h"
31 using namespace clang;
32 using namespace CodeGen;
33 
34 /***/
35 
36 static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
37   switch (CC) {
38   default: return llvm::CallingConv::C;
39   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
40   case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
41   case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
42   case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
43   case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
44   // TODO: add support for CC_X86Pascal to llvm
45   }
46 }
47 
48 /// Derives the 'this' type for codegen purposes, i.e. ignoring method
49 /// qualification.
50 /// FIXME: address space qualification?
51 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
52   QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
53   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
54 }
55 
56 /// Returns the canonical formal type of the given C++ method.
57 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
58   return MD->getType()->getCanonicalTypeUnqualified()
59            .getAs<FunctionProtoType>();
60 }
61 
62 /// Returns the "extra-canonicalized" return type, which discards
63 /// qualifiers on the return type.  Codegen doesn't care about them,
64 /// and it makes ABI code a little easier to be able to assume that
65 /// all parameter and return types are top-level unqualified.
66 static CanQualType GetReturnType(QualType RetTy) {
67   return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
68 }
69 
70 /// Arrange the argument and result information for a value of the given
71 /// unprototyped freestanding function type.
72 const CGFunctionInfo &
73 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
74   // When translating an unprototyped function type, always use a
75   // variadic type.
76   return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
77                                  ArrayRef<CanQualType>(),
78                                  FTNP->getExtInfo(),
79                                  RequiredArgs(0));
80 }
81 
82 /// Arrange the LLVM function layout for a value of the given function
83 /// type, on top of any implicit parameters already stored.  Use the
84 /// given ExtInfo instead of the ExtInfo from the function type.
85 static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
86                                        SmallVectorImpl<CanQualType> &prefix,
87                                              CanQual<FunctionProtoType> FTP,
88                                               FunctionType::ExtInfo extInfo) {
89   RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
90   // FIXME: Kill copy.
91   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
92     prefix.push_back(FTP->getArgType(i));
93   CanQualType resultType = FTP->getResultType().getUnqualifiedType();
94   return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
95 }
96 
97 /// Arrange the argument and result information for a free function (i.e.
98 /// not a C++ or ObjC instance method) of the given type.
99 static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
100                                       SmallVectorImpl<CanQualType> &prefix,
101                                             CanQual<FunctionProtoType> FTP) {
102   return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
103 }
104 
105 /// Given the formal ext-info of a C++ instance method, adjust it
106 /// according to the C++ ABI in effect.
107 static void adjustCXXMethodInfo(CodeGenTypes &CGT,
108                                 FunctionType::ExtInfo &extInfo,
109                                 bool isVariadic) {
110   if (extInfo.getCC() == CC_Default) {
111     CallingConv CC = CGT.getContext().getDefaultCXXMethodCallConv(isVariadic);
112     extInfo = extInfo.withCallingConv(CC);
113   }
114 }
115 
116 /// Arrange the argument and result information for a free function (i.e.
117 /// not a C++ or ObjC instance method) of the given type.
118 static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
119                                       SmallVectorImpl<CanQualType> &prefix,
120                                             CanQual<FunctionProtoType> FTP) {
121   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
122   adjustCXXMethodInfo(CGT, extInfo, FTP->isVariadic());
123   return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
124 }
125 
126 /// Arrange the argument and result information for a value of the
127 /// given freestanding function type.
128 const CGFunctionInfo &
129 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
130   SmallVector<CanQualType, 16> argTypes;
131   return ::arrangeFreeFunctionType(*this, argTypes, FTP);
132 }
133 
134 static CallingConv getCallingConventionForDecl(const Decl *D) {
135   // Set the appropriate calling convention for the Function.
136   if (D->hasAttr<StdCallAttr>())
137     return CC_X86StdCall;
138 
139   if (D->hasAttr<FastCallAttr>())
140     return CC_X86FastCall;
141 
142   if (D->hasAttr<ThisCallAttr>())
143     return CC_X86ThisCall;
144 
145   if (D->hasAttr<PascalAttr>())
146     return CC_X86Pascal;
147 
148   if (PcsAttr *PCS = D->getAttr<PcsAttr>())
149     return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
150 
151   if (D->hasAttr<PnaclCallAttr>())
152     return CC_PnaclCall;
153 
154   return CC_C;
155 }
156 
157 /// Arrange the argument and result information for a call to an
158 /// unknown C++ non-static member function of the given abstract type.
159 /// The member function must be an ordinary function, i.e. not a
160 /// constructor or destructor.
161 const CGFunctionInfo &
162 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
163                                    const FunctionProtoType *FTP) {
164   SmallVector<CanQualType, 16> argTypes;
165 
166   // Add the 'this' pointer.
167   argTypes.push_back(GetThisType(Context, RD));
168 
169   return ::arrangeCXXMethodType(*this, argTypes,
170               FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
171 }
172 
173 /// Arrange the argument and result information for a declaration or
174 /// definition of the given C++ non-static member function.  The
175 /// member function must be an ordinary function, i.e. not a
176 /// constructor or destructor.
177 const CGFunctionInfo &
178 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
179   assert(!isa<CXXConstructorDecl>(MD) && "wrong method for contructors!");
180   assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
181 
182   CanQual<FunctionProtoType> prototype = GetFormalType(MD);
183 
184   if (MD->isInstance()) {
185     // The abstract case is perfectly fine.
186     return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
187   }
188 
189   return arrangeFreeFunctionType(prototype);
190 }
191 
192 /// Arrange the argument and result information for a declaration
193 /// or definition to the given constructor variant.
194 const CGFunctionInfo &
195 CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
196                                                CXXCtorType ctorKind) {
197   SmallVector<CanQualType, 16> argTypes;
198   argTypes.push_back(GetThisType(Context, D->getParent()));
199   CanQualType resultType = Context.VoidTy;
200 
201   TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
202 
203   CanQual<FunctionProtoType> FTP = GetFormalType(D);
204 
205   RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
206 
207   // Add the formal parameters.
208   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
209     argTypes.push_back(FTP->getArgType(i));
210 
211   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
212   adjustCXXMethodInfo(*this, extInfo, FTP->isVariadic());
213   return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
214 }
215 
216 /// Arrange the argument and result information for a declaration,
217 /// definition, or call to the given destructor variant.  It so
218 /// happens that all three cases produce the same information.
219 const CGFunctionInfo &
220 CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
221                                    CXXDtorType dtorKind) {
222   SmallVector<CanQualType, 2> argTypes;
223   argTypes.push_back(GetThisType(Context, D->getParent()));
224   CanQualType resultType = Context.VoidTy;
225 
226   TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
227 
228   CanQual<FunctionProtoType> FTP = GetFormalType(D);
229   assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
230   assert(FTP->isVariadic() == 0 && "dtor with formal parameters");
231 
232   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
233   adjustCXXMethodInfo(*this, extInfo, false);
234   return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
235                                  RequiredArgs::All);
236 }
237 
238 /// Arrange the argument and result information for the declaration or
239 /// definition of the given function.
240 const CGFunctionInfo &
241 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
242   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
243     if (MD->isInstance())
244       return arrangeCXXMethodDeclaration(MD);
245 
246   CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
247 
248   assert(isa<FunctionType>(FTy));
249 
250   // When declaring a function without a prototype, always use a
251   // non-variadic type.
252   if (isa<FunctionNoProtoType>(FTy)) {
253     CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
254     return arrangeLLVMFunctionInfo(noProto->getResultType(),
255                                    ArrayRef<CanQualType>(),
256                                    noProto->getExtInfo(),
257                                    RequiredArgs::All);
258   }
259 
260   assert(isa<FunctionProtoType>(FTy));
261   return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
262 }
263 
264 /// Arrange the argument and result information for the declaration or
265 /// definition of an Objective-C method.
266 const CGFunctionInfo &
267 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
268   // It happens that this is the same as a call with no optional
269   // arguments, except also using the formal 'self' type.
270   return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
271 }
272 
273 /// Arrange the argument and result information for the function type
274 /// through which to perform a send to the given Objective-C method,
275 /// using the given receiver type.  The receiver type is not always
276 /// the 'self' type of the method or even an Objective-C pointer type.
277 /// This is *not* the right method for actually performing such a
278 /// message send, due to the possibility of optional arguments.
279 const CGFunctionInfo &
280 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
281                                               QualType receiverType) {
282   SmallVector<CanQualType, 16> argTys;
283   argTys.push_back(Context.getCanonicalParamType(receiverType));
284   argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
285   // FIXME: Kill copy?
286   for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
287          e = MD->param_end(); i != e; ++i) {
288     argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
289   }
290 
291   FunctionType::ExtInfo einfo;
292   einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
293 
294   if (getContext().getLangOpts().ObjCAutoRefCount &&
295       MD->hasAttr<NSReturnsRetainedAttr>())
296     einfo = einfo.withProducesResult(true);
297 
298   RequiredArgs required =
299     (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
300 
301   return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
302                                  einfo, required);
303 }
304 
305 const CGFunctionInfo &
306 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
307   // FIXME: Do we need to handle ObjCMethodDecl?
308   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
309 
310   if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
311     return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());
312 
313   if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
314     return arrangeCXXDestructor(DD, GD.getDtorType());
315 
316   return arrangeFunctionDeclaration(FD);
317 }
318 
319 /// Arrange a call as unto a free function, except possibly with an
320 /// additional number of formal parameters considered required.
321 static const CGFunctionInfo &
322 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
323                             const CallArgList &args,
324                             const FunctionType *fnType,
325                             unsigned numExtraRequiredArgs) {
326   assert(args.size() >= numExtraRequiredArgs);
327 
328   // In most cases, there are no optional arguments.
329   RequiredArgs required = RequiredArgs::All;
330 
331   // If we have a variadic prototype, the required arguments are the
332   // extra prefix plus the arguments in the prototype.
333   if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
334     if (proto->isVariadic())
335       required = RequiredArgs(proto->getNumArgs() + numExtraRequiredArgs);
336 
337   // If we don't have a prototype at all, but we're supposed to
338   // explicitly use the variadic convention for unprototyped calls,
339   // treat all of the arguments as required but preserve the nominal
340   // possibility of variadics.
341   } else if (CGT.CGM.getTargetCodeGenInfo()
342                .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
343     required = RequiredArgs(args.size());
344   }
345 
346   return CGT.arrangeFreeFunctionCall(fnType->getResultType(), args,
347                                      fnType->getExtInfo(), required);
348 }
349 
350 /// Figure out the rules for calling a function with the given formal
351 /// type using the given arguments.  The arguments are necessary
352 /// because the function might be unprototyped, in which case it's
353 /// target-dependent in crazy ways.
354 const CGFunctionInfo &
355 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
356                                       const FunctionType *fnType) {
357   return arrangeFreeFunctionLikeCall(*this, args, fnType, 0);
358 }
359 
360 /// A block function call is essentially a free-function call with an
361 /// extra implicit argument.
362 const CGFunctionInfo &
363 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
364                                        const FunctionType *fnType) {
365   return arrangeFreeFunctionLikeCall(*this, args, fnType, 1);
366 }
367 
368 const CGFunctionInfo &
369 CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
370                                       const CallArgList &args,
371                                       FunctionType::ExtInfo info,
372                                       RequiredArgs required) {
373   // FIXME: Kill copy.
374   SmallVector<CanQualType, 16> argTypes;
375   for (CallArgList::const_iterator i = args.begin(), e = args.end();
376        i != e; ++i)
377     argTypes.push_back(Context.getCanonicalParamType(i->Ty));
378   return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
379                                  required);
380 }
381 
382 /// Arrange a call to a C++ method, passing the given arguments.
383 const CGFunctionInfo &
384 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
385                                    const FunctionProtoType *FPT,
386                                    RequiredArgs required) {
387   // FIXME: Kill copy.
388   SmallVector<CanQualType, 16> argTypes;
389   for (CallArgList::const_iterator i = args.begin(), e = args.end();
390        i != e; ++i)
391     argTypes.push_back(Context.getCanonicalParamType(i->Ty));
392 
393   FunctionType::ExtInfo info = FPT->getExtInfo();
394   adjustCXXMethodInfo(*this, info, FPT->isVariadic());
395   return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
396                                  argTypes, info, required);
397 }
398 
399 const CGFunctionInfo &
400 CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
401                                          const FunctionArgList &args,
402                                          const FunctionType::ExtInfo &info,
403                                          bool isVariadic) {
404   // FIXME: Kill copy.
405   SmallVector<CanQualType, 16> argTypes;
406   for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
407        i != e; ++i)
408     argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));
409 
410   RequiredArgs required =
411     (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
412   return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
413                                  required);
414 }
415 
416 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
417   return arrangeLLVMFunctionInfo(getContext().VoidTy, ArrayRef<CanQualType>(),
418                                  FunctionType::ExtInfo(), RequiredArgs::All);
419 }
420 
421 /// Arrange the argument and result information for an abstract value
422 /// of a given function type.  This is the method which all of the
423 /// above functions ultimately defer to.
424 const CGFunctionInfo &
425 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
426                                       ArrayRef<CanQualType> argTypes,
427                                       FunctionType::ExtInfo info,
428                                       RequiredArgs required) {
429 #ifndef NDEBUG
430   for (ArrayRef<CanQualType>::const_iterator
431          I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
432     assert(I->isCanonicalAsParam());
433 #endif
434 
435   unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
436 
437   // Lookup or create unique function info.
438   llvm::FoldingSetNodeID ID;
439   CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);
440 
441   void *insertPos = 0;
442   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
443   if (FI)
444     return *FI;
445 
446   // Construct the function info.  We co-allocate the ArgInfos.
447   FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
448   FunctionInfos.InsertNode(FI, insertPos);
449 
450   bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
451   assert(inserted && "Recursively being processed?");
452 
453   // Compute ABI information.
454   getABIInfo().computeInfo(*FI);
455 
456   // Loop over all of the computed argument and return value info.  If any of
457   // them are direct or extend without a specified coerce type, specify the
458   // default now.
459   ABIArgInfo &retInfo = FI->getReturnInfo();
460   if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
461     retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
462 
463   for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
464        I != E; ++I)
465     if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
466       I->info.setCoerceToType(ConvertType(I->type));
467 
468   bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
469   assert(erased && "Not in set?");
470 
471   return *FI;
472 }
473 
474 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
475                                        const FunctionType::ExtInfo &info,
476                                        CanQualType resultType,
477                                        ArrayRef<CanQualType> argTypes,
478                                        RequiredArgs required) {
479   void *buffer = operator new(sizeof(CGFunctionInfo) +
480                               sizeof(ArgInfo) * (argTypes.size() + 1));
481   CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
482   FI->CallingConvention = llvmCC;
483   FI->EffectiveCallingConvention = llvmCC;
484   FI->ASTCallingConvention = info.getCC();
485   FI->NoReturn = info.getNoReturn();
486   FI->ReturnsRetained = info.getProducesResult();
487   FI->Required = required;
488   FI->HasRegParm = info.getHasRegParm();
489   FI->RegParm = info.getRegParm();
490   FI->NumArgs = argTypes.size();
491   FI->getArgsBuffer()[0].type = resultType;
492   for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
493     FI->getArgsBuffer()[i + 1].type = argTypes[i];
494   return FI;
495 }
496 
497 /***/
498 
499 void CodeGenTypes::GetExpandedTypes(QualType type,
500                      SmallVectorImpl<llvm::Type*> &expandedTypes) {
501   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
502     uint64_t NumElts = AT->getSize().getZExtValue();
503     for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
504       GetExpandedTypes(AT->getElementType(), expandedTypes);
505   } else if (const RecordType *RT = type->getAs<RecordType>()) {
506     const RecordDecl *RD = RT->getDecl();
507     assert(!RD->hasFlexibleArrayMember() &&
508            "Cannot expand structure with flexible array.");
509     if (RD->isUnion()) {
510       // Unions can be here only in degenerative cases - all the fields are same
511       // after flattening. Thus we have to use the "largest" field.
512       const FieldDecl *LargestFD = 0;
513       CharUnits UnionSize = CharUnits::Zero();
514 
515       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
516            i != e; ++i) {
517         const FieldDecl *FD = *i;
518         assert(!FD->isBitField() &&
519                "Cannot expand structure with bit-field members.");
520         CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
521         if (UnionSize < FieldSize) {
522           UnionSize = FieldSize;
523           LargestFD = FD;
524         }
525       }
526       if (LargestFD)
527         GetExpandedTypes(LargestFD->getType(), expandedTypes);
528     } else {
529       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
530            i != e; ++i) {
531         assert(!i->isBitField() &&
532                "Cannot expand structure with bit-field members.");
533         GetExpandedTypes(i->getType(), expandedTypes);
534       }
535     }
536   } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
537     llvm::Type *EltTy = ConvertType(CT->getElementType());
538     expandedTypes.push_back(EltTy);
539     expandedTypes.push_back(EltTy);
540   } else
541     expandedTypes.push_back(ConvertType(type));
542 }
543 
544 llvm::Function::arg_iterator
545 CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
546                                     llvm::Function::arg_iterator AI) {
547   assert(LV.isSimple() &&
548          "Unexpected non-simple lvalue during struct expansion.");
549 
550   if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
551     unsigned NumElts = AT->getSize().getZExtValue();
552     QualType EltTy = AT->getElementType();
553     for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
554       llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
555       LValue LV = MakeAddrLValue(EltAddr, EltTy);
556       AI = ExpandTypeFromArgs(EltTy, LV, AI);
557     }
558   } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
559     RecordDecl *RD = RT->getDecl();
560     if (RD->isUnion()) {
561       // Unions can be here only in degenerative cases - all the fields are same
562       // after flattening. Thus we have to use the "largest" field.
563       const FieldDecl *LargestFD = 0;
564       CharUnits UnionSize = CharUnits::Zero();
565 
566       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
567            i != e; ++i) {
568         const FieldDecl *FD = *i;
569         assert(!FD->isBitField() &&
570                "Cannot expand structure with bit-field members.");
571         CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
572         if (UnionSize < FieldSize) {
573           UnionSize = FieldSize;
574           LargestFD = FD;
575         }
576       }
577       if (LargestFD) {
578         // FIXME: What are the right qualifiers here?
579         LValue SubLV = EmitLValueForField(LV, LargestFD);
580         AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
581       }
582     } else {
583       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
584            i != e; ++i) {
585         FieldDecl *FD = *i;
586         QualType FT = FD->getType();
587 
588         // FIXME: What are the right qualifiers here?
589         LValue SubLV = EmitLValueForField(LV, FD);
590         AI = ExpandTypeFromArgs(FT, SubLV, AI);
591       }
592     }
593   } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
594     QualType EltTy = CT->getElementType();
595     llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
596     EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
597     llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
598     EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
599   } else {
600     EmitStoreThroughLValue(RValue::get(AI), LV);
601     ++AI;
602   }
603 
604   return AI;
605 }
606 
607 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
608 /// accessing some number of bytes out of it, try to gep into the struct to get
609 /// at its inner goodness.  Dive as deep as possible without entering an element
610 /// with an in-memory size smaller than DstSize.
611 static llvm::Value *
612 EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
613                                    llvm::StructType *SrcSTy,
614                                    uint64_t DstSize, CodeGenFunction &CGF) {
615   // We can't dive into a zero-element struct.
616   if (SrcSTy->getNumElements() == 0) return SrcPtr;
617 
618   llvm::Type *FirstElt = SrcSTy->getElementType(0);
619 
620   // If the first elt is at least as large as what we're looking for, or if the
621   // first element is the same size as the whole struct, we can enter it.
622   uint64_t FirstEltSize =
623     CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
624   if (FirstEltSize < DstSize &&
625       FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
626     return SrcPtr;
627 
628   // GEP into the first element.
629   SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
630 
631   // If the first element is a struct, recurse.
632   llvm::Type *SrcTy =
633     cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
634   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
635     return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
636 
637   return SrcPtr;
638 }
639 
640 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
641 /// are either integers or pointers.  This does a truncation of the value if it
642 /// is too large or a zero extension if it is too small.
643 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
644                                              llvm::Type *Ty,
645                                              CodeGenFunction &CGF) {
646   if (Val->getType() == Ty)
647     return Val;
648 
649   if (isa<llvm::PointerType>(Val->getType())) {
650     // If this is Pointer->Pointer avoid conversion to and from int.
651     if (isa<llvm::PointerType>(Ty))
652       return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
653 
654     // Convert the pointer to an integer so we can play with its width.
655     Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
656   }
657 
658   llvm::Type *DestIntTy = Ty;
659   if (isa<llvm::PointerType>(DestIntTy))
660     DestIntTy = CGF.IntPtrTy;
661 
662   if (Val->getType() != DestIntTy)
663     Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
664 
665   if (isa<llvm::PointerType>(Ty))
666     Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
667   return Val;
668 }
669 
670 
671 
672 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
673 /// a pointer to an object of type \arg Ty.
674 ///
675 /// This safely handles the case when the src type is smaller than the
676 /// destination type; in this situation the values of bits which not
677 /// present in the src are undefined.
678 static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
679                                       llvm::Type *Ty,
680                                       CodeGenFunction &CGF) {
681   llvm::Type *SrcTy =
682     cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
683 
684   // If SrcTy and Ty are the same, just do a load.
685   if (SrcTy == Ty)
686     return CGF.Builder.CreateLoad(SrcPtr);
687 
688   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
689 
690   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
691     SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
692     SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
693   }
694 
695   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
696 
697   // If the source and destination are integer or pointer types, just do an
698   // extension or truncation to the desired type.
699   if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
700       (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
701     llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
702     return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
703   }
704 
705   // If load is legal, just bitcast the src pointer.
706   if (SrcSize >= DstSize) {
707     // Generally SrcSize is never greater than DstSize, since this means we are
708     // losing bits. However, this can happen in cases where the structure has
709     // additional padding, for example due to a user specified alignment.
710     //
711     // FIXME: Assert that we aren't truncating non-padding bits when have access
712     // to that information.
713     llvm::Value *Casted =
714       CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
715     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
716     // FIXME: Use better alignment / avoid requiring aligned load.
717     Load->setAlignment(1);
718     return Load;
719   }
720 
721   // Otherwise do coercion through memory. This is stupid, but
722   // simple.
723   llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
724   llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
725   llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
726   llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
727   // FIXME: Use better alignment.
728   CGF.Builder.CreateMemCpy(Casted, SrcCasted,
729       llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
730       1, false);
731   return CGF.Builder.CreateLoad(Tmp);
732 }
733 
734 // Function to store a first-class aggregate into memory.  We prefer to
735 // store the elements rather than the aggregate to be more friendly to
736 // fast-isel.
737 // FIXME: Do we need to recurse here?
738 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
739                           llvm::Value *DestPtr, bool DestIsVolatile,
740                           bool LowAlignment) {
741   // Prefer scalar stores to first-class aggregate stores.
742   if (llvm::StructType *STy =
743         dyn_cast<llvm::StructType>(Val->getType())) {
744     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
745       llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
746       llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
747       llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
748                                                     DestIsVolatile);
749       if (LowAlignment)
750         SI->setAlignment(1);
751     }
752   } else {
753     llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
754     if (LowAlignment)
755       SI->setAlignment(1);
756   }
757 }
758 
759 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
760 /// where the source and destination may have different types.
761 ///
762 /// This safely handles the case when the src type is larger than the
763 /// destination type; the upper bits of the src will be lost.
764 static void CreateCoercedStore(llvm::Value *Src,
765                                llvm::Value *DstPtr,
766                                bool DstIsVolatile,
767                                CodeGenFunction &CGF) {
768   llvm::Type *SrcTy = Src->getType();
769   llvm::Type *DstTy =
770     cast<llvm::PointerType>(DstPtr->getType())->getElementType();
771   if (SrcTy == DstTy) {
772     CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
773     return;
774   }
775 
776   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
777 
778   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
779     DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
780     DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
781   }
782 
783   // If the source and destination are integer or pointer types, just do an
784   // extension or truncation to the desired type.
785   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
786       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
787     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
788     CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
789     return;
790   }
791 
792   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
793 
794   // If store is legal, just bitcast the src pointer.
795   if (SrcSize <= DstSize) {
796     llvm::Value *Casted =
797       CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
798     // FIXME: Use better alignment / avoid requiring aligned store.
799     BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
800   } else {
801     // Otherwise do coercion through memory. This is stupid, but
802     // simple.
803 
804     // Generally SrcSize is never greater than DstSize, since this means we are
805     // losing bits. However, this can happen in cases where the structure has
806     // additional padding, for example due to a user specified alignment.
807     //
808     // FIXME: Assert that we aren't truncating non-padding bits when have access
809     // to that information.
810     llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
811     CGF.Builder.CreateStore(Src, Tmp);
812     llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
813     llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
814     llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
815     // FIXME: Use better alignment.
816     CGF.Builder.CreateMemCpy(DstCasted, Casted,
817         llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
818         1, false);
819   }
820 }
821 
822 /***/
823 
824 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
825   return FI.getReturnInfo().isIndirect();
826 }
827 
828 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
829   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
830     switch (BT->getKind()) {
831     default:
832       return false;
833     case BuiltinType::Float:
834       return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Float);
835     case BuiltinType::Double:
836       return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Double);
837     case BuiltinType::LongDouble:
838       return getContext().getTargetInfo().useObjCFPRetForRealType(
839         TargetInfo::LongDouble);
840     }
841   }
842 
843   return false;
844 }
845 
846 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
847   if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
848     if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
849       if (BT->getKind() == BuiltinType::LongDouble)
850         return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
851     }
852   }
853 
854   return false;
855 }
856 
857 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
858   const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
859   return GetFunctionType(FI);
860 }
861 
862 llvm::FunctionType *
863 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
864 
865   bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
866   assert(Inserted && "Recursively being processed?");
867 
868   SmallVector<llvm::Type*, 8> argTypes;
869   llvm::Type *resultType = 0;
870 
871   const ABIArgInfo &retAI = FI.getReturnInfo();
872   switch (retAI.getKind()) {
873   case ABIArgInfo::Expand:
874     llvm_unreachable("Invalid ABI kind for return argument");
875 
876   case ABIArgInfo::Extend:
877   case ABIArgInfo::Direct:
878     resultType = retAI.getCoerceToType();
879     break;
880 
881   case ABIArgInfo::Indirect: {
882     assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
883     resultType = llvm::Type::getVoidTy(getLLVMContext());
884 
885     QualType ret = FI.getReturnType();
886     llvm::Type *ty = ConvertType(ret);
887     unsigned addressSpace = Context.getTargetAddressSpace(ret);
888     argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
889     break;
890   }
891 
892   case ABIArgInfo::Ignore:
893     resultType = llvm::Type::getVoidTy(getLLVMContext());
894     break;
895   }
896 
897   // Add in all of the required arguments.
898   CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
899   if (FI.isVariadic()) {
900     ie = it + FI.getRequiredArgs().getNumRequiredArgs();
901   } else {
902     ie = FI.arg_end();
903   }
904   for (; it != ie; ++it) {
905     const ABIArgInfo &argAI = it->info;
906 
907     // Insert a padding type to ensure proper alignment.
908     if (llvm::Type *PaddingType = argAI.getPaddingType())
909       argTypes.push_back(PaddingType);
910 
911     switch (argAI.getKind()) {
912     case ABIArgInfo::Ignore:
913       break;
914 
915     case ABIArgInfo::Indirect: {
916       // indirect arguments are always on the stack, which is addr space #0.
917       llvm::Type *LTy = ConvertTypeForMem(it->type);
918       argTypes.push_back(LTy->getPointerTo());
919       break;
920     }
921 
922     case ABIArgInfo::Extend:
923     case ABIArgInfo::Direct: {
924       // If the coerce-to type is a first class aggregate, flatten it.  Either
925       // way is semantically identical, but fast-isel and the optimizer
926       // generally likes scalar values better than FCAs.
927       llvm::Type *argType = argAI.getCoerceToType();
928       if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
929         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
930           argTypes.push_back(st->getElementType(i));
931       } else {
932         argTypes.push_back(argType);
933       }
934       break;
935     }
936 
937     case ABIArgInfo::Expand:
938       GetExpandedTypes(it->type, argTypes);
939       break;
940     }
941   }
942 
943   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
944   assert(Erased && "Not in set?");
945 
946   return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
947 }
948 
949 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
950   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
951   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
952 
953   if (!isFuncTypeConvertible(FPT))
954     return llvm::StructType::get(getLLVMContext());
955 
956   const CGFunctionInfo *Info;
957   if (isa<CXXDestructorDecl>(MD))
958     Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
959   else
960     Info = &arrangeCXXMethodDeclaration(MD);
961   return GetFunctionType(*Info);
962 }
963 
964 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
965                                            const Decl *TargetDecl,
966                                            AttributeListType &PAL,
967                                            unsigned &CallingConv) {
968   llvm::AttrBuilder FuncAttrs;
969   llvm::AttrBuilder RetAttrs;
970 
971   CallingConv = FI.getEffectiveCallingConvention();
972 
973   if (FI.isNoReturn())
974     FuncAttrs.addAttribute(llvm::Attributes::NoReturn);
975 
976   // FIXME: handle sseregparm someday...
977   if (TargetDecl) {
978     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
979       FuncAttrs.addAttribute(llvm::Attributes::ReturnsTwice);
980     if (TargetDecl->hasAttr<NoThrowAttr>())
981       FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
982     else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
983       const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
984       if (FPT && FPT->isNothrow(getContext()))
985         FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
986     }
987 
988     if (TargetDecl->hasAttr<NoReturnAttr>())
989       FuncAttrs.addAttribute(llvm::Attributes::NoReturn);
990 
991     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
992       FuncAttrs.addAttribute(llvm::Attributes::ReturnsTwice);
993 
994     // 'const' and 'pure' attribute functions are also nounwind.
995     if (TargetDecl->hasAttr<ConstAttr>()) {
996       FuncAttrs.addAttribute(llvm::Attributes::ReadNone);
997       FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
998     } else if (TargetDecl->hasAttr<PureAttr>()) {
999       FuncAttrs.addAttribute(llvm::Attributes::ReadOnly);
1000       FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
1001     }
1002     if (TargetDecl->hasAttr<MallocAttr>())
1003       RetAttrs.addAttribute(llvm::Attributes::NoAlias);
1004   }
1005 
1006   if (CodeGenOpts.OptimizeSize)
1007     FuncAttrs.addAttribute(llvm::Attributes::OptimizeForSize);
1008   if (CodeGenOpts.OptimizeSize == 2)
1009     FuncAttrs.addAttribute(llvm::Attributes::MinSize);
1010   if (CodeGenOpts.DisableRedZone)
1011     FuncAttrs.addAttribute(llvm::Attributes::NoRedZone);
1012   if (CodeGenOpts.NoImplicitFloat)
1013     FuncAttrs.addAttribute(llvm::Attributes::NoImplicitFloat);
1014 
1015   QualType RetTy = FI.getReturnType();
1016   unsigned Index = 1;
1017   const ABIArgInfo &RetAI = FI.getReturnInfo();
1018   switch (RetAI.getKind()) {
1019   case ABIArgInfo::Extend:
1020    if (RetTy->hasSignedIntegerRepresentation())
1021      RetAttrs.addAttribute(llvm::Attributes::SExt);
1022    else if (RetTy->hasUnsignedIntegerRepresentation())
1023      RetAttrs.addAttribute(llvm::Attributes::ZExt);
1024     break;
1025   case ABIArgInfo::Direct:
1026   case ABIArgInfo::Ignore:
1027     break;
1028 
1029   case ABIArgInfo::Indirect: {
1030     llvm::AttrBuilder SRETAttrs;
1031     SRETAttrs.addAttribute(llvm::Attributes::StructRet);
1032     if (RetAI.getInReg())
1033       SRETAttrs.addAttribute(llvm::Attributes::InReg);
1034     PAL.push_back(llvm::
1035                   AttributeWithIndex::get(Index,
1036                                          llvm::Attributes::get(getLLVMContext(),
1037                                                                SRETAttrs)));
1038 
1039     ++Index;
1040     // sret disables readnone and readonly
1041     FuncAttrs.removeAttribute(llvm::Attributes::ReadOnly)
1042       .removeAttribute(llvm::Attributes::ReadNone);
1043     break;
1044   }
1045 
1046   case ABIArgInfo::Expand:
1047     llvm_unreachable("Invalid ABI kind for return argument");
1048   }
1049 
1050   if (RetAttrs.hasAttributes())
1051     PAL.push_back(llvm::
1052                   AttributeWithIndex::get(llvm::AttributeSet::ReturnIndex,
1053                                          llvm::Attributes::get(getLLVMContext(),
1054                                                                RetAttrs)));
1055 
1056   for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1057          ie = FI.arg_end(); it != ie; ++it) {
1058     QualType ParamType = it->type;
1059     const ABIArgInfo &AI = it->info;
1060     llvm::AttrBuilder Attrs;
1061 
1062     if (AI.getPaddingType()) {
1063       if (AI.getPaddingInReg()) {
1064         llvm::AttrBuilder PadAttrs;
1065         PadAttrs.addAttribute(llvm::Attributes::InReg);
1066 
1067         llvm::Attributes A =llvm::Attributes::get(getLLVMContext(), PadAttrs);
1068         PAL.push_back(llvm::AttributeWithIndex::get(Index, A));
1069       }
1070       // Increment Index if there is padding.
1071       ++Index;
1072     }
1073 
1074     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1075     // have the corresponding parameter variable.  It doesn't make
1076     // sense to do it here because parameters are so messed up.
1077     switch (AI.getKind()) {
1078     case ABIArgInfo::Extend:
1079       if (ParamType->isSignedIntegerOrEnumerationType())
1080         Attrs.addAttribute(llvm::Attributes::SExt);
1081       else if (ParamType->isUnsignedIntegerOrEnumerationType())
1082         Attrs.addAttribute(llvm::Attributes::ZExt);
1083       // FALL THROUGH
1084     case ABIArgInfo::Direct:
1085       if (AI.getInReg())
1086         Attrs.addAttribute(llvm::Attributes::InReg);
1087 
1088       // FIXME: handle sseregparm someday...
1089 
1090       if (llvm::StructType *STy =
1091           dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
1092         unsigned Extra = STy->getNumElements()-1;  // 1 will be added below.
1093         if (Attrs.hasAttributes())
1094           for (unsigned I = 0; I < Extra; ++I)
1095             PAL.push_back(llvm::AttributeWithIndex::get(Index + I,
1096                                          llvm::Attributes::get(getLLVMContext(),
1097                                                                Attrs)));
1098         Index += Extra;
1099       }
1100       break;
1101 
1102     case ABIArgInfo::Indirect:
1103       if (AI.getInReg())
1104         Attrs.addAttribute(llvm::Attributes::InReg);
1105 
1106       if (AI.getIndirectByVal())
1107         Attrs.addAttribute(llvm::Attributes::ByVal);
1108 
1109       Attrs.addAlignmentAttr(AI.getIndirectAlign());
1110 
1111       // byval disables readnone and readonly.
1112       FuncAttrs.removeAttribute(llvm::Attributes::ReadOnly)
1113         .removeAttribute(llvm::Attributes::ReadNone);
1114       break;
1115 
1116     case ABIArgInfo::Ignore:
1117       // Skip increment, no matching LLVM parameter.
1118       continue;
1119 
1120     case ABIArgInfo::Expand: {
1121       SmallVector<llvm::Type*, 8> types;
1122       // FIXME: This is rather inefficient. Do we ever actually need to do
1123       // anything here? The result should be just reconstructed on the other
1124       // side, so extension should be a non-issue.
1125       getTypes().GetExpandedTypes(ParamType, types);
1126       Index += types.size();
1127       continue;
1128     }
1129     }
1130 
1131     if (Attrs.hasAttributes())
1132       PAL.push_back(llvm::AttributeWithIndex::get(Index,
1133                                          llvm::Attributes::get(getLLVMContext(),
1134                                                                Attrs)));
1135     ++Index;
1136   }
1137   if (FuncAttrs.hasAttributes())
1138     PAL.push_back(llvm::
1139                   AttributeWithIndex::get(llvm::AttributeSet::FunctionIndex,
1140                                          llvm::Attributes::get(getLLVMContext(),
1141                                                                FuncAttrs)));
1142 }
1143 
1144 /// An argument came in as a promoted argument; demote it back to its
1145 /// declared type.
1146 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
1147                                          const VarDecl *var,
1148                                          llvm::Value *value) {
1149   llvm::Type *varType = CGF.ConvertType(var->getType());
1150 
1151   // This can happen with promotions that actually don't change the
1152   // underlying type, like the enum promotions.
1153   if (value->getType() == varType) return value;
1154 
1155   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
1156          && "unexpected promotion type");
1157 
1158   if (isa<llvm::IntegerType>(varType))
1159     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
1160 
1161   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
1162 }
1163 
1164 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
1165                                          llvm::Function *Fn,
1166                                          const FunctionArgList &Args) {
1167   // If this is an implicit-return-zero function, go ahead and
1168   // initialize the return value.  TODO: it might be nice to have
1169   // a more general mechanism for this that didn't require synthesized
1170   // return statements.
1171   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
1172     if (FD->hasImplicitReturnZero()) {
1173       QualType RetTy = FD->getResultType().getUnqualifiedType();
1174       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
1175       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
1176       Builder.CreateStore(Zero, ReturnValue);
1177     }
1178   }
1179 
1180   // FIXME: We no longer need the types from FunctionArgList; lift up and
1181   // simplify.
1182 
1183   // Emit allocs for param decls.  Give the LLVM Argument nodes names.
1184   llvm::Function::arg_iterator AI = Fn->arg_begin();
1185 
1186   // Name the struct return argument.
1187   if (CGM.ReturnTypeUsesSRet(FI)) {
1188     AI->setName("agg.result");
1189     AI->addAttr(llvm::Attributes::get(getLLVMContext(),
1190                                       llvm::Attributes::NoAlias));
1191     ++AI;
1192   }
1193 
1194   assert(FI.arg_size() == Args.size() &&
1195          "Mismatch between function signature & arguments.");
1196   unsigned ArgNo = 1;
1197   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
1198   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1199        i != e; ++i, ++info_it, ++ArgNo) {
1200     const VarDecl *Arg = *i;
1201     QualType Ty = info_it->type;
1202     const ABIArgInfo &ArgI = info_it->info;
1203 
1204     bool isPromoted =
1205       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
1206 
1207     // Skip the dummy padding argument.
1208     if (ArgI.getPaddingType())
1209       ++AI;
1210 
1211     switch (ArgI.getKind()) {
1212     case ABIArgInfo::Indirect: {
1213       llvm::Value *V = AI;
1214 
1215       if (hasAggregateLLVMType(Ty)) {
1216         // Aggregates and complex variables are accessed by reference.  All we
1217         // need to do is realign the value, if requested
1218         if (ArgI.getIndirectRealign()) {
1219           llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
1220 
1221           // Copy from the incoming argument pointer to the temporary with the
1222           // appropriate alignment.
1223           //
1224           // FIXME: We should have a common utility for generating an aggregate
1225           // copy.
1226           llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
1227           CharUnits Size = getContext().getTypeSizeInChars(Ty);
1228           llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
1229           llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
1230           Builder.CreateMemCpy(Dst,
1231                                Src,
1232                                llvm::ConstantInt::get(IntPtrTy,
1233                                                       Size.getQuantity()),
1234                                ArgI.getIndirectAlign(),
1235                                false);
1236           V = AlignedTemp;
1237         }
1238       } else {
1239         // Load scalar value from indirect argument.
1240         CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
1241         V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);
1242 
1243         if (isPromoted)
1244           V = emitArgumentDemotion(*this, Arg, V);
1245       }
1246       EmitParmDecl(*Arg, V, ArgNo);
1247       break;
1248     }
1249 
1250     case ABIArgInfo::Extend:
1251     case ABIArgInfo::Direct: {
1252 
1253       // If we have the trivial case, handle it with no muss and fuss.
1254       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
1255           ArgI.getCoerceToType() == ConvertType(Ty) &&
1256           ArgI.getDirectOffset() == 0) {
1257         assert(AI != Fn->arg_end() && "Argument mismatch!");
1258         llvm::Value *V = AI;
1259 
1260         if (Arg->getType().isRestrictQualified())
1261           AI->addAttr(llvm::Attributes::get(getLLVMContext(),
1262                                             llvm::Attributes::NoAlias));
1263 
1264         // Ensure the argument is the correct type.
1265         if (V->getType() != ArgI.getCoerceToType())
1266           V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
1267 
1268         if (isPromoted)
1269           V = emitArgumentDemotion(*this, Arg, V);
1270 
1271         // Because of merging of function types from multiple decls it is
1272         // possible for the type of an argument to not match the corresponding
1273         // type in the function type. Since we are codegening the callee
1274         // in here, add a cast to the argument type.
1275         llvm::Type *LTy = ConvertType(Arg->getType());
1276         if (V->getType() != LTy)
1277           V = Builder.CreateBitCast(V, LTy);
1278 
1279         EmitParmDecl(*Arg, V, ArgNo);
1280         break;
1281       }
1282 
1283       llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
1284 
1285       // The alignment we need to use is the max of the requested alignment for
1286       // the argument plus the alignment required by our access code below.
1287       unsigned AlignmentToUse =
1288         CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
1289       AlignmentToUse = std::max(AlignmentToUse,
1290                         (unsigned)getContext().getDeclAlign(Arg).getQuantity());
1291 
1292       Alloca->setAlignment(AlignmentToUse);
1293       llvm::Value *V = Alloca;
1294       llvm::Value *Ptr = V;    // Pointer to store into.
1295 
1296       // If the value is offset in memory, apply the offset now.
1297       if (unsigned Offs = ArgI.getDirectOffset()) {
1298         Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
1299         Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
1300         Ptr = Builder.CreateBitCast(Ptr,
1301                           llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
1302       }
1303 
1304       // If the coerce-to type is a first class aggregate, we flatten it and
1305       // pass the elements. Either way is semantically identical, but fast-isel
1306       // and the optimizer generally likes scalar values better than FCAs.
1307       llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
1308       if (STy && STy->getNumElements() > 1) {
1309         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
1310         llvm::Type *DstTy =
1311           cast<llvm::PointerType>(Ptr->getType())->getElementType();
1312         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
1313 
1314         if (SrcSize <= DstSize) {
1315           Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
1316 
1317           for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1318             assert(AI != Fn->arg_end() && "Argument mismatch!");
1319             AI->setName(Arg->getName() + ".coerce" + Twine(i));
1320             llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
1321             Builder.CreateStore(AI++, EltPtr);
1322           }
1323         } else {
1324           llvm::AllocaInst *TempAlloca =
1325             CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
1326           TempAlloca->setAlignment(AlignmentToUse);
1327           llvm::Value *TempV = TempAlloca;
1328 
1329           for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1330             assert(AI != Fn->arg_end() && "Argument mismatch!");
1331             AI->setName(Arg->getName() + ".coerce" + Twine(i));
1332             llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
1333             Builder.CreateStore(AI++, EltPtr);
1334           }
1335 
1336           Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
1337         }
1338       } else {
1339         // Simple case, just do a coerced store of the argument into the alloca.
1340         assert(AI != Fn->arg_end() && "Argument mismatch!");
1341         AI->setName(Arg->getName() + ".coerce");
1342         CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
1343       }
1344 
1345 
1346       // Match to what EmitParmDecl is expecting for this type.
1347       if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
1348         V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
1349         if (isPromoted)
1350           V = emitArgumentDemotion(*this, Arg, V);
1351       }
1352       EmitParmDecl(*Arg, V, ArgNo);
1353       continue;  // Skip ++AI increment, already done.
1354     }
1355 
1356     case ABIArgInfo::Expand: {
1357       // If this structure was expanded into multiple arguments then
1358       // we need to create a temporary and reconstruct it from the
1359       // arguments.
1360       llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
1361       CharUnits Align = getContext().getDeclAlign(Arg);
1362       Alloca->setAlignment(Align.getQuantity());
1363       LValue LV = MakeAddrLValue(Alloca, Ty, Align);
1364       llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
1365       EmitParmDecl(*Arg, Alloca, ArgNo);
1366 
1367       // Name the arguments used in expansion and increment AI.
1368       unsigned Index = 0;
1369       for (; AI != End; ++AI, ++Index)
1370         AI->setName(Arg->getName() + "." + Twine(Index));
1371       continue;
1372     }
1373 
1374     case ABIArgInfo::Ignore:
1375       // Initialize the local variable appropriately.
1376       if (hasAggregateLLVMType(Ty))
1377         EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
1378       else
1379         EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
1380                      ArgNo);
1381 
1382       // Skip increment, no matching LLVM parameter.
1383       continue;
1384     }
1385 
1386     ++AI;
1387   }
1388   assert(AI == Fn->arg_end() && "Argument mismatch!");
1389 }
1390 
1391 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
1392   while (insn->use_empty()) {
1393     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
1394     if (!bitcast) return;
1395 
1396     // This is "safe" because we would have used a ConstantExpr otherwise.
1397     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
1398     bitcast->eraseFromParent();
1399   }
1400 }
1401 
1402 /// Try to emit a fused autorelease of a return result.
1403 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
1404                                                     llvm::Value *result) {
1405   // We must be immediately followed the cast.
1406   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
1407   if (BB->empty()) return 0;
1408   if (&BB->back() != result) return 0;
1409 
1410   llvm::Type *resultType = result->getType();
1411 
1412   // result is in a BasicBlock and is therefore an Instruction.
1413   llvm::Instruction *generator = cast<llvm::Instruction>(result);
1414 
1415   SmallVector<llvm::Instruction*,4> insnsToKill;
1416 
1417   // Look for:
1418   //  %generator = bitcast %type1* %generator2 to %type2*
1419   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
1420     // We would have emitted this as a constant if the operand weren't
1421     // an Instruction.
1422     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
1423 
1424     // Require the generator to be immediately followed by the cast.
1425     if (generator->getNextNode() != bitcast)
1426       return 0;
1427 
1428     insnsToKill.push_back(bitcast);
1429   }
1430 
1431   // Look for:
1432   //   %generator = call i8* @objc_retain(i8* %originalResult)
1433   // or
1434   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
1435   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
1436   if (!call) return 0;
1437 
1438   bool doRetainAutorelease;
1439 
1440   if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
1441     doRetainAutorelease = true;
1442   } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
1443                                           .objc_retainAutoreleasedReturnValue) {
1444     doRetainAutorelease = false;
1445 
1446     // If we emitted an assembly marker for this call (and the
1447     // ARCEntrypoints field should have been set if so), go looking
1448     // for that call.  If we can't find it, we can't do this
1449     // optimization.  But it should always be the immediately previous
1450     // instruction, unless we needed bitcasts around the call.
1451     if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
1452       llvm::Instruction *prev = call->getPrevNode();
1453       assert(prev);
1454       if (isa<llvm::BitCastInst>(prev)) {
1455         prev = prev->getPrevNode();
1456         assert(prev);
1457       }
1458       assert(isa<llvm::CallInst>(prev));
1459       assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
1460                CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
1461       insnsToKill.push_back(prev);
1462     }
1463   } else {
1464     return 0;
1465   }
1466 
1467   result = call->getArgOperand(0);
1468   insnsToKill.push_back(call);
1469 
1470   // Keep killing bitcasts, for sanity.  Note that we no longer care
1471   // about precise ordering as long as there's exactly one use.
1472   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
1473     if (!bitcast->hasOneUse()) break;
1474     insnsToKill.push_back(bitcast);
1475     result = bitcast->getOperand(0);
1476   }
1477 
1478   // Delete all the unnecessary instructions, from latest to earliest.
1479   for (SmallVectorImpl<llvm::Instruction*>::iterator
1480          i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
1481     (*i)->eraseFromParent();
1482 
1483   // Do the fused retain/autorelease if we were asked to.
1484   if (doRetainAutorelease)
1485     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
1486 
1487   // Cast back to the result type.
1488   return CGF.Builder.CreateBitCast(result, resultType);
1489 }
1490 
1491 /// If this is a +1 of the value of an immutable 'self', remove it.
1492 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
1493                                           llvm::Value *result) {
1494   // This is only applicable to a method with an immutable 'self'.
1495   const ObjCMethodDecl *method =
1496     dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
1497   if (!method) return 0;
1498   const VarDecl *self = method->getSelfDecl();
1499   if (!self->getType().isConstQualified()) return 0;
1500 
1501   // Look for a retain call.
1502   llvm::CallInst *retainCall =
1503     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
1504   if (!retainCall ||
1505       retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
1506     return 0;
1507 
1508   // Look for an ordinary load of 'self'.
1509   llvm::Value *retainedValue = retainCall->getArgOperand(0);
1510   llvm::LoadInst *load =
1511     dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
1512   if (!load || load->isAtomic() || load->isVolatile() ||
1513       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
1514     return 0;
1515 
1516   // Okay!  Burn it all down.  This relies for correctness on the
1517   // assumption that the retain is emitted as part of the return and
1518   // that thereafter everything is used "linearly".
1519   llvm::Type *resultType = result->getType();
1520   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
1521   assert(retainCall->use_empty());
1522   retainCall->eraseFromParent();
1523   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
1524 
1525   return CGF.Builder.CreateBitCast(load, resultType);
1526 }
1527 
1528 /// Emit an ARC autorelease of the result of a function.
1529 ///
1530 /// \return the value to actually return from the function
1531 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
1532                                             llvm::Value *result) {
1533   // If we're returning 'self', kill the initial retain.  This is a
1534   // heuristic attempt to "encourage correctness" in the really unfortunate
1535   // case where we have a return of self during a dealloc and we desperately
1536   // need to avoid the possible autorelease.
1537   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
1538     return self;
1539 
1540   // At -O0, try to emit a fused retain/autorelease.
1541   if (CGF.shouldUseFusedARCCalls())
1542     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
1543       return fused;
1544 
1545   return CGF.EmitARCAutoreleaseReturnValue(result);
1546 }
1547 
1548 /// Heuristically search for a dominating store to the return-value slot.
1549 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
1550   // If there are multiple uses of the return-value slot, just check
1551   // for something immediately preceding the IP.  Sometimes this can
1552   // happen with how we generate implicit-returns; it can also happen
1553   // with noreturn cleanups.
1554   if (!CGF.ReturnValue->hasOneUse()) {
1555     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1556     if (IP->empty()) return 0;
1557     llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
1558     if (!store) return 0;
1559     if (store->getPointerOperand() != CGF.ReturnValue) return 0;
1560     assert(!store->isAtomic() && !store->isVolatile()); // see below
1561     return store;
1562   }
1563 
1564   llvm::StoreInst *store =
1565     dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
1566   if (!store) return 0;
1567 
1568   // These aren't actually possible for non-coerced returns, and we
1569   // only care about non-coerced returns on this code path.
1570   assert(!store->isAtomic() && !store->isVolatile());
1571 
1572   // Now do a first-and-dirty dominance check: just walk up the
1573   // single-predecessors chain from the current insertion point.
1574   llvm::BasicBlock *StoreBB = store->getParent();
1575   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1576   while (IP != StoreBB) {
1577     if (!(IP = IP->getSinglePredecessor()))
1578       return 0;
1579   }
1580 
1581   // Okay, the store's basic block dominates the insertion point; we
1582   // can do our thing.
1583   return store;
1584 }
1585 
1586 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
1587   // Functions with no result always return void.
1588   if (ReturnValue == 0) {
1589     Builder.CreateRetVoid();
1590     return;
1591   }
1592 
1593   llvm::DebugLoc RetDbgLoc;
1594   llvm::Value *RV = 0;
1595   QualType RetTy = FI.getReturnType();
1596   const ABIArgInfo &RetAI = FI.getReturnInfo();
1597 
1598   switch (RetAI.getKind()) {
1599   case ABIArgInfo::Indirect: {
1600     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
1601     if (RetTy->isAnyComplexType()) {
1602       ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
1603       StoreComplexToAddr(RT, CurFn->arg_begin(), false);
1604     } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1605       // Do nothing; aggregrates get evaluated directly into the destination.
1606     } else {
1607       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
1608                         false, Alignment, RetTy);
1609     }
1610     break;
1611   }
1612 
1613   case ABIArgInfo::Extend:
1614   case ABIArgInfo::Direct:
1615     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
1616         RetAI.getDirectOffset() == 0) {
1617       // The internal return value temp always will have pointer-to-return-type
1618       // type, just do a load.
1619 
1620       // If there is a dominating store to ReturnValue, we can elide
1621       // the load, zap the store, and usually zap the alloca.
1622       if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
1623         // Get the stored value and nuke the now-dead store.
1624         RetDbgLoc = SI->getDebugLoc();
1625         RV = SI->getValueOperand();
1626         SI->eraseFromParent();
1627 
1628         // If that was the only use of the return value, nuke it as well now.
1629         if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
1630           cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
1631           ReturnValue = 0;
1632         }
1633 
1634       // Otherwise, we have to do a simple load.
1635       } else {
1636         RV = Builder.CreateLoad(ReturnValue);
1637       }
1638     } else {
1639       llvm::Value *V = ReturnValue;
1640       // If the value is offset in memory, apply the offset now.
1641       if (unsigned Offs = RetAI.getDirectOffset()) {
1642         V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
1643         V = Builder.CreateConstGEP1_32(V, Offs);
1644         V = Builder.CreateBitCast(V,
1645                          llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
1646       }
1647 
1648       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
1649     }
1650 
1651     // In ARC, end functions that return a retainable type with a call
1652     // to objc_autoreleaseReturnValue.
1653     if (AutoreleaseResult) {
1654       assert(getLangOpts().ObjCAutoRefCount &&
1655              !FI.isReturnsRetained() &&
1656              RetTy->isObjCRetainableType());
1657       RV = emitAutoreleaseOfResult(*this, RV);
1658     }
1659 
1660     break;
1661 
1662   case ABIArgInfo::Ignore:
1663     break;
1664 
1665   case ABIArgInfo::Expand:
1666     llvm_unreachable("Invalid ABI kind for return argument");
1667   }
1668 
1669   llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
1670   if (!RetDbgLoc.isUnknown())
1671     Ret->setDebugLoc(RetDbgLoc);
1672 }
1673 
1674 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
1675                                           const VarDecl *param) {
1676   // StartFunction converted the ABI-lowered parameter(s) into a
1677   // local alloca.  We need to turn that into an r-value suitable
1678   // for EmitCall.
1679   llvm::Value *local = GetAddrOfLocalVar(param);
1680 
1681   QualType type = param->getType();
1682 
1683   // For the most part, we just need to load the alloca, except:
1684   // 1) aggregate r-values are actually pointers to temporaries, and
1685   // 2) references to aggregates are pointers directly to the aggregate.
1686   // I don't know why references to non-aggregates are different here.
1687   if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
1688     if (hasAggregateLLVMType(ref->getPointeeType()))
1689       return args.add(RValue::getAggregate(local), type);
1690 
1691     // Locals which are references to scalars are represented
1692     // with allocas holding the pointer.
1693     return args.add(RValue::get(Builder.CreateLoad(local)), type);
1694   }
1695 
1696   if (type->isAnyComplexType()) {
1697     ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
1698     return args.add(RValue::getComplex(complex), type);
1699   }
1700 
1701   if (hasAggregateLLVMType(type))
1702     return args.add(RValue::getAggregate(local), type);
1703 
1704   unsigned alignment = getContext().getDeclAlign(param).getQuantity();
1705   llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
1706   return args.add(RValue::get(value), type);
1707 }
1708 
1709 static bool isProvablyNull(llvm::Value *addr) {
1710   return isa<llvm::ConstantPointerNull>(addr);
1711 }
1712 
1713 static bool isProvablyNonNull(llvm::Value *addr) {
1714   return isa<llvm::AllocaInst>(addr);
1715 }
1716 
1717 /// Emit the actual writing-back of a writeback.
1718 static void emitWriteback(CodeGenFunction &CGF,
1719                           const CallArgList::Writeback &writeback) {
1720   llvm::Value *srcAddr = writeback.Address;
1721   assert(!isProvablyNull(srcAddr) &&
1722          "shouldn't have writeback for provably null argument");
1723 
1724   llvm::BasicBlock *contBB = 0;
1725 
1726   // If the argument wasn't provably non-null, we need to null check
1727   // before doing the store.
1728   bool provablyNonNull = isProvablyNonNull(srcAddr);
1729   if (!provablyNonNull) {
1730     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
1731     contBB = CGF.createBasicBlock("icr.done");
1732 
1733     llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1734     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
1735     CGF.EmitBlock(writebackBB);
1736   }
1737 
1738   // Load the value to writeback.
1739   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
1740 
1741   // Cast it back, in case we're writing an id to a Foo* or something.
1742   value = CGF.Builder.CreateBitCast(value,
1743                cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
1744                             "icr.writeback-cast");
1745 
1746   // Perform the writeback.
1747   QualType srcAddrType = writeback.AddressType;
1748   CGF.EmitStoreThroughLValue(RValue::get(value),
1749                              CGF.MakeAddrLValue(srcAddr, srcAddrType));
1750 
1751   // Jump to the continuation block.
1752   if (!provablyNonNull)
1753     CGF.EmitBlock(contBB);
1754 }
1755 
1756 static void emitWritebacks(CodeGenFunction &CGF,
1757                            const CallArgList &args) {
1758   for (CallArgList::writeback_iterator
1759          i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
1760     emitWriteback(CGF, *i);
1761 }
1762 
1763 /// Emit an argument that's being passed call-by-writeback.  That is,
1764 /// we are passing the address of
1765 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
1766                              const ObjCIndirectCopyRestoreExpr *CRE) {
1767   llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
1768 
1769   // The dest and src types don't necessarily match in LLVM terms
1770   // because of the crazy ObjC compatibility rules.
1771 
1772   llvm::PointerType *destType =
1773     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
1774 
1775   // If the address is a constant null, just pass the appropriate null.
1776   if (isProvablyNull(srcAddr)) {
1777     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
1778              CRE->getType());
1779     return;
1780   }
1781 
1782   QualType srcAddrType =
1783     CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
1784 
1785   // Create the temporary.
1786   llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
1787                                            "icr.temp");
1788   // Loading an l-value can introduce a cleanup if the l-value is __weak,
1789   // and that cleanup will be conditional if we can't prove that the l-value
1790   // isn't null, so we need to register a dominating point so that the cleanups
1791   // system will make valid IR.
1792   CodeGenFunction::ConditionalEvaluation condEval(CGF);
1793 
1794   // Zero-initialize it if we're not doing a copy-initialization.
1795   bool shouldCopy = CRE->shouldCopy();
1796   if (!shouldCopy) {
1797     llvm::Value *null =
1798       llvm::ConstantPointerNull::get(
1799         cast<llvm::PointerType>(destType->getElementType()));
1800     CGF.Builder.CreateStore(null, temp);
1801   }
1802 
1803   llvm::BasicBlock *contBB = 0;
1804 
1805   // If the address is *not* known to be non-null, we need to switch.
1806   llvm::Value *finalArgument;
1807 
1808   bool provablyNonNull = isProvablyNonNull(srcAddr);
1809   if (provablyNonNull) {
1810     finalArgument = temp;
1811   } else {
1812     llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1813 
1814     finalArgument = CGF.Builder.CreateSelect(isNull,
1815                                    llvm::ConstantPointerNull::get(destType),
1816                                              temp, "icr.argument");
1817 
1818     // If we need to copy, then the load has to be conditional, which
1819     // means we need control flow.
1820     if (shouldCopy) {
1821       contBB = CGF.createBasicBlock("icr.cont");
1822       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
1823       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
1824       CGF.EmitBlock(copyBB);
1825       condEval.begin(CGF);
1826     }
1827   }
1828 
1829   // Perform a copy if necessary.
1830   if (shouldCopy) {
1831     LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
1832     RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
1833     assert(srcRV.isScalar());
1834 
1835     llvm::Value *src = srcRV.getScalarVal();
1836     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
1837                                     "icr.cast");
1838 
1839     // Use an ordinary store, not a store-to-lvalue.
1840     CGF.Builder.CreateStore(src, temp);
1841   }
1842 
1843   // Finish the control flow if we needed it.
1844   if (shouldCopy && !provablyNonNull) {
1845     CGF.EmitBlock(contBB);
1846     condEval.end(CGF);
1847   }
1848 
1849   args.addWriteback(srcAddr, srcAddrType, temp);
1850   args.add(RValue::get(finalArgument), CRE->getType());
1851 }
1852 
1853 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
1854                                   QualType type) {
1855   if (const ObjCIndirectCopyRestoreExpr *CRE
1856         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
1857     assert(getLangOpts().ObjCAutoRefCount);
1858     assert(getContext().hasSameType(E->getType(), type));
1859     return emitWritebackArg(*this, args, CRE);
1860   }
1861 
1862   assert(type->isReferenceType() == E->isGLValue() &&
1863          "reference binding to unmaterialized r-value!");
1864 
1865   if (E->isGLValue()) {
1866     assert(E->getObjectKind() == OK_Ordinary);
1867     return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
1868                     type);
1869   }
1870 
1871   if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
1872       isa<ImplicitCastExpr>(E) &&
1873       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
1874     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
1875     assert(L.isSimple());
1876     args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
1877     return;
1878   }
1879 
1880   args.add(EmitAnyExprToTemp(E), type);
1881 }
1882 
1883 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
1884 // optimizer it can aggressively ignore unwind edges.
1885 void
1886 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
1887   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
1888       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
1889     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
1890                       CGM.getNoObjCARCExceptionsMetadata());
1891 }
1892 
1893 /// Emits a call or invoke instruction to the given function, depending
1894 /// on the current state of the EH stack.
1895 llvm::CallSite
1896 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
1897                                   ArrayRef<llvm::Value *> Args,
1898                                   const Twine &Name) {
1899   llvm::BasicBlock *InvokeDest = getInvokeDest();
1900 
1901   llvm::Instruction *Inst;
1902   if (!InvokeDest)
1903     Inst = Builder.CreateCall(Callee, Args, Name);
1904   else {
1905     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
1906     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
1907     EmitBlock(ContBB);
1908   }
1909 
1910   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
1911   // optimizer it can aggressively ignore unwind edges.
1912   if (CGM.getLangOpts().ObjCAutoRefCount)
1913     AddObjCARCExceptionMetadata(Inst);
1914 
1915   return Inst;
1916 }
1917 
1918 llvm::CallSite
1919 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
1920                                   const Twine &Name) {
1921   return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
1922 }
1923 
1924 static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
1925                             llvm::FunctionType *FTy) {
1926   if (ArgNo < FTy->getNumParams())
1927     assert(Elt->getType() == FTy->getParamType(ArgNo));
1928   else
1929     assert(FTy->isVarArg());
1930   ++ArgNo;
1931 }
1932 
1933 void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
1934                                        SmallVector<llvm::Value*,16> &Args,
1935                                        llvm::FunctionType *IRFuncTy) {
1936   if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
1937     unsigned NumElts = AT->getSize().getZExtValue();
1938     QualType EltTy = AT->getElementType();
1939     llvm::Value *Addr = RV.getAggregateAddr();
1940     for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
1941       llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
1942       LValue LV = MakeAddrLValue(EltAddr, EltTy);
1943       RValue EltRV;
1944       if (EltTy->isAnyComplexType())
1945         // FIXME: Volatile?
1946         EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
1947       else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
1948         EltRV = LV.asAggregateRValue();
1949       else
1950         EltRV = EmitLoadOfLValue(LV);
1951       ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
1952     }
1953   } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
1954     RecordDecl *RD = RT->getDecl();
1955     assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
1956     LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);
1957 
1958     if (RD->isUnion()) {
1959       const FieldDecl *LargestFD = 0;
1960       CharUnits UnionSize = CharUnits::Zero();
1961 
1962       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1963            i != e; ++i) {
1964         const FieldDecl *FD = *i;
1965         assert(!FD->isBitField() &&
1966                "Cannot expand structure with bit-field members.");
1967         CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
1968         if (UnionSize < FieldSize) {
1969           UnionSize = FieldSize;
1970           LargestFD = FD;
1971         }
1972       }
1973       if (LargestFD) {
1974         RValue FldRV = EmitRValueForField(LV, LargestFD);
1975         ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
1976       }
1977     } else {
1978       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1979            i != e; ++i) {
1980         FieldDecl *FD = *i;
1981 
1982         RValue FldRV = EmitRValueForField(LV, FD);
1983         ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
1984       }
1985     }
1986   } else if (Ty->isAnyComplexType()) {
1987     ComplexPairTy CV = RV.getComplexVal();
1988     Args.push_back(CV.first);
1989     Args.push_back(CV.second);
1990   } else {
1991     assert(RV.isScalar() &&
1992            "Unexpected non-scalar rvalue during struct expansion.");
1993 
1994     // Insert a bitcast as needed.
1995     llvm::Value *V = RV.getScalarVal();
1996     if (Args.size() < IRFuncTy->getNumParams() &&
1997         V->getType() != IRFuncTy->getParamType(Args.size()))
1998       V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
1999 
2000     Args.push_back(V);
2001   }
2002 }
2003 
2004 
2005 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
2006                                  llvm::Value *Callee,
2007                                  ReturnValueSlot ReturnValue,
2008                                  const CallArgList &CallArgs,
2009                                  const Decl *TargetDecl,
2010                                  llvm::Instruction **callOrInvoke) {
2011   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
2012   SmallVector<llvm::Value*, 16> Args;
2013 
2014   // Handle struct-return functions by passing a pointer to the
2015   // location that we would like to return into.
2016   QualType RetTy = CallInfo.getReturnType();
2017   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
2018 
2019   // IRArgNo - Keep track of the argument number in the callee we're looking at.
2020   unsigned IRArgNo = 0;
2021   llvm::FunctionType *IRFuncTy =
2022     cast<llvm::FunctionType>(
2023                   cast<llvm::PointerType>(Callee->getType())->getElementType());
2024 
2025   // If the call returns a temporary with struct return, create a temporary
2026   // alloca to hold the result, unless one is given to us.
2027   if (CGM.ReturnTypeUsesSRet(CallInfo)) {
2028     llvm::Value *Value = ReturnValue.getValue();
2029     if (!Value)
2030       Value = CreateMemTemp(RetTy);
2031     Args.push_back(Value);
2032     checkArgMatches(Value, IRArgNo, IRFuncTy);
2033   }
2034 
2035   assert(CallInfo.arg_size() == CallArgs.size() &&
2036          "Mismatch between function signature & arguments.");
2037   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
2038   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
2039        I != E; ++I, ++info_it) {
2040     const ABIArgInfo &ArgInfo = info_it->info;
2041     RValue RV = I->RV;
2042 
2043     unsigned TypeAlign =
2044       getContext().getTypeAlignInChars(I->Ty).getQuantity();
2045 
2046     // Insert a padding argument to ensure proper alignment.
2047     if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
2048       Args.push_back(llvm::UndefValue::get(PaddingType));
2049       ++IRArgNo;
2050     }
2051 
2052     switch (ArgInfo.getKind()) {
2053     case ABIArgInfo::Indirect: {
2054       if (RV.isScalar() || RV.isComplex()) {
2055         // Make a temporary alloca to pass the argument.
2056         llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
2057         if (ArgInfo.getIndirectAlign() > AI->getAlignment())
2058           AI->setAlignment(ArgInfo.getIndirectAlign());
2059         Args.push_back(AI);
2060 
2061         if (RV.isScalar())
2062           EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
2063                             TypeAlign, I->Ty);
2064         else
2065           StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
2066 
2067         // Validate argument match.
2068         checkArgMatches(AI, IRArgNo, IRFuncTy);
2069       } else {
2070         // We want to avoid creating an unnecessary temporary+copy here;
2071         // however, we need one in two cases:
2072         // 1. If the argument is not byval, and we are required to copy the
2073         //    source.  (This case doesn't occur on any common architecture.)
2074         // 2. If the argument is byval, RV is not sufficiently aligned, and
2075         //    we cannot force it to be sufficiently aligned.
2076         llvm::Value *Addr = RV.getAggregateAddr();
2077         unsigned Align = ArgInfo.getIndirectAlign();
2078         const llvm::DataLayout *TD = &CGM.getDataLayout();
2079         if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
2080             (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
2081              llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
2082           // Create an aligned temporary, and copy to it.
2083           llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
2084           if (Align > AI->getAlignment())
2085             AI->setAlignment(Align);
2086           Args.push_back(AI);
2087           EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
2088 
2089           // Validate argument match.
2090           checkArgMatches(AI, IRArgNo, IRFuncTy);
2091         } else {
2092           // Skip the extra memcpy call.
2093           Args.push_back(Addr);
2094 
2095           // Validate argument match.
2096           checkArgMatches(Addr, IRArgNo, IRFuncTy);
2097         }
2098       }
2099       break;
2100     }
2101 
2102     case ABIArgInfo::Ignore:
2103       break;
2104 
2105     case ABIArgInfo::Extend:
2106     case ABIArgInfo::Direct: {
2107       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
2108           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
2109           ArgInfo.getDirectOffset() == 0) {
2110         llvm::Value *V;
2111         if (RV.isScalar())
2112           V = RV.getScalarVal();
2113         else
2114           V = Builder.CreateLoad(RV.getAggregateAddr());
2115 
2116         // If the argument doesn't match, perform a bitcast to coerce it.  This
2117         // can happen due to trivial type mismatches.
2118         if (IRArgNo < IRFuncTy->getNumParams() &&
2119             V->getType() != IRFuncTy->getParamType(IRArgNo))
2120           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
2121         Args.push_back(V);
2122 
2123         checkArgMatches(V, IRArgNo, IRFuncTy);
2124         break;
2125       }
2126 
2127       // FIXME: Avoid the conversion through memory if possible.
2128       llvm::Value *SrcPtr;
2129       if (RV.isScalar()) {
2130         SrcPtr = CreateMemTemp(I->Ty, "coerce");
2131         EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
2132       } else if (RV.isComplex()) {
2133         SrcPtr = CreateMemTemp(I->Ty, "coerce");
2134         StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
2135       } else
2136         SrcPtr = RV.getAggregateAddr();
2137 
2138       // If the value is offset in memory, apply the offset now.
2139       if (unsigned Offs = ArgInfo.getDirectOffset()) {
2140         SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
2141         SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
2142         SrcPtr = Builder.CreateBitCast(SrcPtr,
2143                        llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
2144 
2145       }
2146 
2147       // If the coerce-to type is a first class aggregate, we flatten it and
2148       // pass the elements. Either way is semantically identical, but fast-isel
2149       // and the optimizer generally likes scalar values better than FCAs.
2150       if (llvm::StructType *STy =
2151             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
2152         llvm::Type *SrcTy =
2153           cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
2154         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
2155         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
2156 
2157         // If the source type is smaller than the destination type of the
2158         // coerce-to logic, copy the source value into a temp alloca the size
2159         // of the destination type to allow loading all of it. The bits past
2160         // the source value are left undef.
2161         if (SrcSize < DstSize) {
2162           llvm::AllocaInst *TempAlloca
2163             = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
2164           Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
2165           SrcPtr = TempAlloca;
2166         } else {
2167           SrcPtr = Builder.CreateBitCast(SrcPtr,
2168                                          llvm::PointerType::getUnqual(STy));
2169         }
2170 
2171         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2172           llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
2173           llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
2174           // We don't know what we're loading from.
2175           LI->setAlignment(1);
2176           Args.push_back(LI);
2177 
2178           // Validate argument match.
2179           checkArgMatches(LI, IRArgNo, IRFuncTy);
2180         }
2181       } else {
2182         // In the simple case, just pass the coerced loaded value.
2183         Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
2184                                          *this));
2185 
2186         // Validate argument match.
2187         checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
2188       }
2189 
2190       break;
2191     }
2192 
2193     case ABIArgInfo::Expand:
2194       ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
2195       IRArgNo = Args.size();
2196       break;
2197     }
2198   }
2199 
2200   // If the callee is a bitcast of a function to a varargs pointer to function
2201   // type, check to see if we can remove the bitcast.  This handles some cases
2202   // with unprototyped functions.
2203   if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
2204     if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
2205       llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
2206       llvm::FunctionType *CurFT =
2207         cast<llvm::FunctionType>(CurPT->getElementType());
2208       llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
2209 
2210       if (CE->getOpcode() == llvm::Instruction::BitCast &&
2211           ActualFT->getReturnType() == CurFT->getReturnType() &&
2212           ActualFT->getNumParams() == CurFT->getNumParams() &&
2213           ActualFT->getNumParams() == Args.size() &&
2214           (CurFT->isVarArg() || !ActualFT->isVarArg())) {
2215         bool ArgsMatch = true;
2216         for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
2217           if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
2218             ArgsMatch = false;
2219             break;
2220           }
2221 
2222         // Strip the cast if we can get away with it.  This is a nice cleanup,
2223         // but also allows us to inline the function at -O0 if it is marked
2224         // always_inline.
2225         if (ArgsMatch)
2226           Callee = CalleeF;
2227       }
2228     }
2229 
2230   unsigned CallingConv;
2231   CodeGen::AttributeListType AttributeList;
2232   CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
2233   llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
2234                                                    AttributeList);
2235 
2236   llvm::BasicBlock *InvokeDest = 0;
2237   if (!Attrs.getFnAttributes().hasAttribute(llvm::Attributes::NoUnwind))
2238     InvokeDest = getInvokeDest();
2239 
2240   llvm::CallSite CS;
2241   if (!InvokeDest) {
2242     CS = Builder.CreateCall(Callee, Args);
2243   } else {
2244     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
2245     CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
2246     EmitBlock(Cont);
2247   }
2248   if (callOrInvoke)
2249     *callOrInvoke = CS.getInstruction();
2250 
2251   CS.setAttributes(Attrs);
2252   CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
2253 
2254   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2255   // optimizer it can aggressively ignore unwind edges.
2256   if (CGM.getLangOpts().ObjCAutoRefCount)
2257     AddObjCARCExceptionMetadata(CS.getInstruction());
2258 
2259   // If the call doesn't return, finish the basic block and clear the
2260   // insertion point; this allows the rest of IRgen to discard
2261   // unreachable code.
2262   if (CS.doesNotReturn()) {
2263     Builder.CreateUnreachable();
2264     Builder.ClearInsertionPoint();
2265 
2266     // FIXME: For now, emit a dummy basic block because expr emitters in
2267     // generally are not ready to handle emitting expressions at unreachable
2268     // points.
2269     EnsureInsertPoint();
2270 
2271     // Return a reasonable RValue.
2272     return GetUndefRValue(RetTy);
2273   }
2274 
2275   llvm::Instruction *CI = CS.getInstruction();
2276   if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
2277     CI->setName("call");
2278 
2279   // Emit any writebacks immediately.  Arguably this should happen
2280   // after any return-value munging.
2281   if (CallArgs.hasWritebacks())
2282     emitWritebacks(*this, CallArgs);
2283 
2284   switch (RetAI.getKind()) {
2285   case ABIArgInfo::Indirect: {
2286     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
2287     if (RetTy->isAnyComplexType())
2288       return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
2289     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
2290       return RValue::getAggregate(Args[0]);
2291     return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
2292   }
2293 
2294   case ABIArgInfo::Ignore:
2295     // If we are ignoring an argument that had a result, make sure to
2296     // construct the appropriate return value for our caller.
2297     return GetUndefRValue(RetTy);
2298 
2299   case ABIArgInfo::Extend:
2300   case ABIArgInfo::Direct: {
2301     llvm::Type *RetIRTy = ConvertType(RetTy);
2302     if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
2303       if (RetTy->isAnyComplexType()) {
2304         llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
2305         llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
2306         return RValue::getComplex(std::make_pair(Real, Imag));
2307       }
2308       if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
2309         llvm::Value *DestPtr = ReturnValue.getValue();
2310         bool DestIsVolatile = ReturnValue.isVolatile();
2311 
2312         if (!DestPtr) {
2313           DestPtr = CreateMemTemp(RetTy, "agg.tmp");
2314           DestIsVolatile = false;
2315         }
2316         BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
2317         return RValue::getAggregate(DestPtr);
2318       }
2319 
2320       // If the argument doesn't match, perform a bitcast to coerce it.  This
2321       // can happen due to trivial type mismatches.
2322       llvm::Value *V = CI;
2323       if (V->getType() != RetIRTy)
2324         V = Builder.CreateBitCast(V, RetIRTy);
2325       return RValue::get(V);
2326     }
2327 
2328     llvm::Value *DestPtr = ReturnValue.getValue();
2329     bool DestIsVolatile = ReturnValue.isVolatile();
2330 
2331     if (!DestPtr) {
2332       DestPtr = CreateMemTemp(RetTy, "coerce");
2333       DestIsVolatile = false;
2334     }
2335 
2336     // If the value is offset in memory, apply the offset now.
2337     llvm::Value *StorePtr = DestPtr;
2338     if (unsigned Offs = RetAI.getDirectOffset()) {
2339       StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
2340       StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
2341       StorePtr = Builder.CreateBitCast(StorePtr,
2342                          llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
2343     }
2344     CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
2345 
2346     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
2347     if (RetTy->isAnyComplexType())
2348       return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
2349     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
2350       return RValue::getAggregate(DestPtr);
2351     return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
2352   }
2353 
2354   case ABIArgInfo::Expand:
2355     llvm_unreachable("Invalid ABI kind for return argument");
2356   }
2357 
2358   llvm_unreachable("Unhandled ABIArgInfo::Kind");
2359 }
2360 
2361 /* VarArg handling */
2362 
2363 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
2364   return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
2365 }
2366