1 //===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
// definition used to handle ABI compliance.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "CGCall.h"
16 #include "ABIInfo.h"
17 #include "CGCXXABI.h"
18 #include "CodeGenFunction.h"
19 #include "CodeGenModule.h"
20 #include "TargetInfo.h"
21 #include "clang/AST/Decl.h"
22 #include "clang/AST/DeclCXX.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/Basic/TargetInfo.h"
25 #include "clang/Frontend/CodeGenOptions.h"
26 #include "llvm/IR/Attributes.h"
27 #include "llvm/IR/DataLayout.h"
28 #include "llvm/IR/InlineAsm.h"
29 #include "llvm/Support/CallSite.h"
30 #include "llvm/Transforms/Utils/Local.h"
31 using namespace clang;
32 using namespace CodeGen;
33 
34 /***/
35 
36 static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
37   switch (CC) {
38   default: return llvm::CallingConv::C;
39   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
40   case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
41   case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
42   case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
43   case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
44   case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
45   // TODO: add support for CC_X86Pascal to llvm
46   }
47 }
48 
49 /// Derives the 'this' type for codegen purposes, i.e. ignoring method
50 /// qualification.
51 /// FIXME: address space qualification?
52 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
53   QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
54   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
55 }
56 
57 /// Returns the canonical formal type of the given C++ method.
58 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
59   return MD->getType()->getCanonicalTypeUnqualified()
60            .getAs<FunctionProtoType>();
61 }
62 
63 /// Returns the "extra-canonicalized" return type, which discards
64 /// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes the ABI code a little simpler when it can assume that
/// all parameter and return types are top-level unqualified.
67 static CanQualType GetReturnType(QualType RetTy) {
68   return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
69 }
70 
71 /// Arrange the argument and result information for a value of the given
72 /// unprototyped freestanding function type.
73 const CGFunctionInfo &
74 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
75   // When translating an unprototyped function type, always use a
76   // variadic type.
77   return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
78                                  ArrayRef<CanQualType>(),
79                                  FTNP->getExtInfo(),
80                                  RequiredArgs(0));
81 }
82 
83 /// Arrange the LLVM function layout for a value of the given function
84 /// type, on top of any implicit parameters already stored.  Use the
85 /// given ExtInfo instead of the ExtInfo from the function type.
86 static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
87                                        SmallVectorImpl<CanQualType> &prefix,
88                                              CanQual<FunctionProtoType> FTP,
89                                               FunctionType::ExtInfo extInfo) {
90   RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
91   // FIXME: Kill copy.
92   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
93     prefix.push_back(FTP->getArgType(i));
94   CanQualType resultType = FTP->getResultType().getUnqualifiedType();
95   return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
96 }
97 
98 /// Arrange the argument and result information for a free function (i.e.
99 /// not a C++ or ObjC instance method) of the given type.
100 static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
101                                       SmallVectorImpl<CanQualType> &prefix,
102                                             CanQual<FunctionProtoType> FTP) {
103   return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
104 }
105 
106 /// Given the formal ext-info of a C++ instance method, adjust it
107 /// according to the C++ ABI in effect.
108 static void adjustCXXMethodInfo(CodeGenTypes &CGT,
109                                 FunctionType::ExtInfo &extInfo,
110                                 bool isVariadic) {
111   if (extInfo.getCC() == CC_Default) {
112     CallingConv CC = CGT.getContext().getDefaultCXXMethodCallConv(isVariadic);
113     extInfo = extInfo.withCallingConv(CC);
114   }
115 }
116 
/// Arrange the argument and result information for a C++ method of the given
/// type, on top of any implicit parameters already stored, adjusting the
/// ext-info for the C++ ABI in effect.
119 static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
120                                       SmallVectorImpl<CanQualType> &prefix,
121                                             CanQual<FunctionProtoType> FTP) {
122   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
123   adjustCXXMethodInfo(CGT, extInfo, FTP->isVariadic());
124   return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
125 }
126 
127 /// Arrange the argument and result information for a value of the
128 /// given freestanding function type.
129 const CGFunctionInfo &
130 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
131   SmallVector<CanQualType, 16> argTypes;
132   return ::arrangeFreeFunctionType(*this, argTypes, FTP);
133 }
134 
135 static CallingConv getCallingConventionForDecl(const Decl *D) {
136   // Set the appropriate calling convention for the Function.
137   if (D->hasAttr<StdCallAttr>())
138     return CC_X86StdCall;
139 
140   if (D->hasAttr<FastCallAttr>())
141     return CC_X86FastCall;
142 
143   if (D->hasAttr<ThisCallAttr>())
144     return CC_X86ThisCall;
145 
146   if (D->hasAttr<PascalAttr>())
147     return CC_X86Pascal;
148 
149   if (PcsAttr *PCS = D->getAttr<PcsAttr>())
150     return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
151 
152   if (D->hasAttr<PnaclCallAttr>())
153     return CC_PnaclCall;
154 
155   if (D->hasAttr<IntelOclBiccAttr>())
156     return CC_IntelOclBicc;
157 
158   return CC_C;
159 }
160 
161 /// Arrange the argument and result information for a call to an
162 /// unknown C++ non-static member function of the given abstract type.
163 /// The member function must be an ordinary function, i.e. not a
164 /// constructor or destructor.
165 const CGFunctionInfo &
166 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
167                                    const FunctionProtoType *FTP) {
168   SmallVector<CanQualType, 16> argTypes;
169 
170   // Add the 'this' pointer.
171   argTypes.push_back(GetThisType(Context, RD));
172 
173   return ::arrangeCXXMethodType(*this, argTypes,
174               FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
175 }
176 
177 /// Arrange the argument and result information for a declaration or
178 /// definition of the given C++ non-static member function.  The
179 /// member function must be an ordinary function, i.e. not a
180 /// constructor or destructor.
181 const CGFunctionInfo &
182 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
184   assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
185 
186   CanQual<FunctionProtoType> prototype = GetFormalType(MD);
187 
188   if (MD->isInstance()) {
189     // The abstract case is perfectly fine.
190     return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
191   }
192 
193   return arrangeFreeFunctionType(prototype);
194 }
195 
196 /// Arrange the argument and result information for a declaration
/// or definition of the given constructor variant.
198 const CGFunctionInfo &
199 CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
200                                                CXXCtorType ctorKind) {
201   SmallVector<CanQualType, 16> argTypes;
202   argTypes.push_back(GetThisType(Context, D->getParent()));
203   CanQualType resultType = Context.VoidTy;
204 
205   TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
206 
207   CanQual<FunctionProtoType> FTP = GetFormalType(D);
208 
209   RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
210 
211   // Add the formal parameters.
212   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
213     argTypes.push_back(FTP->getArgType(i));
214 
215   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
216   adjustCXXMethodInfo(*this, extInfo, FTP->isVariadic());
217   return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
218 }
219 
220 /// Arrange the argument and result information for a declaration,
221 /// definition, or call to the given destructor variant.  It so
222 /// happens that all three cases produce the same information.
223 const CGFunctionInfo &
224 CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
225                                    CXXDtorType dtorKind) {
226   SmallVector<CanQualType, 2> argTypes;
227   argTypes.push_back(GetThisType(Context, D->getParent()));
228   CanQualType resultType = Context.VoidTy;
229 
230   TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
231 
232   CanQual<FunctionProtoType> FTP = GetFormalType(D);
233   assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
  assert(!FTP->isVariadic() && "dtor with variadic signature");
235 
236   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
237   adjustCXXMethodInfo(*this, extInfo, false);
238   return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
239                                  RequiredArgs::All);
240 }
241 
242 /// Arrange the argument and result information for the declaration or
243 /// definition of the given function.
244 const CGFunctionInfo &
245 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
246   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
247     if (MD->isInstance())
248       return arrangeCXXMethodDeclaration(MD);
249 
250   CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
251 
252   assert(isa<FunctionType>(FTy));
253 
254   // When declaring a function without a prototype, always use a
255   // non-variadic type.
256   if (isa<FunctionNoProtoType>(FTy)) {
257     CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
258     return arrangeLLVMFunctionInfo(noProto->getResultType(),
259                                    ArrayRef<CanQualType>(),
260                                    noProto->getExtInfo(),
261                                    RequiredArgs::All);
262   }
263 
264   assert(isa<FunctionProtoType>(FTy));
265   return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
266 }
267 
268 /// Arrange the argument and result information for the declaration or
269 /// definition of an Objective-C method.
270 const CGFunctionInfo &
271 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
272   // It happens that this is the same as a call with no optional
273   // arguments, except also using the formal 'self' type.
274   return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
275 }
276 
277 /// Arrange the argument and result information for the function type
278 /// through which to perform a send to the given Objective-C method,
279 /// using the given receiver type.  The receiver type is not always
280 /// the 'self' type of the method or even an Objective-C pointer type.
281 /// This is *not* the right method for actually performing such a
282 /// message send, due to the possibility of optional arguments.
283 const CGFunctionInfo &
284 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
285                                               QualType receiverType) {
286   SmallVector<CanQualType, 16> argTys;
287   argTys.push_back(Context.getCanonicalParamType(receiverType));
288   argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
289   // FIXME: Kill copy?
290   for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
291          e = MD->param_end(); i != e; ++i) {
292     argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
293   }
294 
295   FunctionType::ExtInfo einfo;
296   einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
297 
298   if (getContext().getLangOpts().ObjCAutoRefCount &&
299       MD->hasAttr<NSReturnsRetainedAttr>())
300     einfo = einfo.withProducesResult(true);
301 
302   RequiredArgs required =
303     (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
304 
305   return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
306                                  einfo, required);
307 }
308 
309 const CGFunctionInfo &
310 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
311   // FIXME: Do we need to handle ObjCMethodDecl?
312   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
313 
314   if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
315     return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());
316 
317   if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
318     return arrangeCXXDestructor(DD, GD.getDtorType());
319 
320   return arrangeFunctionDeclaration(FD);
321 }
322 
323 /// Arrange a call as unto a free function, except possibly with an
324 /// additional number of formal parameters considered required.
325 static const CGFunctionInfo &
326 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
327                             const CallArgList &args,
328                             const FunctionType *fnType,
329                             unsigned numExtraRequiredArgs) {
330   assert(args.size() >= numExtraRequiredArgs);
331 
332   // In most cases, there are no optional arguments.
333   RequiredArgs required = RequiredArgs::All;
334 
335   // If we have a variadic prototype, the required arguments are the
336   // extra prefix plus the arguments in the prototype.
337   if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
338     if (proto->isVariadic())
339       required = RequiredArgs(proto->getNumArgs() + numExtraRequiredArgs);
340 
341   // If we don't have a prototype at all, but we're supposed to
342   // explicitly use the variadic convention for unprototyped calls,
343   // treat all of the arguments as required but preserve the nominal
344   // possibility of variadics.
345   } else if (CGT.CGM.getTargetCodeGenInfo()
346                .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
347     required = RequiredArgs(args.size());
348   }
349 
350   return CGT.arrangeFreeFunctionCall(fnType->getResultType(), args,
351                                      fnType->getExtInfo(), required);
352 }
353 
354 /// Figure out the rules for calling a function with the given formal
355 /// type using the given arguments.  The arguments are necessary
356 /// because the function might be unprototyped, in which case it's
357 /// target-dependent in crazy ways.
358 const CGFunctionInfo &
359 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
360                                       const FunctionType *fnType) {
361   return arrangeFreeFunctionLikeCall(*this, args, fnType, 0);
362 }
363 
364 /// A block function call is essentially a free-function call with an
365 /// extra implicit argument.
366 const CGFunctionInfo &
367 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
368                                        const FunctionType *fnType) {
369   return arrangeFreeFunctionLikeCall(*this, args, fnType, 1);
370 }
371 
372 const CGFunctionInfo &
373 CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
374                                       const CallArgList &args,
375                                       FunctionType::ExtInfo info,
376                                       RequiredArgs required) {
377   // FIXME: Kill copy.
378   SmallVector<CanQualType, 16> argTypes;
379   for (CallArgList::const_iterator i = args.begin(), e = args.end();
380        i != e; ++i)
381     argTypes.push_back(Context.getCanonicalParamType(i->Ty));
382   return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
383                                  required);
384 }
385 
386 /// Arrange a call to a C++ method, passing the given arguments.
387 const CGFunctionInfo &
388 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
389                                    const FunctionProtoType *FPT,
390                                    RequiredArgs required) {
391   // FIXME: Kill copy.
392   SmallVector<CanQualType, 16> argTypes;
393   for (CallArgList::const_iterator i = args.begin(), e = args.end();
394        i != e; ++i)
395     argTypes.push_back(Context.getCanonicalParamType(i->Ty));
396 
397   FunctionType::ExtInfo info = FPT->getExtInfo();
398   adjustCXXMethodInfo(*this, info, FPT->isVariadic());
399   return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
400                                  argTypes, info, required);
401 }
402 
403 const CGFunctionInfo &
404 CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
405                                          const FunctionArgList &args,
406                                          const FunctionType::ExtInfo &info,
407                                          bool isVariadic) {
408   // FIXME: Kill copy.
409   SmallVector<CanQualType, 16> argTypes;
410   for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
411        i != e; ++i)
412     argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));
413 
414   RequiredArgs required =
415     (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
416   return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
417                                  required);
418 }
419 
420 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
421   return arrangeLLVMFunctionInfo(getContext().VoidTy, ArrayRef<CanQualType>(),
422                                  FunctionType::ExtInfo(), RequiredArgs::All);
423 }
424 
425 /// Arrange the argument and result information for an abstract value
426 /// of a given function type.  This is the method which all of the
427 /// above functions ultimately defer to.
428 const CGFunctionInfo &
429 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
430                                       ArrayRef<CanQualType> argTypes,
431                                       FunctionType::ExtInfo info,
432                                       RequiredArgs required) {
433 #ifndef NDEBUG
434   for (ArrayRef<CanQualType>::const_iterator
435          I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
436     assert(I->isCanonicalAsParam());
437 #endif
438 
439   unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
440 
441   // Lookup or create unique function info.
442   llvm::FoldingSetNodeID ID;
443   CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);
444 
445   void *insertPos = 0;
446   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
447   if (FI)
448     return *FI;
449 
450   // Construct the function info.  We co-allocate the ArgInfos.
451   FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
452   FunctionInfos.InsertNode(FI, insertPos);
453 
454   bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
455   assert(inserted && "Recursively being processed?");
456 
457   // Compute ABI information.
458   getABIInfo().computeInfo(*FI);
459 
460   // Loop over all of the computed argument and return value info.  If any of
461   // them are direct or extend without a specified coerce type, specify the
462   // default now.
463   ABIArgInfo &retInfo = FI->getReturnInfo();
464   if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
465     retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
466 
467   for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
468        I != E; ++I)
469     if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
470       I->info.setCoerceToType(ConvertType(I->type));
471 
472   bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
473   assert(erased && "Not in set?");
474 
475   return *FI;
476 }
477 
478 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
479                                        const FunctionType::ExtInfo &info,
480                                        CanQualType resultType,
481                                        ArrayRef<CanQualType> argTypes,
482                                        RequiredArgs required) {
483   void *buffer = operator new(sizeof(CGFunctionInfo) +
484                               sizeof(ArgInfo) * (argTypes.size() + 1));
485   CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
486   FI->CallingConvention = llvmCC;
487   FI->EffectiveCallingConvention = llvmCC;
488   FI->ASTCallingConvention = info.getCC();
489   FI->NoReturn = info.getNoReturn();
490   FI->ReturnsRetained = info.getProducesResult();
491   FI->Required = required;
492   FI->HasRegParm = info.getHasRegParm();
493   FI->RegParm = info.getRegParm();
494   FI->NumArgs = argTypes.size();
495   FI->getArgsBuffer()[0].type = resultType;
496   for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
497     FI->getArgsBuffer()[i + 1].type = argTypes[i];
498   return FI;
499 }
500 
501 /***/
502 
503 void CodeGenTypes::GetExpandedTypes(QualType type,
504                      SmallVectorImpl<llvm::Type*> &expandedTypes) {
505   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
506     uint64_t NumElts = AT->getSize().getZExtValue();
507     for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
508       GetExpandedTypes(AT->getElementType(), expandedTypes);
509   } else if (const RecordType *RT = type->getAs<RecordType>()) {
510     const RecordDecl *RD = RT->getDecl();
511     assert(!RD->hasFlexibleArrayMember() &&
512            "Cannot expand structure with flexible array.");
513     if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
516       const FieldDecl *LargestFD = 0;
517       CharUnits UnionSize = CharUnits::Zero();
518 
519       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
520            i != e; ++i) {
521         const FieldDecl *FD = *i;
522         assert(!FD->isBitField() &&
523                "Cannot expand structure with bit-field members.");
524         CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
525         if (UnionSize < FieldSize) {
526           UnionSize = FieldSize;
527           LargestFD = FD;
528         }
529       }
530       if (LargestFD)
531         GetExpandedTypes(LargestFD->getType(), expandedTypes);
532     } else {
533       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
534            i != e; ++i) {
535         assert(!i->isBitField() &&
536                "Cannot expand structure with bit-field members.");
537         GetExpandedTypes(i->getType(), expandedTypes);
538       }
539     }
540   } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
541     llvm::Type *EltTy = ConvertType(CT->getElementType());
542     expandedTypes.push_back(EltTy);
543     expandedTypes.push_back(EltTy);
544   } else
545     expandedTypes.push_back(ConvertType(type));
546 }
547 
548 llvm::Function::arg_iterator
549 CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
550                                     llvm::Function::arg_iterator AI) {
551   assert(LV.isSimple() &&
552          "Unexpected non-simple lvalue during struct expansion.");
553 
554   if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
555     unsigned NumElts = AT->getSize().getZExtValue();
556     QualType EltTy = AT->getElementType();
557     for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
558       llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
559       LValue LV = MakeAddrLValue(EltAddr, EltTy);
560       AI = ExpandTypeFromArgs(EltTy, LV, AI);
561     }
562   } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
563     RecordDecl *RD = RT->getDecl();
564     if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
567       const FieldDecl *LargestFD = 0;
568       CharUnits UnionSize = CharUnits::Zero();
569 
570       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
571            i != e; ++i) {
572         const FieldDecl *FD = *i;
573         assert(!FD->isBitField() &&
574                "Cannot expand structure with bit-field members.");
575         CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
576         if (UnionSize < FieldSize) {
577           UnionSize = FieldSize;
578           LargestFD = FD;
579         }
580       }
581       if (LargestFD) {
582         // FIXME: What are the right qualifiers here?
583         LValue SubLV = EmitLValueForField(LV, LargestFD);
584         AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
585       }
586     } else {
587       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
588            i != e; ++i) {
589         FieldDecl *FD = *i;
590         QualType FT = FD->getType();
591 
592         // FIXME: What are the right qualifiers here?
593         LValue SubLV = EmitLValueForField(LV, FD);
594         AI = ExpandTypeFromArgs(FT, SubLV, AI);
595       }
596     }
597   } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
598     QualType EltTy = CT->getElementType();
599     llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
600     EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
601     llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
602     EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
603   } else {
604     EmitStoreThroughLValue(RValue::get(AI), LV);
605     ++AI;
606   }
607 
608   return AI;
609 }
610 
611 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
612 /// accessing some number of bytes out of it, try to gep into the struct to get
613 /// at its inner goodness.  Dive as deep as possible without entering an element
614 /// with an in-memory size smaller than DstSize.
615 static llvm::Value *
616 EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
617                                    llvm::StructType *SrcSTy,
618                                    uint64_t DstSize, CodeGenFunction &CGF) {
619   // We can't dive into a zero-element struct.
620   if (SrcSTy->getNumElements() == 0) return SrcPtr;
621 
622   llvm::Type *FirstElt = SrcSTy->getElementType(0);
623 
624   // If the first elt is at least as large as what we're looking for, or if the
625   // first element is the same size as the whole struct, we can enter it.
626   uint64_t FirstEltSize =
627     CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
628   if (FirstEltSize < DstSize &&
629       FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
630     return SrcPtr;
631 
632   // GEP into the first element.
633   SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
634 
635   // If the first element is a struct, recurse.
636   llvm::Type *SrcTy =
637     cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
638   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
639     return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
640 
641   return SrcPtr;
642 }
643 
644 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
645 /// are either integers or pointers.  This does a truncation of the value if it
646 /// is too large or a zero extension if it is too small.
647 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
648                                              llvm::Type *Ty,
649                                              CodeGenFunction &CGF) {
650   if (Val->getType() == Ty)
651     return Val;
652 
653   if (isa<llvm::PointerType>(Val->getType())) {
654     // If this is Pointer->Pointer avoid conversion to and from int.
655     if (isa<llvm::PointerType>(Ty))
656       return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
657 
658     // Convert the pointer to an integer so we can play with its width.
659     Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
660   }
661 
662   llvm::Type *DestIntTy = Ty;
663   if (isa<llvm::PointerType>(DestIntTy))
664     DestIntTy = CGF.IntPtrTy;
665 
666   if (Val->getType() != DestIntTy)
667     Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
668 
669   if (isa<llvm::PointerType>(Ty))
670     Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
671   return Val;
672 }
673 
674 
675 
676 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
677 /// a pointer to an object of type \arg Ty.
678 ///
679 /// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits that are not
/// present in the src are undefined.
682 static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
683                                       llvm::Type *Ty,
684                                       CodeGenFunction &CGF) {
685   llvm::Type *SrcTy =
686     cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
687 
688   // If SrcTy and Ty are the same, just do a load.
689   if (SrcTy == Ty)
690     return CGF.Builder.CreateLoad(SrcPtr);
691 
692   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
693 
694   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
695     SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
696     SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
697   }
698 
699   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
700 
701   // If the source and destination are integer or pointer types, just do an
702   // extension or truncation to the desired type.
703   if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
704       (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
705     llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
706     return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
707   }
708 
709   // If load is legal, just bitcast the src pointer.
710   if (SrcSize >= DstSize) {
711     // Generally SrcSize is never greater than DstSize, since this means we are
712     // losing bits. However, this can happen in cases where the structure has
713     // additional padding, for example due to a user specified alignment.
714     //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
717     llvm::Value *Casted =
718       CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
719     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
720     // FIXME: Use better alignment / avoid requiring aligned load.
721     Load->setAlignment(1);
722     return Load;
723   }
724 
725   // Otherwise do coercion through memory. This is stupid, but
726   // simple.
727   llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
728   llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
729   llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
730   llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
731   // FIXME: Use better alignment.
732   CGF.Builder.CreateMemCpy(Casted, SrcCasted,
733       llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
734       1, false);
735   return CGF.Builder.CreateLoad(Tmp);
736 }
737 
738 // Function to store a first-class aggregate into memory.  We prefer to
739 // store the elements rather than the aggregate to be more friendly to
740 // fast-isel.
741 // FIXME: Do we need to recurse here?
742 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
743                           llvm::Value *DestPtr, bool DestIsVolatile,
744                           bool LowAlignment) {
745   // Prefer scalar stores to first-class aggregate stores.
746   if (llvm::StructType *STy =
747         dyn_cast<llvm::StructType>(Val->getType())) {
748     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
749       llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
750       llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
751       llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
752                                                     DestIsVolatile);
753       if (LowAlignment)
754         SI->setAlignment(1);
755     }
756   } else {
757     llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
758     if (LowAlignment)
759       SI->setAlignment(1);
760   }
761 }
762 
763 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
764 /// where the source and destination may have different types.
765 ///
766 /// This safely handles the case when the src type is larger than the
767 /// destination type; the upper bits of the src will be lost.
768 static void CreateCoercedStore(llvm::Value *Src,
769                                llvm::Value *DstPtr,
770                                bool DstIsVolatile,
771                                CodeGenFunction &CGF) {
772   llvm::Type *SrcTy = Src->getType();
773   llvm::Type *DstTy =
774     cast<llvm::PointerType>(DstPtr->getType())->getElementType();
775   if (SrcTy == DstTy) {
776     CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
777     return;
778   }
779 
780   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
781 
782   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
783     DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
784     DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
785   }
786 
787   // If the source and destination are integer or pointer types, just do an
788   // extension or truncation to the desired type.
789   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
790       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
791     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
792     CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
793     return;
794   }
795 
796   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
797 
798   // If store is legal, just bitcast the src pointer.
799   if (SrcSize <= DstSize) {
800     llvm::Value *Casted =
801       CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
802     // FIXME: Use better alignment / avoid requiring aligned store.
803     BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
804   } else {
805     // Otherwise do coercion through memory. This is stupid, but
806     // simple.
807 
808     // Generally SrcSize is never greater than DstSize, since this means we are
809     // losing bits. However, this can happen in cases where the structure has
810     // additional padding, for example due to a user specified alignment.
811     //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
814     llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
815     CGF.Builder.CreateStore(Src, Tmp);
816     llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
817     llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
818     llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
819     // FIXME: Use better alignment.
820     CGF.Builder.CreateMemCpy(DstCasted, Casted,
821         llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
822         1, false);
823   }
824 }
825 
826 /***/
827 
828 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
829   return FI.getReturnInfo().isIndirect();
830 }
831 
832 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
833   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
834     switch (BT->getKind()) {
835     default:
836       return false;
837     case BuiltinType::Float:
838       return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Float);
839     case BuiltinType::Double:
840       return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Double);
841     case BuiltinType::LongDouble:
842       return getContext().getTargetInfo().useObjCFPRetForRealType(
843         TargetInfo::LongDouble);
844     }
845   }
846 
847   return false;
848 }
849 
850 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
851   if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
852     if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
853       if (BT->getKind() == BuiltinType::LongDouble)
854         return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
855     }
856   }
857 
858   return false;
859 }
860 
861 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
862   const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
863   return GetFunctionType(FI);
864 }
865 
866 llvm::FunctionType *
867 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
868 
869   bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
870   assert(Inserted && "Recursively being processed?");
871 
872   SmallVector<llvm::Type*, 8> argTypes;
873   llvm::Type *resultType = 0;
874 
875   const ABIArgInfo &retAI = FI.getReturnInfo();
876   switch (retAI.getKind()) {
877   case ABIArgInfo::Expand:
878     llvm_unreachable("Invalid ABI kind for return argument");
879 
880   case ABIArgInfo::Extend:
881   case ABIArgInfo::Direct:
882     resultType = retAI.getCoerceToType();
883     break;
884 
885   case ABIArgInfo::Indirect: {
886     assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
887     resultType = llvm::Type::getVoidTy(getLLVMContext());
888 
889     QualType ret = FI.getReturnType();
890     llvm::Type *ty = ConvertType(ret);
891     unsigned addressSpace = Context.getTargetAddressSpace(ret);
892     argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
893     break;
894   }
895 
896   case ABIArgInfo::Ignore:
897     resultType = llvm::Type::getVoidTy(getLLVMContext());
898     break;
899   }
900 
901   // Add in all of the required arguments.
902   CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
903   if (FI.isVariadic()) {
904     ie = it + FI.getRequiredArgs().getNumRequiredArgs();
905   } else {
906     ie = FI.arg_end();
907   }
908   for (; it != ie; ++it) {
909     const ABIArgInfo &argAI = it->info;
910 
911     // Insert a padding type to ensure proper alignment.
912     if (llvm::Type *PaddingType = argAI.getPaddingType())
913       argTypes.push_back(PaddingType);
914 
915     switch (argAI.getKind()) {
916     case ABIArgInfo::Ignore:
917       break;
918 
919     case ABIArgInfo::Indirect: {
920       // indirect arguments are always on the stack, which is addr space #0.
921       llvm::Type *LTy = ConvertTypeForMem(it->type);
922       argTypes.push_back(LTy->getPointerTo());
923       break;
924     }
925 
926     case ABIArgInfo::Extend:
927     case ABIArgInfo::Direct: {
928       // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
931       llvm::Type *argType = argAI.getCoerceToType();
932       if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
933         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
934           argTypes.push_back(st->getElementType(i));
935       } else {
936         argTypes.push_back(argType);
937       }
938       break;
939     }
940 
941     case ABIArgInfo::Expand:
942       GetExpandedTypes(it->type, argTypes);
943       break;
944     }
945   }
946 
947   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
948   assert(Erased && "Not in set?");
949 
950   return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
951 }
952 
953 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
954   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
955   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
956 
957   if (!isFuncTypeConvertible(FPT))
958     return llvm::StructType::get(getLLVMContext());
959 
960   const CGFunctionInfo *Info;
961   if (isa<CXXDestructorDecl>(MD))
962     Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
963   else
964     Info = &arrangeCXXMethodDeclaration(MD);
965   return GetFunctionType(*Info);
966 }
967 
968 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
969                                            const Decl *TargetDecl,
970                                            AttributeListType &PAL,
971                                            unsigned &CallingConv) {
972   llvm::AttrBuilder FuncAttrs;
973   llvm::AttrBuilder RetAttrs;
974 
975   CallingConv = FI.getEffectiveCallingConvention();
976 
977   if (FI.isNoReturn())
978     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
979 
980   // FIXME: handle sseregparm someday...
981   if (TargetDecl) {
982     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
983       FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
984     if (TargetDecl->hasAttr<NoThrowAttr>())
985       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
986     else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
987       const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
988       if (FPT && FPT->isNothrow(getContext()))
989         FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
990     }
991 
992     if (TargetDecl->hasAttr<NoReturnAttr>() ||
993         TargetDecl->hasAttr<CXX11NoReturnAttr>())
994       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
995 
999     // 'const' and 'pure' attribute functions are also nounwind.
1000     if (TargetDecl->hasAttr<ConstAttr>()) {
1001       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1002       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1003     } else if (TargetDecl->hasAttr<PureAttr>()) {
1004       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1005       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1006     }
1007     if (TargetDecl->hasAttr<MallocAttr>())
1008       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1009   }
1010 
1011   if (CodeGenOpts.OptimizeSize)
1012     FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1013   if (CodeGenOpts.OptimizeSize == 2)
1014     FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1015   if (CodeGenOpts.DisableRedZone)
1016     FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1017   if (CodeGenOpts.NoImplicitFloat)
1018     FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1019 
1020   QualType RetTy = FI.getReturnType();
1021   unsigned Index = 1;
1022   const ABIArgInfo &RetAI = FI.getReturnInfo();
1023   switch (RetAI.getKind()) {
1024   case ABIArgInfo::Extend:
1025    if (RetTy->hasSignedIntegerRepresentation())
1026      RetAttrs.addAttribute(llvm::Attribute::SExt);
1027    else if (RetTy->hasUnsignedIntegerRepresentation())
1028      RetAttrs.addAttribute(llvm::Attribute::ZExt);
1029     break;
1030   case ABIArgInfo::Direct:
1031   case ABIArgInfo::Ignore:
1032     break;
1033 
1034   case ABIArgInfo::Indirect: {
1035     llvm::AttrBuilder SRETAttrs;
1036     SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1037     if (RetAI.getInReg())
1038       SRETAttrs.addAttribute(llvm::Attribute::InReg);
1039     PAL.push_back(llvm::
1040                   AttributeSet::get(getLLVMContext(), Index, SRETAttrs));
1041 
1042     ++Index;
1043     // sret disables readnone and readonly
1044     FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1045       .removeAttribute(llvm::Attribute::ReadNone);
1046     break;
1047   }
1048 
1049   case ABIArgInfo::Expand:
1050     llvm_unreachable("Invalid ABI kind for return argument");
1051   }
1052 
1053   if (RetAttrs.hasAttributes())
1054     PAL.push_back(llvm::
1055                   AttributeSet::get(getLLVMContext(),
1056                                     llvm::AttributeSet::ReturnIndex,
1057                                     RetAttrs));
1058 
1059   for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1060          ie = FI.arg_end(); it != ie; ++it) {
1061     QualType ParamType = it->type;
1062     const ABIArgInfo &AI = it->info;
1063     llvm::AttrBuilder Attrs;
1064 
1065     if (AI.getPaddingType()) {
1066       if (AI.getPaddingInReg())
1067         PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index,
1068                                               llvm::Attribute::InReg));
1069       // Increment Index if there is padding.
1070       ++Index;
1071     }
1072 
1073     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1074     // have the corresponding parameter variable.  It doesn't make
1075     // sense to do it here because parameters are so messed up.
1076     switch (AI.getKind()) {
1077     case ABIArgInfo::Extend:
1078       if (ParamType->isSignedIntegerOrEnumerationType())
1079         Attrs.addAttribute(llvm::Attribute::SExt);
1080       else if (ParamType->isUnsignedIntegerOrEnumerationType())
1081         Attrs.addAttribute(llvm::Attribute::ZExt);
1082       // FALL THROUGH
1083     case ABIArgInfo::Direct:
1084       if (AI.getInReg())
1085         Attrs.addAttribute(llvm::Attribute::InReg);
1086 
1087       // FIXME: handle sseregparm someday...
1088 
1089       if (llvm::StructType *STy =
1090           dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
1091         unsigned Extra = STy->getNumElements()-1;  // 1 will be added below.
1092         if (Attrs.hasAttributes())
1093           for (unsigned I = 0; I < Extra; ++I)
1094             PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index + I,
1095                                                   Attrs));
1096         Index += Extra;
1097       }
1098       break;
1099 
1100     case ABIArgInfo::Indirect:
1101       if (AI.getInReg())
1102         Attrs.addAttribute(llvm::Attribute::InReg);
1103 
1104       if (AI.getIndirectByVal())
1105         Attrs.addAttribute(llvm::Attribute::ByVal);
1106 
1107       Attrs.addAlignmentAttr(AI.getIndirectAlign());
1108 
1109       // byval disables readnone and readonly.
1110       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1111         .removeAttribute(llvm::Attribute::ReadNone);
1112       break;
1113 
1114     case ABIArgInfo::Ignore:
1115       // Skip increment, no matching LLVM parameter.
1116       continue;
1117 
1118     case ABIArgInfo::Expand: {
1119       SmallVector<llvm::Type*, 8> types;
1120       // FIXME: This is rather inefficient. Do we ever actually need to do
1121       // anything here? The result should be just reconstructed on the other
1122       // side, so extension should be a non-issue.
1123       getTypes().GetExpandedTypes(ParamType, types);
1124       Index += types.size();
1125       continue;
1126     }
1127     }
1128 
1129     if (Attrs.hasAttributes())
1130       PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
1131     ++Index;
1132   }
1133   if (FuncAttrs.hasAttributes())
1134     PAL.push_back(llvm::
1135                   AttributeSet::get(getLLVMContext(),
1136                                     llvm::AttributeSet::FunctionIndex,
1137                                     FuncAttrs));
1138 }
1139 
1140 /// An argument came in as a promoted argument; demote it back to its
1141 /// declared type.
1142 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
1143                                          const VarDecl *var,
1144                                          llvm::Value *value) {
1145   llvm::Type *varType = CGF.ConvertType(var->getType());
1146 
1147   // This can happen with promotions that actually don't change the
1148   // underlying type, like the enum promotions.
1149   if (value->getType() == varType) return value;
1150 
1151   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
1152          && "unexpected promotion type");
1153 
1154   if (isa<llvm::IntegerType>(varType))
1155     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
1156 
1157   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
1158 }
1159 
1160 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
1161                                          llvm::Function *Fn,
1162                                          const FunctionArgList &Args) {
1163   // If this is an implicit-return-zero function, go ahead and
1164   // initialize the return value.  TODO: it might be nice to have
1165   // a more general mechanism for this that didn't require synthesized
1166   // return statements.
1167   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
1168     if (FD->hasImplicitReturnZero()) {
1169       QualType RetTy = FD->getResultType().getUnqualifiedType();
1170       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
1171       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
1172       Builder.CreateStore(Zero, ReturnValue);
1173     }
1174   }
1175 
1176   // FIXME: We no longer need the types from FunctionArgList; lift up and
1177   // simplify.
1178 
1179   // Emit allocs for param decls.  Give the LLVM Argument nodes names.
1180   llvm::Function::arg_iterator AI = Fn->arg_begin();
1181 
1182   // Name the struct return argument.
1183   if (CGM.ReturnTypeUsesSRet(FI)) {
1184     AI->setName("agg.result");
1185     AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1186                                         AI->getArgNo() + 1,
1187                                         llvm::Attribute::NoAlias));
1188     ++AI;
1189   }
1190 
1191   assert(FI.arg_size() == Args.size() &&
1192          "Mismatch between function signature & arguments.");
1193   unsigned ArgNo = 1;
1194   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
1195   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1196        i != e; ++i, ++info_it, ++ArgNo) {
1197     const VarDecl *Arg = *i;
1198     QualType Ty = info_it->type;
1199     const ABIArgInfo &ArgI = info_it->info;
1200 
1201     bool isPromoted =
1202       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
1203 
1204     // Skip the dummy padding argument.
1205     if (ArgI.getPaddingType())
1206       ++AI;
1207 
1208     switch (ArgI.getKind()) {
1209     case ABIArgInfo::Indirect: {
1210       llvm::Value *V = AI;
1211 
1212       if (hasAggregateLLVMType(Ty)) {
1213         // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested.
1215         if (ArgI.getIndirectRealign()) {
1216           llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
1217 
1218           // Copy from the incoming argument pointer to the temporary with the
1219           // appropriate alignment.
1220           //
1221           // FIXME: We should have a common utility for generating an aggregate
1222           // copy.
1223           llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
1224           CharUnits Size = getContext().getTypeSizeInChars(Ty);
1225           llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
1226           llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
1227           Builder.CreateMemCpy(Dst,
1228                                Src,
1229                                llvm::ConstantInt::get(IntPtrTy,
1230                                                       Size.getQuantity()),
1231                                ArgI.getIndirectAlign(),
1232                                false);
1233           V = AlignedTemp;
1234         }
1235       } else {
1236         // Load scalar value from indirect argument.
1237         CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
1238         V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);
1239 
1240         if (isPromoted)
1241           V = emitArgumentDemotion(*this, Arg, V);
1242       }
1243       EmitParmDecl(*Arg, V, ArgNo);
1244       break;
1245     }
1246 
1247     case ABIArgInfo::Extend:
1248     case ABIArgInfo::Direct: {
1249 
1250       // If we have the trivial case, handle it with no muss and fuss.
1251       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
1252           ArgI.getCoerceToType() == ConvertType(Ty) &&
1253           ArgI.getDirectOffset() == 0) {
1254         assert(AI != Fn->arg_end() && "Argument mismatch!");
1255         llvm::Value *V = AI;
1256 
1257         if (Arg->getType().isRestrictQualified())
1258           AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1259                                               AI->getArgNo() + 1,
1260                                               llvm::Attribute::NoAlias));
1261 
1262         // Ensure the argument is the correct type.
1263         if (V->getType() != ArgI.getCoerceToType())
1264           V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
1265 
1266         if (isPromoted)
1267           V = emitArgumentDemotion(*this, Arg, V);
1268 
1269         // Because of merging of function types from multiple decls it is
1270         // possible for the type of an argument to not match the corresponding
1271         // type in the function type. Since we are codegening the callee
1272         // in here, add a cast to the argument type.
1273         llvm::Type *LTy = ConvertType(Arg->getType());
1274         if (V->getType() != LTy)
1275           V = Builder.CreateBitCast(V, LTy);
1276 
1277         EmitParmDecl(*Arg, V, ArgNo);
1278         break;
1279       }
1280 
1281       llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
1282 
1283       // The alignment we need to use is the max of the requested alignment for
      // the argument and the alignment required by our access code below.
1285       unsigned AlignmentToUse =
1286         CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
1287       AlignmentToUse = std::max(AlignmentToUse,
1288                         (unsigned)getContext().getDeclAlign(Arg).getQuantity());
1289 
1290       Alloca->setAlignment(AlignmentToUse);
1291       llvm::Value *V = Alloca;
1292       llvm::Value *Ptr = V;    // Pointer to store into.
1293 
1294       // If the value is offset in memory, apply the offset now.
1295       if (unsigned Offs = ArgI.getDirectOffset()) {
1296         Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
1297         Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
1298         Ptr = Builder.CreateBitCast(Ptr,
1299                           llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
1300       }
1301 
1302       // If the coerce-to type is a first class aggregate, we flatten it and
1303       // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
1305       llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
1306       if (STy && STy->getNumElements() > 1) {
1307         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
1308         llvm::Type *DstTy =
1309           cast<llvm::PointerType>(Ptr->getType())->getElementType();
1310         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
1311 
1312         if (SrcSize <= DstSize) {
1313           Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
1314 
1315           for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1316             assert(AI != Fn->arg_end() && "Argument mismatch!");
1317             AI->setName(Arg->getName() + ".coerce" + Twine(i));
1318             llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
1319             Builder.CreateStore(AI++, EltPtr);
1320           }
1321         } else {
1322           llvm::AllocaInst *TempAlloca =
1323             CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
1324           TempAlloca->setAlignment(AlignmentToUse);
1325           llvm::Value *TempV = TempAlloca;
1326 
1327           for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1328             assert(AI != Fn->arg_end() && "Argument mismatch!");
1329             AI->setName(Arg->getName() + ".coerce" + Twine(i));
1330             llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
1331             Builder.CreateStore(AI++, EltPtr);
1332           }
1333 
1334           Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
1335         }
1336       } else {
1337         // Simple case, just do a coerced store of the argument into the alloca.
1338         assert(AI != Fn->arg_end() && "Argument mismatch!");
1339         AI->setName(Arg->getName() + ".coerce");
1340         CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
1341       }
1342 
1343 
1344       // Match to what EmitParmDecl is expecting for this type.
1345       if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
1346         V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
1347         if (isPromoted)
1348           V = emitArgumentDemotion(*this, Arg, V);
1349       }
1350       EmitParmDecl(*Arg, V, ArgNo);
1351       continue;  // Skip ++AI increment, already done.
1352     }
1353 
1354     case ABIArgInfo::Expand: {
1355       // If this structure was expanded into multiple arguments then
1356       // we need to create a temporary and reconstruct it from the
1357       // arguments.
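           // For example (illustrative), a parameter of type
           // 'struct { int a; float b; }' that the ABI expands arrives as two
           // separate IR arguments; they are reassembled field by field into
           // the temporary by ExpandTypeFromArgs and named <arg>.0 and <arg>.1
           // by the loop below.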
1358       llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
1359       CharUnits Align = getContext().getDeclAlign(Arg);
1360       Alloca->setAlignment(Align.getQuantity());
1361       LValue LV = MakeAddrLValue(Alloca, Ty, Align);
1362       llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
1363       EmitParmDecl(*Arg, Alloca, ArgNo);
1364 
1365       // Name the arguments used in expansion and increment AI.
1366       unsigned Index = 0;
1367       for (; AI != End; ++AI, ++Index)
1368         AI->setName(Arg->getName() + "." + Twine(Index));
1369       continue;
1370     }
1371 
1372     case ABIArgInfo::Ignore:
1373       // Initialize the local variable appropriately.
1374       if (hasAggregateLLVMType(Ty))
1375         EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
1376       else
1377         EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
1378                      ArgNo);
1379 
1380       // Skip increment, no matching LLVM parameter.
1381       continue;
1382     }
1383 
1384     ++AI;
1385   }
1386   assert(AI == Fn->arg_end() && "Argument mismatch!");
1387 }
1388 
1389 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
1390   while (insn->use_empty()) {
1391     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
1392     if (!bitcast) return;
1393 
1394     // This is "safe" because we would have used a ConstantExpr otherwise.
1395     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
1396     bitcast->eraseFromParent();
1397   }
1398 }
1399 
1400 /// Try to emit a fused autorelease of a return result.
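     ///
     /// Roughly (illustrative): if the pending return value was just produced by
     ///   %x = call i8* @objc_retain(i8* %v)
     /// the retain and the autorelease we are about to emit are fused into a
     /// single objc_retainAutoreleaseReturnValue call; if it was produced by
     /// objc_retainAutoreleasedReturnValue, the pair cancels and %v is returned
     /// directly.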
1401 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
1402                                                     llvm::Value *result) {
1403   // The result must be the instruction just before the insertion point.
1404   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
1405   if (BB->empty()) return 0;
1406   if (&BB->back() != result) return 0;
1407 
1408   llvm::Type *resultType = result->getType();
1409 
1410   // result is in a BasicBlock and is therefore an Instruction.
1411   llvm::Instruction *generator = cast<llvm::Instruction>(result);
1412 
1413   SmallVector<llvm::Instruction*,4> insnsToKill;
1414 
1415   // Look for:
1416   //  %generator = bitcast %type1* %generator2 to %type2*
1417   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
1418     // We would have emitted this as a constant if the operand weren't
1419     // an Instruction.
1420     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
1421 
1422     // Require the generator to be immediately followed by the cast.
1423     if (generator->getNextNode() != bitcast)
1424       return 0;
1425 
1426     insnsToKill.push_back(bitcast);
1427   }
1428 
1429   // Look for:
1430   //   %generator = call i8* @objc_retain(i8* %originalResult)
1431   // or
1432   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
1433   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
1434   if (!call) return 0;
1435 
1436   bool doRetainAutorelease;
1437 
1438   if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
1439     doRetainAutorelease = true;
1440   } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
1441                                           .objc_retainAutoreleasedReturnValue) {
1442     doRetainAutorelease = false;
1443 
1444     // If we emitted an assembly marker for this call (and the
1445     // ARCEntrypoints field should have been set if so), go looking
1446     // for that call.  If we can't find it, we can't do this
1447     // optimization.  But it should always be the immediately previous
1448     // instruction, unless we needed bitcasts around the call.
1449     if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
1450       llvm::Instruction *prev = call->getPrevNode();
1451       assert(prev);
1452       if (isa<llvm::BitCastInst>(prev)) {
1453         prev = prev->getPrevNode();
1454         assert(prev);
1455       }
1456       assert(isa<llvm::CallInst>(prev));
1457       assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
1458                CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
1459       insnsToKill.push_back(prev);
1460     }
1461   } else {
1462     return 0;
1463   }
1464 
1465   result = call->getArgOperand(0);
1466   insnsToKill.push_back(call);
1467 
1468   // Keep killing bitcasts, for sanity.  Note that we no longer care
1469   // about precise ordering as long as there's exactly one use.
1470   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
1471     if (!bitcast->hasOneUse()) break;
1472     insnsToKill.push_back(bitcast);
1473     result = bitcast->getOperand(0);
1474   }
1475 
1476   // Delete all the unnecessary instructions, from latest to earliest.
1477   for (SmallVectorImpl<llvm::Instruction*>::iterator
1478          i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
1479     (*i)->eraseFromParent();
1480 
1481   // Do the fused retain/autorelease if we were asked to.
1482   if (doRetainAutorelease)
1483     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
1484 
1485   // Cast back to the result type.
1486   return CGF.Builder.CreateBitCast(result, resultType);
1487 }
1488 
1489 /// If this is a +1 of the value of an immutable 'self', remove it.
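     ///
     /// Roughly (illustrative): for 'return self;' in such a method, a sequence
     /// like
     ///   %0 = load i8** %self.addr
     ///   %1 = call i8* @objc_retain(i8* %0)
     /// is stripped back down to the plain load, and the caller then skips the
     /// matching autorelease of the returned value.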
1490 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
1491                                           llvm::Value *result) {
1492   // This is only applicable to a method with an immutable 'self'.
1493   const ObjCMethodDecl *method =
1494     dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
1495   if (!method) return 0;
1496   const VarDecl *self = method->getSelfDecl();
1497   if (!self->getType().isConstQualified()) return 0;
1498 
1499   // Look for a retain call.
1500   llvm::CallInst *retainCall =
1501     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
1502   if (!retainCall ||
1503       retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
1504     return 0;
1505 
1506   // Look for an ordinary load of 'self'.
1507   llvm::Value *retainedValue = retainCall->getArgOperand(0);
1508   llvm::LoadInst *load =
1509     dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
1510   if (!load || load->isAtomic() || load->isVolatile() ||
1511       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
1512     return 0;
1513 
1514   // Okay!  Burn it all down.  This relies for correctness on the
1515   // assumption that the retain is emitted as part of the return and
1516   // that thereafter everything is used "linearly".
1517   llvm::Type *resultType = result->getType();
1518   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
1519   assert(retainCall->use_empty());
1520   retainCall->eraseFromParent();
1521   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
1522 
1523   return CGF.Builder.CreateBitCast(load, resultType);
1524 }
1525 
1526 /// Emit an ARC autorelease of the result of a function.
1527 ///
1528 /// \return the value to actually return from the function
1529 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
1530                                             llvm::Value *result) {
1531   // If we're returning 'self', kill the initial retain.  This is a
1532   // heuristic attempt to "encourage correctness" in the really unfortunate
1533   // case where we have a return of self during a dealloc and we desperately
1534   // need to avoid the possible autorelease.
1535   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
1536     return self;
1537 
1538   // At -O0, try to emit a fused retain/autorelease.
1539   if (CGF.shouldUseFusedARCCalls())
1540     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
1541       return fused;
1542 
1543   return CGF.EmitARCAutoreleaseReturnValue(result);
1544 }
1545 
1546 /// Heuristically search for a dominating store to the return-value slot.
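     ///
     /// That is (illustrative): if the most recent thing emitted into a block
     /// that dominates the epilog was
     ///   store i32 %x, i32* %retval
     /// the epilog can return %x directly, delete the store, and usually delete
     /// the %retval alloca as well.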
1547 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
1548   // If there are multiple uses of the return-value slot, just check
1549   // for something immediately preceding the IP.  Sometimes this can
1550   // happen with how we generate implicit-returns; it can also happen
1551   // with noreturn cleanups.
1552   if (!CGF.ReturnValue->hasOneUse()) {
1553     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1554     if (IP->empty()) return 0;
1555     llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
1556     if (!store) return 0;
1557     if (store->getPointerOperand() != CGF.ReturnValue) return 0;
1558     assert(!store->isAtomic() && !store->isVolatile()); // see below
1559     return store;
1560   }
1561 
1562   llvm::StoreInst *store =
1563     dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
1564   if (!store) return 0;
1565 
1566   // These aren't actually possible for non-coerced returns, and we
1567   // only care about non-coerced returns on this code path.
1568   assert(!store->isAtomic() && !store->isVolatile());
1569 
1570   // Now do a quick-and-dirty dominance check: just walk up the
1571   // single-predecessor chain from the current insertion point.
1572   llvm::BasicBlock *StoreBB = store->getParent();
1573   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1574   while (IP != StoreBB) {
1575     if (!(IP = IP->getSinglePredecessor()))
1576       return 0;
1577   }
1578 
1579   // Okay, the store's basic block dominates the insertion point; we
1580   // can do our thing.
1581   return store;
1582 }
1583 
1584 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
1585   // Functions with no result always return void.
1586   if (ReturnValue == 0) {
1587     Builder.CreateRetVoid();
1588     return;
1589   }
1590 
1591   llvm::DebugLoc RetDbgLoc;
1592   llvm::Value *RV = 0;
1593   QualType RetTy = FI.getReturnType();
1594   const ABIArgInfo &RetAI = FI.getReturnInfo();
1595 
1596   switch (RetAI.getKind()) {
1597   case ABIArgInfo::Indirect: {
1598     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
1599     if (RetTy->isAnyComplexType()) {
1600       ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
1601       StoreComplexToAddr(RT, CurFn->arg_begin(), false);
1602     } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1603       // Do nothing; aggregates get evaluated directly into the destination.
1604     } else {
1605       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
1606                         false, Alignment, RetTy);
1607     }
1608     break;
1609   }
1610 
1611   case ABIArgInfo::Extend:
1612   case ABIArgInfo::Direct:
1613     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
1614         RetAI.getDirectOffset() == 0) {
1615       // The internal return value temp will always have pointer-to-return-type
1616       // type; just do a load.
1617 
1618       // If there is a dominating store to ReturnValue, we can elide
1619       // the load, zap the store, and usually zap the alloca.
1620       if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
1621         // Get the stored value and nuke the now-dead store.
1622         RetDbgLoc = SI->getDebugLoc();
1623         RV = SI->getValueOperand();
1624         SI->eraseFromParent();
1625 
1626         // If that was the only use of the return value, nuke it as well now.
1627         if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
1628           cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
1629           ReturnValue = 0;
1630         }
1631 
1632       // Otherwise, we have to do a simple load.
1633       } else {
1634         RV = Builder.CreateLoad(ReturnValue);
1635       }
1636     } else {
1637       llvm::Value *V = ReturnValue;
1638       // If the value is offset in memory, apply the offset now.
1639       if (unsigned Offs = RetAI.getDirectOffset()) {
1640         V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
1641         V = Builder.CreateConstGEP1_32(V, Offs);
1642         V = Builder.CreateBitCast(V,
1643                          llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
1644       }
1645 
1646       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
1647     }
1648 
1649     // In ARC, end functions that return a retainable type with a call
1650     // to objc_autoreleaseReturnValue.
1651     if (AutoreleaseResult) {
1652       assert(getLangOpts().ObjCAutoRefCount &&
1653              !FI.isReturnsRetained() &&
1654              RetTy->isObjCRetainableType());
1655       RV = emitAutoreleaseOfResult(*this, RV);
1656     }
1657 
1658     break;
1659 
1660   case ABIArgInfo::Ignore:
1661     break;
1662 
1663   case ABIArgInfo::Expand:
1664     llvm_unreachable("Invalid ABI kind for return argument");
1665   }
1666 
1667   llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
1668   if (!RetDbgLoc.isUnknown())
1669     Ret->setDebugLoc(RetDbgLoc);
1670 }
1671 
1672 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
1673                                           const VarDecl *param) {
1674   // StartFunction converted the ABI-lowered parameter(s) into a
1675   // local alloca.  We need to turn that into an r-value suitable
1676   // for EmitCall.
1677   llvm::Value *local = GetAddrOfLocalVar(param);
1678 
1679   QualType type = param->getType();
1680 
1681   // For the most part, we just need to load the alloca, except:
1682   // 1) aggregate r-values are actually pointers to temporaries, and
1683   // 2) references to aggregates are pointers directly to the aggregate.
1684   // I don't know why references to non-aggregates are different here.
1685   if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
1686     if (hasAggregateLLVMType(ref->getPointeeType()))
1687       return args.add(RValue::getAggregate(local), type);
1688 
1689     // Locals which are references to scalars are represented
1690     // with allocas holding the pointer.
1691     return args.add(RValue::get(Builder.CreateLoad(local)), type);
1692   }
1693 
1694   if (type->isAnyComplexType()) {
1695     ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
1696     return args.add(RValue::getComplex(complex), type);
1697   }
1698 
1699   if (hasAggregateLLVMType(type))
1700     return args.add(RValue::getAggregate(local), type);
1701 
1702   unsigned alignment = getContext().getDeclAlign(param).getQuantity();
1703   llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
1704   return args.add(RValue::get(value), type);
1705 }
1706 
1707 static bool isProvablyNull(llvm::Value *addr) {
1708   return isa<llvm::ConstantPointerNull>(addr);
1709 }
1710 
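     // An alloca is a valid stack address and therefore provably non-null; any
     // other value is conservatively treated as possibly null.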
1711 static bool isProvablyNonNull(llvm::Value *addr) {
1712   return isa<llvm::AllocaInst>(addr);
1713 }
1714 
1715 /// Emit the actual writing-back of a writeback.
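     ///
     /// If the original address is not provably non-null, the writeback is
     /// guarded by a null check (illustrative IR):
     ///   %icr.isnull = icmp eq i8** %addr, null
     ///   br i1 %icr.isnull, label %icr.done, label %icr.writeback
     /// and the load/store of the temporary happens only in %icr.writeback.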
1716 static void emitWriteback(CodeGenFunction &CGF,
1717                           const CallArgList::Writeback &writeback) {
1718   llvm::Value *srcAddr = writeback.Address;
1719   assert(!isProvablyNull(srcAddr) &&
1720          "shouldn't have writeback for provably null argument");
1721 
1722   llvm::BasicBlock *contBB = 0;
1723 
1724   // If the argument wasn't provably non-null, we need to null check
1725   // before doing the store.
1726   bool provablyNonNull = isProvablyNonNull(srcAddr);
1727   if (!provablyNonNull) {
1728     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
1729     contBB = CGF.createBasicBlock("icr.done");
1730 
1731     llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1732     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
1733     CGF.EmitBlock(writebackBB);
1734   }
1735 
1736   // Load the value to writeback.
1737   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
1738 
1739   // Cast it back, in case we're writing an id to a Foo* or something.
1740   value = CGF.Builder.CreateBitCast(value,
1741                cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
1742                             "icr.writeback-cast");
1743 
1744   // Perform the writeback.
1745   QualType srcAddrType = writeback.AddressType;
1746   CGF.EmitStoreThroughLValue(RValue::get(value),
1747                              CGF.MakeAddrLValue(srcAddr, srcAddrType));
1748 
1749   // Jump to the continuation block.
1750   if (!provablyNonNull)
1751     CGF.EmitBlock(contBB);
1752 }
1753 
1754 static void emitWritebacks(CodeGenFunction &CGF,
1755                            const CallArgList &args) {
1756   for (CallArgList::writeback_iterator
1757          i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
1758     emitWriteback(CGF, *i);
1759 }
1760 
1761 /// Emit an argument that's being passed call-by-writeback.  That is,
1762 /// we are passing the address of a temporary, which is written back to the
     /// original l-value after the call returns.
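     ///
     /// The classic case (illustrative) is passing '&err', where 'err' is a
     /// __strong local, to a parameter of type 'NSError * __autoreleasing *':
     /// the callee receives the address of a temporary, and the temporary's
     /// final value is written back to 'err' once the call returns.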
1763 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
1764                              const ObjCIndirectCopyRestoreExpr *CRE) {
1765   llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
1766 
1767   // The dest and src types don't necessarily match in LLVM terms
1768   // because of the crazy ObjC compatibility rules.
1769 
1770   llvm::PointerType *destType =
1771     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
1772 
1773   // If the address is a constant null, just pass the appropriate null.
1774   if (isProvablyNull(srcAddr)) {
1775     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
1776              CRE->getType());
1777     return;
1778   }
1779 
1780   QualType srcAddrType =
1781     CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
1782 
1783   // Create the temporary.
1784   llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
1785                                            "icr.temp");
1786   // Loading an l-value can introduce a cleanup if the l-value is __weak,
1787   // and that cleanup will be conditional if we can't prove that the l-value
1788   // isn't null, so we need to register a dominating point so that the cleanups
1789   // system will make valid IR.
1790   CodeGenFunction::ConditionalEvaluation condEval(CGF);
1791 
1792   // Zero-initialize it if we're not doing a copy-initialization.
1793   bool shouldCopy = CRE->shouldCopy();
1794   if (!shouldCopy) {
1795     llvm::Value *null =
1796       llvm::ConstantPointerNull::get(
1797         cast<llvm::PointerType>(destType->getElementType()));
1798     CGF.Builder.CreateStore(null, temp);
1799   }
1800 
1801   llvm::BasicBlock *contBB = 0;
1802 
1803   // If the address is *not* known to be non-null, we need to select the
     // argument to pass at run time: null or the temporary.
1804   llvm::Value *finalArgument;
1805 
1806   bool provablyNonNull = isProvablyNonNull(srcAddr);
1807   if (provablyNonNull) {
1808     finalArgument = temp;
1809   } else {
1810     llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1811 
1812     finalArgument = CGF.Builder.CreateSelect(isNull,
1813                                    llvm::ConstantPointerNull::get(destType),
1814                                              temp, "icr.argument");
1815 
1816     // If we need to copy, then the load has to be conditional, which
1817     // means we need control flow.
1818     if (shouldCopy) {
1819       contBB = CGF.createBasicBlock("icr.cont");
1820       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
1821       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
1822       CGF.EmitBlock(copyBB);
1823       condEval.begin(CGF);
1824     }
1825   }
1826 
1827   // Perform a copy if necessary.
1828   if (shouldCopy) {
1829     LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
1830     RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
1831     assert(srcRV.isScalar());
1832 
1833     llvm::Value *src = srcRV.getScalarVal();
1834     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
1835                                     "icr.cast");
1836 
1837     // Use an ordinary store, not a store-to-lvalue.
1838     CGF.Builder.CreateStore(src, temp);
1839   }
1840 
1841   // Finish the control flow if we needed it.
1842   if (shouldCopy && !provablyNonNull) {
1843     CGF.EmitBlock(contBB);
1844     condEval.end(CGF);
1845   }
1846 
1847   args.addWriteback(srcAddr, srcAddrType, temp);
1848   args.add(RValue::get(finalArgument), CRE->getType());
1849 }
1850 
1851 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
1852                                   QualType type) {
1853   if (const ObjCIndirectCopyRestoreExpr *CRE
1854         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
1855     assert(getLangOpts().ObjCAutoRefCount);
1856     assert(getContext().hasSameType(E->getType(), type));
1857     return emitWritebackArg(*this, args, CRE);
1858   }
1859 
1860   assert(type->isReferenceType() == E->isGLValue() &&
1861          "reference binding to unmaterialized r-value!");
1862 
1863   if (E->isGLValue()) {
1864     assert(E->getObjectKind() == OK_Ordinary);
1865     return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
1866                     type);
1867   }
1868 
1869   if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
1870       isa<ImplicitCastExpr>(E) &&
1871       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
1872     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
1873     assert(L.isSimple());
1874     args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
1875     return;
1876   }
1877 
1878   args.add(EmitAnyExprToTemp(E), type);
1879 }
1880 
1881 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
1882 // optimizer it can aggressively ignore unwind edges.
1883 void
1884 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
1885   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
1886       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
1887     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
1888                       CGM.getNoObjCARCExceptionsMetadata());
1889 }
1890 
1891 /// Emits a call or invoke instruction to the given function, depending
1892 /// on the current state of the EH stack.
1893 llvm::CallSite
1894 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
1895                                   ArrayRef<llvm::Value *> Args,
1896                                   const Twine &Name) {
1897   llvm::BasicBlock *InvokeDest = getInvokeDest();
1898 
1899   llvm::Instruction *Inst;
1900   if (!InvokeDest)
1901     Inst = Builder.CreateCall(Callee, Args, Name);
1902   else {
1903     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
1904     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
1905     EmitBlock(ContBB);
1906   }
1907 
1908   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
1909   // optimizer it can aggressively ignore unwind edges.
1910   if (CGM.getLangOpts().ObjCAutoRefCount)
1911     AddObjCARCExceptionMetadata(Inst);
1912 
1913   return Inst;
1914 }
1915 
1916 llvm::CallSite
1917 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
1918                                   const Twine &Name) {
1919   return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
1920 }
1921 
1922 static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
1923                             llvm::FunctionType *FTy) {
1924   if (ArgNo < FTy->getNumParams())
1925     assert(Elt->getType() == FTy->getParamType(ArgNo));
1926   else
1927     assert(FTy->isVarArg());
1928   ++ArgNo;
1929 }
1930 
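     /// Recursively expand an r-value of the given type into a flat sequence of
     /// scalar IR call arguments, mirroring ExpandTypeFromArgs in the function
     /// prolog: constant arrays and records are walked element by element
     /// (a union contributes only its largest field), complex values contribute
     /// their two halves, and scalars are passed through, bitcast if the IR
     /// parameter type differs.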
1931 void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
1932                                        SmallVector<llvm::Value*,16> &Args,
1933                                        llvm::FunctionType *IRFuncTy) {
1934   if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
1935     unsigned NumElts = AT->getSize().getZExtValue();
1936     QualType EltTy = AT->getElementType();
1937     llvm::Value *Addr = RV.getAggregateAddr();
1938     for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
1939       llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
1940       LValue LV = MakeAddrLValue(EltAddr, EltTy);
1941       RValue EltRV;
1942       if (EltTy->isAnyComplexType())
1943         // FIXME: Volatile?
1944         EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
1945       else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
1946         EltRV = LV.asAggregateRValue();
1947       else
1948         EltRV = EmitLoadOfLValue(LV);
1949       ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
1950     }
1951   } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
1952     RecordDecl *RD = RT->getDecl();
1953     assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
1954     LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);
1955 
1956     if (RD->isUnion()) {
1957       const FieldDecl *LargestFD = 0;
1958       CharUnits UnionSize = CharUnits::Zero();
1959 
1960       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1961            i != e; ++i) {
1962         const FieldDecl *FD = *i;
1963         assert(!FD->isBitField() &&
1964                "Cannot expand structure with bit-field members.");
1965         CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
1966         if (UnionSize < FieldSize) {
1967           UnionSize = FieldSize;
1968           LargestFD = FD;
1969         }
1970       }
1971       if (LargestFD) {
1972         RValue FldRV = EmitRValueForField(LV, LargestFD);
1973         ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
1974       }
1975     } else {
1976       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1977            i != e; ++i) {
1978         FieldDecl *FD = *i;
1979 
1980         RValue FldRV = EmitRValueForField(LV, FD);
1981         ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
1982       }
1983     }
1984   } else if (Ty->isAnyComplexType()) {
1985     ComplexPairTy CV = RV.getComplexVal();
1986     Args.push_back(CV.first);
1987     Args.push_back(CV.second);
1988   } else {
1989     assert(RV.isScalar() &&
1990            "Unexpected non-scalar rvalue during struct expansion.");
1991 
1992     // Insert a bitcast as needed.
1993     llvm::Value *V = RV.getScalarVal();
1994     if (Args.size() < IRFuncTy->getNumParams() &&
1995         V->getType() != IRFuncTy->getParamType(Args.size()))
1996       V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
1997 
1998     Args.push_back(V);
1999   }
2000 }
2001 
2002 
2003 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
2004                                  llvm::Value *Callee,
2005                                  ReturnValueSlot ReturnValue,
2006                                  const CallArgList &CallArgs,
2007                                  const Decl *TargetDecl,
2008                                  llvm::Instruction **callOrInvoke) {
2009   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
2010   SmallVector<llvm::Value*, 16> Args;
2011 
2012   // Handle struct-return functions by passing a pointer to the
2013   // location that we would like to return into.
2014   QualType RetTy = CallInfo.getReturnType();
2015   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
2016 
2017   // IRArgNo - Keep track of the argument number in the callee we're looking at.
2018   unsigned IRArgNo = 0;
2019   llvm::FunctionType *IRFuncTy =
2020     cast<llvm::FunctionType>(
2021                   cast<llvm::PointerType>(Callee->getType())->getElementType());
2022 
2023   // If the call returns a temporary with struct return, create a temporary
2024   // alloca to hold the result, unless one is given to us.
2025   if (CGM.ReturnTypeUsesSRet(CallInfo)) {
2026     llvm::Value *Value = ReturnValue.getValue();
2027     if (!Value)
2028       Value = CreateMemTemp(RetTy);
2029     Args.push_back(Value);
2030     checkArgMatches(Value, IRArgNo, IRFuncTy);
2031   }
2032 
2033   assert(CallInfo.arg_size() == CallArgs.size() &&
2034          "Mismatch between function signature & arguments.");
2035   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
2036   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
2037        I != E; ++I, ++info_it) {
2038     const ABIArgInfo &ArgInfo = info_it->info;
2039     RValue RV = I->RV;
2040 
2041     unsigned TypeAlign =
2042       getContext().getTypeAlignInChars(I->Ty).getQuantity();
2043 
2044     // Insert a padding argument to ensure proper alignment.
2045     if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
2046       Args.push_back(llvm::UndefValue::get(PaddingType));
2047       ++IRArgNo;
2048     }
2049 
2050     switch (ArgInfo.getKind()) {
2051     case ABIArgInfo::Indirect: {
2052       if (RV.isScalar() || RV.isComplex()) {
2053         // Make a temporary alloca to pass the argument.
2054         llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
2055         if (ArgInfo.getIndirectAlign() > AI->getAlignment())
2056           AI->setAlignment(ArgInfo.getIndirectAlign());
2057         Args.push_back(AI);
2058 
2059         if (RV.isScalar())
2060           EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
2061                             TypeAlign, I->Ty);
2062         else
2063           StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
2064 
2065         // Validate argument match.
2066         checkArgMatches(AI, IRArgNo, IRFuncTy);
2067       } else {
2068         // We want to avoid creating an unnecessary temporary+copy here;
2069         // however, we need one in two cases:
2070         // 1. If the argument is not byval, and we are required to copy the
2071         //    source.  (This case doesn't occur on any common architecture.)
2072         // 2. If the argument is byval, RV is not sufficiently aligned, and
2073         //    we cannot force it to be sufficiently aligned.
2074         llvm::Value *Addr = RV.getAggregateAddr();
2075         unsigned Align = ArgInfo.getIndirectAlign();
2076         const llvm::DataLayout *TD = &CGM.getDataLayout();
2077         if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
2078             (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
2079              llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
2080           // Create an aligned temporary, and copy to it.
2081           llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
2082           if (Align > AI->getAlignment())
2083             AI->setAlignment(Align);
2084           Args.push_back(AI);
2085           EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
2086 
2087           // Validate argument match.
2088           checkArgMatches(AI, IRArgNo, IRFuncTy);
2089         } else {
2090           // Skip the extra memcpy call.
2091           Args.push_back(Addr);
2092 
2093           // Validate argument match.
2094           checkArgMatches(Addr, IRArgNo, IRFuncTy);
2095         }
2096       }
2097       break;
2098     }
2099 
2100     case ABIArgInfo::Ignore:
2101       break;
2102 
2103     case ABIArgInfo::Extend:
2104     case ABIArgInfo::Direct: {
2105       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
2106           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
2107           ArgInfo.getDirectOffset() == 0) {
2108         llvm::Value *V;
2109         if (RV.isScalar())
2110           V = RV.getScalarVal();
2111         else
2112           V = Builder.CreateLoad(RV.getAggregateAddr());
2113 
2114         // If the argument doesn't match, perform a bitcast to coerce it.  This
2115         // can happen due to trivial type mismatches.
2116         if (IRArgNo < IRFuncTy->getNumParams() &&
2117             V->getType() != IRFuncTy->getParamType(IRArgNo))
2118           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
2119         Args.push_back(V);
2120 
2121         checkArgMatches(V, IRArgNo, IRFuncTy);
2122         break;
2123       }
2124 
2125       // FIXME: Avoid the conversion through memory if possible.
2126       llvm::Value *SrcPtr;
2127       if (RV.isScalar()) {
2128         SrcPtr = CreateMemTemp(I->Ty, "coerce");
2129         EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
2130       } else if (RV.isComplex()) {
2131         SrcPtr = CreateMemTemp(I->Ty, "coerce");
2132         StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
2133       } else
2134         SrcPtr = RV.getAggregateAddr();
2135 
2136       // If the value is offset in memory, apply the offset now.
2137       if (unsigned Offs = ArgInfo.getDirectOffset()) {
2138         SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
2139         SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
2140         SrcPtr = Builder.CreateBitCast(SrcPtr,
2141                        llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
2142 
2143       }
2144 
2145       // If the coerce-to type is a first class aggregate, we flatten it and
2146       // pass the elements. Either way is semantically identical, but fast-isel
2147       // and the optimizer generally like scalar values better than FCAs.
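           // For example (illustrative; the coerce-to type is ABI-chosen), an
           // argument coerced to { i64, i64 } is passed as two scalar values:
           // the loop below GEPs to each field of the coerced storage, loads
           // it, and appends the loaded value to the IR argument list.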
2148       if (llvm::StructType *STy =
2149             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
2150         llvm::Type *SrcTy =
2151           cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
2152         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
2153         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
2154 
2155         // If the source type is smaller than the destination type of the
2156         // coerce-to logic, copy the source value into a temp alloca the size
2157         // of the destination type to allow loading all of it. The bits past
2158         // the source value are left undef.
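             // For example (illustrative), a 12-byte struct coerced to
             // { i64, i64 } is memcpy'd into a 16-byte temporary so that both
             // i64 elements can be loaded; the high 4 bytes of the second
             // element are undefined.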
2159         if (SrcSize < DstSize) {
2160           llvm::AllocaInst *TempAlloca
2161             = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
2162           Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
2163           SrcPtr = TempAlloca;
2164         } else {
2165           SrcPtr = Builder.CreateBitCast(SrcPtr,
2166                                          llvm::PointerType::getUnqual(STy));
2167         }
2168 
2169         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2170           llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
2171           llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
2172           // We don't know what we're loading from.
2173           LI->setAlignment(1);
2174           Args.push_back(LI);
2175 
2176           // Validate argument match.
2177           checkArgMatches(LI, IRArgNo, IRFuncTy);
2178         }
2179       } else {
2180         // In the simple case, just pass the coerced loaded value.
2181         Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
2182                                          *this));
2183 
2184         // Validate argument match.
2185         checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
2186       }
2187 
2188       break;
2189     }
2190 
2191     case ABIArgInfo::Expand:
2192       ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
2193       IRArgNo = Args.size();
2194       break;
2195     }
2196   }
2197 
2198   // If the callee is a bitcast of a function to a varargs pointer to function
2199   // type, check to see if we can remove the bitcast.  This handles some cases
2200   // with unprototyped functions.
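       // For example (illustrative): in C, calling through an unprototyped
       // declaration such as 'void f();' when f is actually defined as
       // 'void f(int)' yields a call through
       //   bitcast (void (i32)* @f to void (...)*)
       // and if the argument list we just built matches the real prototype,
       // the bitcast is stripped so @f is called (and can be inlined) directly.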
2201   if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
2202     if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
2203       llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
2204       llvm::FunctionType *CurFT =
2205         cast<llvm::FunctionType>(CurPT->getElementType());
2206       llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
2207 
2208       if (CE->getOpcode() == llvm::Instruction::BitCast &&
2209           ActualFT->getReturnType() == CurFT->getReturnType() &&
2210           ActualFT->getNumParams() == CurFT->getNumParams() &&
2211           ActualFT->getNumParams() == Args.size() &&
2212           (CurFT->isVarArg() || !ActualFT->isVarArg())) {
2213         bool ArgsMatch = true;
2214         for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
2215           if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
2216             ArgsMatch = false;
2217             break;
2218           }
2219 
2220         // Strip the cast if we can get away with it.  This is a nice cleanup,
2221         // but also allows us to inline the function at -O0 if it is marked
2222         // always_inline.
2223         if (ArgsMatch)
2224           Callee = CalleeF;
2225       }
2226     }
2227 
2228   unsigned CallingConv;
2229   CodeGen::AttributeListType AttributeList;
2230   CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
2231   llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
2232                                                    AttributeList);
2233 
2234   llvm::BasicBlock *InvokeDest = 0;
2235   if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
2236                           llvm::Attribute::NoUnwind))
2237     InvokeDest = getInvokeDest();
2238 
2239   llvm::CallSite CS;
2240   if (!InvokeDest) {
2241     CS = Builder.CreateCall(Callee, Args);
2242   } else {
2243     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
2244     CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
2245     EmitBlock(Cont);
2246   }
2247   if (callOrInvoke)
2248     *callOrInvoke = CS.getInstruction();
2249 
2250   CS.setAttributes(Attrs);
2251   CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
2252 
2253   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2254   // optimizer it can aggressively ignore unwind edges.
2255   if (CGM.getLangOpts().ObjCAutoRefCount)
2256     AddObjCARCExceptionMetadata(CS.getInstruction());
2257 
2258   // If the call doesn't return, finish the basic block and clear the
2259   // insertion point; this allows the rest of IRgen to discard
2260   // unreachable code.
2261   if (CS.doesNotReturn()) {
2262     Builder.CreateUnreachable();
2263     Builder.ClearInsertionPoint();
2264 
2265     // FIXME: For now, emit a dummy basic block because expr emitters in
2266     // general are not ready to handle emitting expressions at unreachable
2267     // points.
2268     EnsureInsertPoint();
2269 
2270     // Return a reasonable RValue.
2271     return GetUndefRValue(RetTy);
2272   }
2273 
2274   llvm::Instruction *CI = CS.getInstruction();
2275   if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
2276     CI->setName("call");
2277 
2278   // Emit any writebacks immediately.  Arguably this should happen
2279   // after any return-value munging.
2280   if (CallArgs.hasWritebacks())
2281     emitWritebacks(*this, CallArgs);
2282 
2283   switch (RetAI.getKind()) {
2284   case ABIArgInfo::Indirect: {
2285     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
2286     if (RetTy->isAnyComplexType())
2287       return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
2288     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
2289       return RValue::getAggregate(Args[0]);
2290     return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
2291   }
2292 
2293   case ABIArgInfo::Ignore:
2294     // Even though the ABI says to ignore the result, we still have to
2295     // construct an appropriate (undef) return value for our caller.
2296     return GetUndefRValue(RetTy);
2297 
2298   case ABIArgInfo::Extend:
2299   case ABIArgInfo::Direct: {
2300     llvm::Type *RetIRTy = ConvertType(RetTy);
2301     if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
2302       if (RetTy->isAnyComplexType()) {
2303         llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
2304         llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
2305         return RValue::getComplex(std::make_pair(Real, Imag));
2306       }
2307       if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
2308         llvm::Value *DestPtr = ReturnValue.getValue();
2309         bool DestIsVolatile = ReturnValue.isVolatile();
2310 
2311         if (!DestPtr) {
2312           DestPtr = CreateMemTemp(RetTy, "agg.tmp");
2313           DestIsVolatile = false;
2314         }
2315         BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
2316         return RValue::getAggregate(DestPtr);
2317       }
2318 
2319       // If the IR value doesn't match the expected return type, perform a
2320       // bitcast to coerce it.  This can happen due to trivial type mismatches.
2321       llvm::Value *V = CI;
2322       if (V->getType() != RetIRTy)
2323         V = Builder.CreateBitCast(V, RetIRTy);
2324       return RValue::get(V);
2325     }
2326 
2327     llvm::Value *DestPtr = ReturnValue.getValue();
2328     bool DestIsVolatile = ReturnValue.isVolatile();
2329 
2330     if (!DestPtr) {
2331       DestPtr = CreateMemTemp(RetTy, "coerce");
2332       DestIsVolatile = false;
2333     }
2334 
2335     // If the value is offset in memory, apply the offset now.
2336     llvm::Value *StorePtr = DestPtr;
2337     if (unsigned Offs = RetAI.getDirectOffset()) {
2338       StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
2339       StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
2340       StorePtr = Builder.CreateBitCast(StorePtr,
2341                          llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
2342     }
2343     CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
2344 
2345     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
2346     if (RetTy->isAnyComplexType())
2347       return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
2348     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
2349       return RValue::getAggregate(DestPtr);
2350     return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
2351   }
2352 
2353   case ABIArgInfo::Expand:
2354     llvm_unreachable("Invalid ABI kind for return argument");
2355   }
2356 
2357   llvm_unreachable("Unhandled ABIArgInfo::Kind");
2358 }
2359 
2360 /* VarArg handling */
2361 
2362 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
2363   return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
2364 }
2365