1 //===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliancy.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "CGCall.h"
16 #include "CGCXXABI.h"
17 #include "ABIInfo.h"
18 #include "CodeGenFunction.h"
19 #include "CodeGenModule.h"
20 #include "TargetInfo.h"
21 #include "clang/Basic/TargetInfo.h"
22 #include "clang/AST/Decl.h"
23 #include "clang/AST/DeclCXX.h"
24 #include "clang/AST/DeclObjC.h"
25 #include "clang/Frontend/CodeGenOptions.h"
26 #include "llvm/Attributes.h"
27 #include "llvm/Support/CallSite.h"
28 #include "llvm/DataLayout.h"
29 #include "llvm/InlineAsm.h"
30 #include "llvm/Transforms/Utils/Local.h"
31 using namespace clang;
32 using namespace CodeGen;
33 
34 /***/
35 
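/// Translate a Clang calling convention to the corresponding LLVM calling
/// convention.  Conventions with no LLVM equivalent (see the TODO below)
/// fall back to the default C calling convention.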
36 static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
37   switch (CC) {
38   default: return llvm::CallingConv::C;
39   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
40   case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
41   case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
42   case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
43   case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
44   // TODO: add support for CC_X86Pascal to llvm
45   }
46 }
47 
48 /// Derives the 'this' type for codegen purposes, i.e. ignoring method
49 /// qualification.
50 /// FIXME: address space qualification?
51 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
52   QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
53   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
54 }
55 
56 /// Returns the canonical formal type of the given C++ method.
57 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
58   return MD->getType()->getCanonicalTypeUnqualified()
59            .getAs<FunctionProtoType>();
60 }
61 
62 /// Returns the "extra-canonicalized" return type, which discards
63 /// qualifiers on the return type.  Codegen doesn't care about them,
64 /// and it makes ABI code a little easier to be able to assume that
65 /// all parameter and return types are top-level unqualified.
66 static CanQualType GetReturnType(QualType RetTy) {
67   return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
68 }
69 
70 /// Arrange the argument and result information for a value of the given
71 /// unprototyped freestanding function type.
72 const CGFunctionInfo &
73 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
74   // When translating an unprototyped function type, always use a
75   // variadic type.
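  // RequiredArgs(0) means that no arguments are formally required, so every
  // argument supplied at a call site is passed using the variadic rules.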
76   return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
77                                  ArrayRef<CanQualType>(),
78                                  FTNP->getExtInfo(),
79                                  RequiredArgs(0));
80 }
81 
82 /// Arrange the LLVM function layout for a value of the given function
83 /// type, on top of any implicit parameters already stored.  Use the
84 /// given ExtInfo instead of the ExtInfo from the function type.
85 static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
86                                        SmallVectorImpl<CanQualType> &prefix,
87                                              CanQual<FunctionProtoType> FTP,
88                                               FunctionType::ExtInfo extInfo) {
89   RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
90   // FIXME: Kill copy.
91   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
92     prefix.push_back(FTP->getArgType(i));
93   CanQualType resultType = FTP->getResultType().getUnqualifiedType();
94   return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
95 }
96 
97 /// Arrange the argument and result information for a free function (i.e.
98 /// not a C++ or ObjC instance method) of the given type.
99 static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
100                                       SmallVectorImpl<CanQualType> &prefix,
101                                             CanQual<FunctionProtoType> FTP) {
102   return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
103 }
104 
105 /// Given the formal ext-info of a C++ instance method, adjust it
106 /// according to the C++ ABI in effect.
107 static void adjustCXXMethodInfo(CodeGenTypes &CGT,
108                                 FunctionType::ExtInfo &extInfo,
109                                 bool isVariadic) {
110   if (extInfo.getCC() == CC_Default) {
111     CallingConv CC = CGT.getContext().getDefaultCXXMethodCallConv(isVariadic);
112     extInfo = extInfo.withCallingConv(CC);
113   }
114 }
115 
/// Arrange the argument and result information for a C++ instance method
/// of the given type, on top of any implicit parameters already stored.
118 static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
119                                       SmallVectorImpl<CanQualType> &prefix,
120                                             CanQual<FunctionProtoType> FTP) {
121   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
122   adjustCXXMethodInfo(CGT, extInfo, FTP->isVariadic());
123   return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
124 }
125 
126 /// Arrange the argument and result information for a value of the
127 /// given freestanding function type.
128 const CGFunctionInfo &
129 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
130   SmallVector<CanQualType, 16> argTypes;
131   return ::arrangeFreeFunctionType(*this, argTypes, FTP);
132 }
133 
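/// Determine the calling convention implied by the attributes on the given
/// declaration, defaulting to the C calling convention when no calling
/// convention attribute is present.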
134 static CallingConv getCallingConventionForDecl(const Decl *D) {
135   // Set the appropriate calling convention for the Function.
136   if (D->hasAttr<StdCallAttr>())
137     return CC_X86StdCall;
138 
139   if (D->hasAttr<FastCallAttr>())
140     return CC_X86FastCall;
141 
142   if (D->hasAttr<ThisCallAttr>())
143     return CC_X86ThisCall;
144 
145   if (D->hasAttr<PascalAttr>())
146     return CC_X86Pascal;
147 
148   if (PcsAttr *PCS = D->getAttr<PcsAttr>())
149     return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
150 
151   if (D->hasAttr<PnaclCallAttr>())
152     return CC_PnaclCall;
153 
154   return CC_C;
155 }
156 
157 /// Arrange the argument and result information for a call to an
158 /// unknown C++ non-static member function of the given abstract type.
159 /// The member function must be an ordinary function, i.e. not a
160 /// constructor or destructor.
161 const CGFunctionInfo &
162 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
163                                    const FunctionProtoType *FTP) {
164   SmallVector<CanQualType, 16> argTypes;
165 
166   // Add the 'this' pointer.
167   argTypes.push_back(GetThisType(Context, RD));
168 
169   return ::arrangeCXXMethodType(*this, argTypes,
170               FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
171 }
172 
173 /// Arrange the argument and result information for a declaration or
174 /// definition of the given C++ non-static member function.  The
175 /// member function must be an ordinary function, i.e. not a
176 /// constructor or destructor.
177 const CGFunctionInfo &
178 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
180   assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
181 
182   CanQual<FunctionProtoType> prototype = GetFormalType(MD);
183 
184   if (MD->isInstance()) {
185     // The abstract case is perfectly fine.
186     return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
187   }
188 
189   return arrangeFreeFunctionType(prototype);
190 }
191 
192 /// Arrange the argument and result information for a declaration
193 /// or definition to the given constructor variant.
194 const CGFunctionInfo &
195 CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
196                                                CXXCtorType ctorKind) {
197   SmallVector<CanQualType, 16> argTypes;
198   argTypes.push_back(GetThisType(Context, D->getParent()));
199   CanQualType resultType = Context.VoidTy;
200 
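  // Let the C++ ABI adjust the result type and append any implicit
  // parameters (for instance, the Itanium ABI may add a VTT parameter for
  // constructors of classes with virtual bases).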
201   TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
202 
203   CanQual<FunctionProtoType> FTP = GetFormalType(D);
204 
205   RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
206 
207   // Add the formal parameters.
208   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
209     argTypes.push_back(FTP->getArgType(i));
210 
211   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
212   adjustCXXMethodInfo(*this, extInfo, FTP->isVariadic());
213   return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
214 }
215 
216 /// Arrange the argument and result information for a declaration,
217 /// definition, or call to the given destructor variant.  It so
218 /// happens that all three cases produce the same information.
219 const CGFunctionInfo &
220 CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
221                                    CXXDtorType dtorKind) {
222   SmallVector<CanQualType, 2> argTypes;
223   argTypes.push_back(GetThisType(Context, D->getParent()));
224   CanQualType resultType = Context.VoidTy;
225 
226   TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
227 
228   CanQual<FunctionProtoType> FTP = GetFormalType(D);
229   assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
  assert(!FTP->isVariadic() && "dtor with variadic prototype");
231 
232   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
233   adjustCXXMethodInfo(*this, extInfo, false);
234   return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
235                                  RequiredArgs::All);
236 }
237 
238 /// Arrange the argument and result information for the declaration or
239 /// definition of the given function.
240 const CGFunctionInfo &
241 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
242   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
243     if (MD->isInstance())
244       return arrangeCXXMethodDeclaration(MD);
245 
246   CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
247 
248   assert(isa<FunctionType>(FTy));
249 
250   // When declaring a function without a prototype, always use a
251   // non-variadic type.
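  // For example, a K&R-style declaration 'int f();' is lowered here as a
  // non-variadic zero-argument function; calls through such a type are
  // arranged separately in arrangeFreeFunctionCall, which may treat them
  // as variadic.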
252   if (isa<FunctionNoProtoType>(FTy)) {
253     CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
254     return arrangeLLVMFunctionInfo(noProto->getResultType(),
255                                    ArrayRef<CanQualType>(),
256                                    noProto->getExtInfo(),
257                                    RequiredArgs::All);
258   }
259 
260   assert(isa<FunctionProtoType>(FTy));
261   return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
262 }
263 
264 /// Arrange the argument and result information for the declaration or
265 /// definition of an Objective-C method.
266 const CGFunctionInfo &
267 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
268   // It happens that this is the same as a call with no optional
269   // arguments, except also using the formal 'self' type.
270   return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
271 }
272 
273 /// Arrange the argument and result information for the function type
274 /// through which to perform a send to the given Objective-C method,
275 /// using the given receiver type.  The receiver type is not always
276 /// the 'self' type of the method or even an Objective-C pointer type.
277 /// This is *not* the right method for actually performing such a
278 /// message send, due to the possibility of optional arguments.
279 const CGFunctionInfo &
280 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
281                                               QualType receiverType) {
282   SmallVector<CanQualType, 16> argTys;
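  // Every Objective-C method carries two implicit arguments: the receiver
  // ('self') and the selector ('_cmd').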
283   argTys.push_back(Context.getCanonicalParamType(receiverType));
284   argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
285   // FIXME: Kill copy?
286   for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
287          e = MD->param_end(); i != e; ++i) {
288     argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
289   }
290 
291   FunctionType::ExtInfo einfo;
292   einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
293 
294   if (getContext().getLangOpts().ObjCAutoRefCount &&
295       MD->hasAttr<NSReturnsRetainedAttr>())
296     einfo = einfo.withProducesResult(true);
297 
298   RequiredArgs required =
299     (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
300 
301   return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
302                                  einfo, required);
303 }
304 
305 const CGFunctionInfo &
306 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
307   // FIXME: Do we need to handle ObjCMethodDecl?
308   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
309 
310   if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
311     return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());
312 
313   if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
314     return arrangeCXXDestructor(DD, GD.getDtorType());
315 
316   return arrangeFunctionDeclaration(FD);
317 }
318 
319 /// Figure out the rules for calling a function with the given formal
320 /// type using the given arguments.  The arguments are necessary
321 /// because the function might be unprototyped, in which case it's
322 /// target-dependent in crazy ways.
323 const CGFunctionInfo &
324 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
325                                       const FunctionType *fnType) {
326   RequiredArgs required = RequiredArgs::All;
327   if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
328     if (proto->isVariadic())
329       required = RequiredArgs(proto->getNumArgs());
330   } else if (CGM.getTargetCodeGenInfo()
331                .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
332     required = RequiredArgs(0);
333   }
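  // Note that for an unprototyped callee the target hook above decides
  // whether to emit the call as variadic, so that the arguments end up
  // where an unprototyped callee would expect to find them.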
334 
335   return arrangeFreeFunctionCall(fnType->getResultType(), args,
336                                  fnType->getExtInfo(), required);
337 }
338 
339 const CGFunctionInfo &
340 CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
341                                       const CallArgList &args,
342                                       FunctionType::ExtInfo info,
343                                       RequiredArgs required) {
344   // FIXME: Kill copy.
345   SmallVector<CanQualType, 16> argTypes;
346   for (CallArgList::const_iterator i = args.begin(), e = args.end();
347        i != e; ++i)
348     argTypes.push_back(Context.getCanonicalParamType(i->Ty));
349   return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
350                                  required);
351 }
352 
353 /// Arrange a call to a C++ method, passing the given arguments.
354 const CGFunctionInfo &
355 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
356                                    const FunctionProtoType *FPT,
357                                    RequiredArgs required) {
358   // FIXME: Kill copy.
359   SmallVector<CanQualType, 16> argTypes;
360   for (CallArgList::const_iterator i = args.begin(), e = args.end();
361        i != e; ++i)
362     argTypes.push_back(Context.getCanonicalParamType(i->Ty));
363 
364   FunctionType::ExtInfo info = FPT->getExtInfo();
365   adjustCXXMethodInfo(*this, info, FPT->isVariadic());
366   return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
367                                  argTypes, info, required);
368 }
369 
370 const CGFunctionInfo &
371 CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
372                                          const FunctionArgList &args,
373                                          const FunctionType::ExtInfo &info,
374                                          bool isVariadic) {
375   // FIXME: Kill copy.
376   SmallVector<CanQualType, 16> argTypes;
377   for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
378        i != e; ++i)
379     argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));
380 
381   RequiredArgs required =
382     (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
383   return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
384                                  required);
385 }
386 
387 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
388   return arrangeLLVMFunctionInfo(getContext().VoidTy, ArrayRef<CanQualType>(),
389                                  FunctionType::ExtInfo(), RequiredArgs::All);
390 }
391 
392 /// Arrange the argument and result information for an abstract value
393 /// of a given function type.  This is the method which all of the
394 /// above functions ultimately defer to.
395 const CGFunctionInfo &
396 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
397                                       ArrayRef<CanQualType> argTypes,
398                                       FunctionType::ExtInfo info,
399                                       RequiredArgs required) {
400 #ifndef NDEBUG
401   for (ArrayRef<CanQualType>::const_iterator
402          I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
403     assert(I->isCanonicalAsParam());
404 #endif
405 
406   unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
407 
408   // Lookup or create unique function info.
409   llvm::FoldingSetNodeID ID;
410   CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);
411 
412   void *insertPos = 0;
413   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
414   if (FI)
415     return *FI;
416 
417   // Construct the function info.  We co-allocate the ArgInfos.
418   FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
419   FunctionInfos.InsertNode(FI, insertPos);
420 
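  // Computing the ABI information below may recursively require other
  // function infos, but it must never require the one currently being
  // constructed; the FunctionsBeingProcessed set asserts exactly that.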
421   bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
422   assert(inserted && "Recursively being processed?");
423 
424   // Compute ABI information.
425   getABIInfo().computeInfo(*FI);
426 
427   // Loop over all of the computed argument and return value info.  If any of
428   // them are direct or extend without a specified coerce type, specify the
429   // default now.
430   ABIArgInfo &retInfo = FI->getReturnInfo();
431   if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
432     retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
433 
434   for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
435        I != E; ++I)
436     if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
437       I->info.setCoerceToType(ConvertType(I->type));
438 
439   bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
440   assert(erased && "Not in set?");
441 
442   return *FI;
443 }
444 
445 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
446                                        const FunctionType::ExtInfo &info,
447                                        CanQualType resultType,
448                                        ArrayRef<CanQualType> argTypes,
449                                        RequiredArgs required) {
450   void *buffer = operator new(sizeof(CGFunctionInfo) +
451                               sizeof(ArgInfo) * (argTypes.size() + 1));
452   CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
453   FI->CallingConvention = llvmCC;
454   FI->EffectiveCallingConvention = llvmCC;
455   FI->ASTCallingConvention = info.getCC();
456   FI->NoReturn = info.getNoReturn();
457   FI->ReturnsRetained = info.getProducesResult();
458   FI->Required = required;
459   FI->HasRegParm = info.getHasRegParm();
460   FI->RegParm = info.getRegParm();
461   FI->NumArgs = argTypes.size();
462   FI->getArgsBuffer()[0].type = resultType;
463   for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
464     FI->getArgsBuffer()[i + 1].type = argTypes[i];
465   return FI;
466 }
467 
468 /***/
469 
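/// Flatten the given type into the sequence of LLVM types used to pass it
/// when its ABIArgInfo is Expand.  For example, a struct of two floats and
/// an int would typically expand to (float, float, i32), and a _Complex
/// double expands to two doubles.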
470 void CodeGenTypes::GetExpandedTypes(QualType type,
471                      SmallVectorImpl<llvm::Type*> &expandedTypes) {
472   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
473     uint64_t NumElts = AT->getSize().getZExtValue();
474     for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
475       GetExpandedTypes(AT->getElementType(), expandedTypes);
476   } else if (const RecordType *RT = type->getAs<RecordType>()) {
477     const RecordDecl *RD = RT->getDecl();
478     assert(!RD->hasFlexibleArrayMember() &&
479            "Cannot expand structure with flexible array.");
480     if (RD->isUnion()) {
      // Unions can appear here only in degenerate cases - all the fields are
      // the same after flattening.  Thus we have to use the "largest" field.
483       const FieldDecl *LargestFD = 0;
484       CharUnits UnionSize = CharUnits::Zero();
485 
486       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
487            i != e; ++i) {
488         const FieldDecl *FD = *i;
489         assert(!FD->isBitField() &&
490                "Cannot expand structure with bit-field members.");
491         CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
492         if (UnionSize < FieldSize) {
493           UnionSize = FieldSize;
494           LargestFD = FD;
495         }
496       }
497       if (LargestFD)
498         GetExpandedTypes(LargestFD->getType(), expandedTypes);
499     } else {
500       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
501            i != e; ++i) {
502         assert(!i->isBitField() &&
503                "Cannot expand structure with bit-field members.");
504         GetExpandedTypes(i->getType(), expandedTypes);
505       }
506     }
507   } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
508     llvm::Type *EltTy = ConvertType(CT->getElementType());
509     expandedTypes.push_back(EltTy);
510     expandedTypes.push_back(EltTy);
511   } else
512     expandedTypes.push_back(ConvertType(type));
513 }
514 
515 llvm::Function::arg_iterator
516 CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
517                                     llvm::Function::arg_iterator AI) {
518   assert(LV.isSimple() &&
519          "Unexpected non-simple lvalue during struct expansion.");
520 
521   if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
522     unsigned NumElts = AT->getSize().getZExtValue();
523     QualType EltTy = AT->getElementType();
524     for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
525       llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
526       LValue LV = MakeAddrLValue(EltAddr, EltTy);
527       AI = ExpandTypeFromArgs(EltTy, LV, AI);
528     }
529   } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
530     RecordDecl *RD = RT->getDecl();
531     if (RD->isUnion()) {
      // Unions can appear here only in degenerate cases - all the fields are
      // the same after flattening.  Thus we have to use the "largest" field.
534       const FieldDecl *LargestFD = 0;
535       CharUnits UnionSize = CharUnits::Zero();
536 
537       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
538            i != e; ++i) {
539         const FieldDecl *FD = *i;
540         assert(!FD->isBitField() &&
541                "Cannot expand structure with bit-field members.");
542         CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
543         if (UnionSize < FieldSize) {
544           UnionSize = FieldSize;
545           LargestFD = FD;
546         }
547       }
548       if (LargestFD) {
549         // FIXME: What are the right qualifiers here?
550         LValue SubLV = EmitLValueForField(LV, LargestFD);
551         AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
552       }
553     } else {
554       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
555            i != e; ++i) {
556         FieldDecl *FD = *i;
557         QualType FT = FD->getType();
558 
559         // FIXME: What are the right qualifiers here?
560         LValue SubLV = EmitLValueForField(LV, FD);
561         AI = ExpandTypeFromArgs(FT, SubLV, AI);
562       }
563     }
564   } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
565     QualType EltTy = CT->getElementType();
566     llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
567     EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
568     llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
569     EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
570   } else {
571     EmitStoreThroughLValue(RValue::get(AI), LV);
572     ++AI;
573   }
574 
575   return AI;
576 }
577 
578 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
579 /// accessing some number of bytes out of it, try to gep into the struct to get
580 /// at its inner goodness.  Dive as deep as possible without entering an element
581 /// with an in-memory size smaller than DstSize.
582 static llvm::Value *
583 EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
584                                    llvm::StructType *SrcSTy,
585                                    uint64_t DstSize, CodeGenFunction &CGF) {
586   // We can't dive into a zero-element struct.
587   if (SrcSTy->getNumElements() == 0) return SrcPtr;
588 
589   llvm::Type *FirstElt = SrcSTy->getElementType(0);
590 
591   // If the first elt is at least as large as what we're looking for, or if the
592   // first element is the same size as the whole struct, we can enter it.
593   uint64_t FirstEltSize =
594     CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
595   if (FirstEltSize < DstSize &&
596       FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
597     return SrcPtr;
598 
599   // GEP into the first element.
600   SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
601 
602   // If the first element is a struct, recurse.
603   llvm::Type *SrcTy =
604     cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
605   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
606     return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
607 
608   return SrcPtr;
609 }
610 
611 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
612 /// are either integers or pointers.  This does a truncation of the value if it
613 /// is too large or a zero extension if it is too small.
614 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
615                                              llvm::Type *Ty,
616                                              CodeGenFunction &CGF) {
617   if (Val->getType() == Ty)
618     return Val;
619 
620   if (isa<llvm::PointerType>(Val->getType())) {
621     // If this is Pointer->Pointer avoid conversion to and from int.
622     if (isa<llvm::PointerType>(Ty))
623       return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
624 
625     // Convert the pointer to an integer so we can play with its width.
626     Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
627   }
628 
629   llvm::Type *DestIntTy = Ty;
630   if (isa<llvm::PointerType>(DestIntTy))
631     DestIntTy = CGF.IntPtrTy;
632 
633   if (Val->getType() != DestIntTy)
634     Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
635 
636   if (isa<llvm::PointerType>(Ty))
637     Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
638   return Val;
639 }
640 
641 
642 
643 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
644 /// a pointer to an object of type \arg Ty.
645 ///
/// This safely handles the case when the src type is smaller than the
/// destination type; in that situation the values of the bits that are not
/// present in the src are undefined.
649 static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
650                                       llvm::Type *Ty,
651                                       CodeGenFunction &CGF) {
652   llvm::Type *SrcTy =
653     cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
654 
655   // If SrcTy and Ty are the same, just do a load.
656   if (SrcTy == Ty)
657     return CGF.Builder.CreateLoad(SrcPtr);
658 
659   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
660 
661   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
662     SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
663     SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
664   }
665 
666   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
667 
668   // If the source and destination are integer or pointer types, just do an
669   // extension or truncation to the desired type.
670   if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
671       (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
672     llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
673     return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
674   }
675 
676   // If load is legal, just bitcast the src pointer.
677   if (SrcSize >= DstSize) {
678     // Generally SrcSize is never greater than DstSize, since this means we are
679     // losing bits. However, this can happen in cases where the structure has
680     // additional padding, for example due to a user specified alignment.
681     //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
684     llvm::Value *Casted =
685       CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
686     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
687     // FIXME: Use better alignment / avoid requiring aligned load.
688     Load->setAlignment(1);
689     return Load;
690   }
691 
692   // Otherwise do coercion through memory. This is stupid, but
693   // simple.
694   llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
695   llvm::Value *Casted =
696     CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
697   llvm::StoreInst *Store =
698     CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
699   // FIXME: Use better alignment / avoid requiring aligned store.
700   Store->setAlignment(1);
701   return CGF.Builder.CreateLoad(Tmp);
702 }
703 
704 // Function to store a first-class aggregate into memory.  We prefer to
705 // store the elements rather than the aggregate to be more friendly to
706 // fast-isel.
707 // FIXME: Do we need to recurse here?
708 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
709                           llvm::Value *DestPtr, bool DestIsVolatile,
710                           bool LowAlignment) {
711   // Prefer scalar stores to first-class aggregate stores.
712   if (llvm::StructType *STy =
713         dyn_cast<llvm::StructType>(Val->getType())) {
714     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
715       llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
716       llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
717       llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
718                                                     DestIsVolatile);
719       if (LowAlignment)
720         SI->setAlignment(1);
721     }
722   } else {
723     llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
724     if (LowAlignment)
725       SI->setAlignment(1);
726   }
727 }
728 
729 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
730 /// where the source and destination may have different types.
731 ///
732 /// This safely handles the case when the src type is larger than the
733 /// destination type; the upper bits of the src will be lost.
734 static void CreateCoercedStore(llvm::Value *Src,
735                                llvm::Value *DstPtr,
736                                bool DstIsVolatile,
737                                CodeGenFunction &CGF) {
738   llvm::Type *SrcTy = Src->getType();
739   llvm::Type *DstTy =
740     cast<llvm::PointerType>(DstPtr->getType())->getElementType();
741   if (SrcTy == DstTy) {
742     CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
743     return;
744   }
745 
746   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
747 
748   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
749     DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
750     DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
751   }
752 
753   // If the source and destination are integer or pointer types, just do an
754   // extension or truncation to the desired type.
755   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
756       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
757     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
758     CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
759     return;
760   }
761 
762   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
763 
764   // If store is legal, just bitcast the src pointer.
765   if (SrcSize <= DstSize) {
766     llvm::Value *Casted =
767       CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
768     // FIXME: Use better alignment / avoid requiring aligned store.
769     BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
770   } else {
771     // Otherwise do coercion through memory. This is stupid, but
772     // simple.
773 
774     // Generally SrcSize is never greater than DstSize, since this means we are
775     // losing bits. However, this can happen in cases where the structure has
776     // additional padding, for example due to a user specified alignment.
777     //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
780     llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
781     CGF.Builder.CreateStore(Src, Tmp);
782     llvm::Value *Casted =
783       CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
784     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
785     // FIXME: Use better alignment / avoid requiring aligned load.
786     Load->setAlignment(1);
787     CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
788   }
789 }
790 
791 /***/
792 
793 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
794   return FI.getReturnInfo().isIndirect();
795 }
796 
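/// Whether an Objective-C message send returning this type should use the
/// runtime's _fpret entry point (e.g. objc_msgSend_fpret on x86); the
/// target decides this per floating-point type.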
797 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
798   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
799     switch (BT->getKind()) {
800     default:
801       return false;
802     case BuiltinType::Float:
803       return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Float);
804     case BuiltinType::Double:
805       return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Double);
806     case BuiltinType::LongDouble:
807       return getContext().getTargetInfo().useObjCFPRetForRealType(
808         TargetInfo::LongDouble);
809     }
810   }
811 
812   return false;
813 }
814 
815 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
816   if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
817     if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
818       if (BT->getKind() == BuiltinType::LongDouble)
819         return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
820     }
821   }
822 
823   return false;
824 }
825 
826 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
827   const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
828   return GetFunctionType(FI);
829 }
830 
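/// Build the LLVM IR function type for the given ABI-lowered signature.
/// The result can differ substantially from the source-level type: an
/// indirect (sret) result becomes a leading pointer parameter, ignored
/// arguments disappear, and coerced aggregates may be flattened into their
/// scalar elements.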
831 llvm::FunctionType *
832 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
833 
834   bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
835   assert(Inserted && "Recursively being processed?");
836 
837   SmallVector<llvm::Type*, 8> argTypes;
838   llvm::Type *resultType = 0;
839 
840   const ABIArgInfo &retAI = FI.getReturnInfo();
841   switch (retAI.getKind()) {
842   case ABIArgInfo::Expand:
843     llvm_unreachable("Invalid ABI kind for return argument");
844 
845   case ABIArgInfo::Extend:
846   case ABIArgInfo::Direct:
847     resultType = retAI.getCoerceToType();
848     break;
849 
850   case ABIArgInfo::Indirect: {
851     assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
852     resultType = llvm::Type::getVoidTy(getLLVMContext());
853 
854     QualType ret = FI.getReturnType();
855     llvm::Type *ty = ConvertType(ret);
856     unsigned addressSpace = Context.getTargetAddressSpace(ret);
857     argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
858     break;
859   }
860 
861   case ABIArgInfo::Ignore:
862     resultType = llvm::Type::getVoidTy(getLLVMContext());
863     break;
864   }
865 
866   for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
867          ie = FI.arg_end(); it != ie; ++it) {
868     const ABIArgInfo &argAI = it->info;
869 
870     switch (argAI.getKind()) {
871     case ABIArgInfo::Ignore:
872       break;
873 
874     case ABIArgInfo::Indirect: {
875       // indirect arguments are always on the stack, which is addr space #0.
876       llvm::Type *LTy = ConvertTypeForMem(it->type);
877       argTypes.push_back(LTy->getPointerTo());
878       break;
879     }
880 
881     case ABIArgInfo::Extend:
882     case ABIArgInfo::Direct: {
883       // Insert a padding type to ensure proper alignment.
884       if (llvm::Type *PaddingType = argAI.getPaddingType())
885         argTypes.push_back(PaddingType);
886       // If the coerce-to type is a first class aggregate, flatten it.  Either
887       // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
889       llvm::Type *argType = argAI.getCoerceToType();
890       if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
891         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
892           argTypes.push_back(st->getElementType(i));
893       } else {
894         argTypes.push_back(argType);
895       }
896       break;
897     }
898 
899     case ABIArgInfo::Expand:
900       GetExpandedTypes(it->type, argTypes);
901       break;
902     }
903   }
904 
905   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
906   assert(Erased && "Not in set?");
907 
908   return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
909 }
910 
911 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
912   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
913   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
914 
915   if (!isFuncTypeConvertible(FPT))
916     return llvm::StructType::get(getLLVMContext());
917 
918   const CGFunctionInfo *Info;
919   if (isa<CXXDestructorDecl>(MD))
920     Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
921   else
922     Info = &arrangeCXXMethodDeclaration(MD);
923   return GetFunctionType(*Info);
924 }
925 
926 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
927                                            const Decl *TargetDecl,
928                                            AttributeListType &PAL,
929                                            unsigned &CallingConv) {
930   llvm::AttrBuilder FuncAttrs;
931   llvm::AttrBuilder RetAttrs;
932 
933   CallingConv = FI.getEffectiveCallingConvention();
934 
935   if (FI.isNoReturn())
936     FuncAttrs.addAttribute(llvm::Attributes::NoReturn);
937 
938   // FIXME: handle sseregparm someday...
939   if (TargetDecl) {
940     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
941       FuncAttrs.addAttribute(llvm::Attributes::ReturnsTwice);
942     if (TargetDecl->hasAttr<NoThrowAttr>())
943       FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
944     else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
945       const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
946       if (FPT && FPT->isNothrow(getContext()))
947         FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
948     }
949 
950     if (TargetDecl->hasAttr<NoReturnAttr>())
951       FuncAttrs.addAttribute(llvm::Attributes::NoReturn);
952 
953     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
954       FuncAttrs.addAttribute(llvm::Attributes::ReturnsTwice);
955 
956     // 'const' and 'pure' attribute functions are also nounwind.
957     if (TargetDecl->hasAttr<ConstAttr>()) {
958       FuncAttrs.addAttribute(llvm::Attributes::ReadNone);
959       FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
960     } else if (TargetDecl->hasAttr<PureAttr>()) {
961       FuncAttrs.addAttribute(llvm::Attributes::ReadOnly);
962       FuncAttrs.addAttribute(llvm::Attributes::NoUnwind);
963     }
964     if (TargetDecl->hasAttr<MallocAttr>())
965       RetAttrs.addAttribute(llvm::Attributes::NoAlias);
966   }
967 
968   if (CodeGenOpts.OptimizeSize)
969     FuncAttrs.addAttribute(llvm::Attributes::OptimizeForSize);
970   if (CodeGenOpts.DisableRedZone)
971     FuncAttrs.addAttribute(llvm::Attributes::NoRedZone);
972   if (CodeGenOpts.NoImplicitFloat)
973     FuncAttrs.addAttribute(llvm::Attributes::NoImplicitFloat);
974 
975   QualType RetTy = FI.getReturnType();
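  // Parameter attribute indices start at 1; the return value and the
  // function itself use the dedicated ReturnIndex and FunctionIndex slots
  // (see the AttributeWithIndex entries built below).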
976   unsigned Index = 1;
977   const ABIArgInfo &RetAI = FI.getReturnInfo();
978   switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attributes::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attributes::ZExt);
    break;
985   case ABIArgInfo::Direct:
986   case ABIArgInfo::Ignore:
987     break;
988 
989   case ABIArgInfo::Indirect: {
990     llvm::AttrBuilder SRETAttrs;
991     SRETAttrs.addAttribute(llvm::Attributes::StructRet);
992     if (RetAI.getInReg())
993       SRETAttrs.addAttribute(llvm::Attributes::InReg);
994     PAL.push_back(llvm::
995                   AttributeWithIndex::get(Index,
996                                          llvm::Attributes::get(getLLVMContext(),
997                                                                SRETAttrs)));
998 
999     ++Index;
1000     // sret disables readnone and readonly
1001     FuncAttrs.removeAttribute(llvm::Attributes::ReadOnly)
1002       .removeAttribute(llvm::Attributes::ReadNone);
1003     break;
1004   }
1005 
1006   case ABIArgInfo::Expand:
1007     llvm_unreachable("Invalid ABI kind for return argument");
1008   }
1009 
1010   if (RetAttrs.hasAttributes())
1011     PAL.push_back(llvm::
1012                   AttributeWithIndex::get(llvm::AttrListPtr::ReturnIndex,
1013                                          llvm::Attributes::get(getLLVMContext(),
1014                                                                RetAttrs)));
1015 
1016   for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1017          ie = FI.arg_end(); it != ie; ++it) {
1018     QualType ParamType = it->type;
1019     const ABIArgInfo &AI = it->info;
1020     llvm::AttrBuilder Attrs;
1021 
1022     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1023     // have the corresponding parameter variable.  It doesn't make
1024     // sense to do it here because parameters are so messed up.
1025     switch (AI.getKind()) {
1026     case ABIArgInfo::Extend:
1027       if (ParamType->isSignedIntegerOrEnumerationType())
1028         Attrs.addAttribute(llvm::Attributes::SExt);
1029       else if (ParamType->isUnsignedIntegerOrEnumerationType())
1030         Attrs.addAttribute(llvm::Attributes::ZExt);
1031       // FALL THROUGH
1032     case ABIArgInfo::Direct:
1033       if (AI.getInReg())
1034         Attrs.addAttribute(llvm::Attributes::InReg);
1035 
1036       // FIXME: handle sseregparm someday...
1037 
1038       // Increment Index if there is padding.
1039       Index += (AI.getPaddingType() != 0);
1040 
1041       if (llvm::StructType *STy =
1042           dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
1043         unsigned Extra = STy->getNumElements()-1;  // 1 will be added below.
1044         if (Attrs.hasAttributes())
1045           for (unsigned I = 0; I < Extra; ++I)
1046             PAL.push_back(llvm::AttributeWithIndex::get(Index + I,
1047                                          llvm::Attributes::get(getLLVMContext(),
1048                                                                Attrs)));
1049         Index += Extra;
1050       }
1051       break;
1052 
1053     case ABIArgInfo::Indirect:
1054       if (AI.getInReg())
1055         Attrs.addAttribute(llvm::Attributes::InReg);
1056 
1057       if (AI.getIndirectByVal())
1058         Attrs.addAttribute(llvm::Attributes::ByVal);
1059 
1060       Attrs.addAlignmentAttr(AI.getIndirectAlign());
1061 
1062       // byval disables readnone and readonly.
1063       FuncAttrs.removeAttribute(llvm::Attributes::ReadOnly)
1064         .removeAttribute(llvm::Attributes::ReadNone);
1065       break;
1066 
1067     case ABIArgInfo::Ignore:
1068       // Skip increment, no matching LLVM parameter.
1069       continue;
1070 
1071     case ABIArgInfo::Expand: {
1072       SmallVector<llvm::Type*, 8> types;
1073       // FIXME: This is rather inefficient. Do we ever actually need to do
1074       // anything here? The result should be just reconstructed on the other
1075       // side, so extension should be a non-issue.
1076       getTypes().GetExpandedTypes(ParamType, types);
1077       Index += types.size();
1078       continue;
1079     }
1080     }
1081 
1082     if (Attrs.hasAttributes())
1083       PAL.push_back(llvm::AttributeWithIndex::get(Index,
1084                                          llvm::Attributes::get(getLLVMContext(),
1085                                                                Attrs)));
1086     ++Index;
1087   }
1088   if (FuncAttrs.hasAttributes())
1089     PAL.push_back(llvm::
1090                   AttributeWithIndex::get(llvm::AttrListPtr::FunctionIndex,
1091                                          llvm::Attributes::get(getLLVMContext(),
1092                                                                FuncAttrs)));
1093 }
1094 
1095 /// An argument came in as a promoted argument; demote it back to its
1096 /// declared type.
1097 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
1098                                          const VarDecl *var,
1099                                          llvm::Value *value) {
1100   llvm::Type *varType = CGF.ConvertType(var->getType());
1101 
1102   // This can happen with promotions that actually don't change the
1103   // underlying type, like the enum promotions.
1104   if (value->getType() == varType) return value;
1105 
1106   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
1107          && "unexpected promotion type");
1108 
1109   if (isa<llvm::IntegerType>(varType))
1110     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
1111 
1112   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
1113 }
1114 
1115 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
1116                                          llvm::Function *Fn,
1117                                          const FunctionArgList &Args) {
1118   // If this is an implicit-return-zero function, go ahead and
1119   // initialize the return value.  TODO: it might be nice to have
1120   // a more general mechanism for this that didn't require synthesized
1121   // return statements.
1122   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
1123     if (FD->hasImplicitReturnZero()) {
1124       QualType RetTy = FD->getResultType().getUnqualifiedType();
1125       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
1126       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
1127       Builder.CreateStore(Zero, ReturnValue);
1128     }
1129   }
1130 
1131   // FIXME: We no longer need the types from FunctionArgList; lift up and
1132   // simplify.
1133 
1134   // Emit allocs for param decls.  Give the LLVM Argument nodes names.
1135   llvm::Function::arg_iterator AI = Fn->arg_begin();
1136 
1137   // Name the struct return argument.
1138   if (CGM.ReturnTypeUsesSRet(FI)) {
1139     AI->setName("agg.result");
1140     AI->addAttr(llvm::Attributes::get(getLLVMContext(),
1141                                       llvm::Attributes::NoAlias));
1142     ++AI;
1143   }
1144 
1145   assert(FI.arg_size() == Args.size() &&
1146          "Mismatch between function signature & arguments.");
1147   unsigned ArgNo = 1;
1148   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
1149   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1150        i != e; ++i, ++info_it, ++ArgNo) {
1151     const VarDecl *Arg = *i;
1152     QualType Ty = info_it->type;
1153     const ABIArgInfo &ArgI = info_it->info;
1154 
1155     bool isPromoted =
1156       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
1157 
1158     switch (ArgI.getKind()) {
1159     case ABIArgInfo::Indirect: {
1160       llvm::Value *V = AI;
1161 
1162       if (hasAggregateLLVMType(Ty)) {
1163         // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested.
1165         if (ArgI.getIndirectRealign()) {
1166           llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
1167 
1168           // Copy from the incoming argument pointer to the temporary with the
1169           // appropriate alignment.
1170           //
1171           // FIXME: We should have a common utility for generating an aggregate
1172           // copy.
1173           llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
1174           CharUnits Size = getContext().getTypeSizeInChars(Ty);
1175           llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
1176           llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
1177           Builder.CreateMemCpy(Dst,
1178                                Src,
1179                                llvm::ConstantInt::get(IntPtrTy,
1180                                                       Size.getQuantity()),
1181                                ArgI.getIndirectAlign(),
1182                                false);
1183           V = AlignedTemp;
1184         }
1185       } else {
1186         // Load scalar value from indirect argument.
1187         CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
1188         V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);
1189 
1190         if (isPromoted)
1191           V = emitArgumentDemotion(*this, Arg, V);
1192       }
1193       EmitParmDecl(*Arg, V, ArgNo);
1194       break;
1195     }
1196 
1197     case ABIArgInfo::Extend:
1198     case ABIArgInfo::Direct: {
1199       // Skip the dummy padding argument.
1200       if (ArgI.getPaddingType())
1201         ++AI;
1202 
1203       // If we have the trivial case, handle it with no muss and fuss.
1204       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
1205           ArgI.getCoerceToType() == ConvertType(Ty) &&
1206           ArgI.getDirectOffset() == 0) {
1207         assert(AI != Fn->arg_end() && "Argument mismatch!");
1208         llvm::Value *V = AI;
1209 
1210         if (Arg->getType().isRestrictQualified())
1211           AI->addAttr(llvm::Attributes::get(getLLVMContext(),
1212                                             llvm::Attributes::NoAlias));
1213 
1214         // Ensure the argument is the correct type.
1215         if (V->getType() != ArgI.getCoerceToType())
1216           V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
1217 
1218         if (isPromoted)
1219           V = emitArgumentDemotion(*this, Arg, V);
1220 
1221         EmitParmDecl(*Arg, V, ArgNo);
1222         break;
1223       }
1224 
1225       llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
1226 
1227       // The alignment we need to use is the max of the requested alignment for
1228       // the argument plus the alignment required by our access code below.
1229       unsigned AlignmentToUse =
1230         CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
1231       AlignmentToUse = std::max(AlignmentToUse,
1232                         (unsigned)getContext().getDeclAlign(Arg).getQuantity());
1233 
1234       Alloca->setAlignment(AlignmentToUse);
1235       llvm::Value *V = Alloca;
1236       llvm::Value *Ptr = V;    // Pointer to store into.
1237 
1238       // If the value is offset in memory, apply the offset now.
1239       if (unsigned Offs = ArgI.getDirectOffset()) {
1240         Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
1241         Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
1242         Ptr = Builder.CreateBitCast(Ptr,
1243                           llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
1244       }
1245 
1246       // If the coerce-to type is a first class aggregate, we flatten it and
1247       // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
1249       llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
1250       if (STy && STy->getNumElements() > 1) {
1251         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
1252         llvm::Type *DstTy =
1253           cast<llvm::PointerType>(Ptr->getType())->getElementType();
1254         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
1255 
1256         if (SrcSize <= DstSize) {
1257           Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
1258 
1259           for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1260             assert(AI != Fn->arg_end() && "Argument mismatch!");
1261             AI->setName(Arg->getName() + ".coerce" + Twine(i));
1262             llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
1263             Builder.CreateStore(AI++, EltPtr);
1264           }
1265         } else {
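          // The coerced type is larger than the in-memory parameter type.
          // Reassemble the pieces in a temporary of the coerced type, then
          // copy only the bytes that belong to the parameter.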
1266           llvm::AllocaInst *TempAlloca =
1267             CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
1268           TempAlloca->setAlignment(AlignmentToUse);
1269           llvm::Value *TempV = TempAlloca;
1270 
1271           for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1272             assert(AI != Fn->arg_end() && "Argument mismatch!");
1273             AI->setName(Arg->getName() + ".coerce" + Twine(i));
1274             llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
1275             Builder.CreateStore(AI++, EltPtr);
1276           }
1277 
1278           Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
1279         }
1280       } else {
1281         // Simple case, just do a coerced store of the argument into the alloca.
1282         assert(AI != Fn->arg_end() && "Argument mismatch!");
1283         AI->setName(Arg->getName() + ".coerce");
1284         CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
1285       }
1286 
1287 
1288       // Match to what EmitParmDecl is expecting for this type.
1289       if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
1290         V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
1291         if (isPromoted)
1292           V = emitArgumentDemotion(*this, Arg, V);
1293       }
1294       EmitParmDecl(*Arg, V, ArgNo);
1295       continue;  // Skip ++AI increment, already done.
1296     }
1297 
1298     case ABIArgInfo::Expand: {
1299       // If this structure was expanded into multiple arguments then
1300       // we need to create a temporary and reconstruct it from the
1301       // arguments.
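      // For example, if the ABI expands a parameter of type
      // 'struct { int a; float b; }' into separate i32 and float IR arguments
      // (named "<arg>.0" and "<arg>.1" below), we reassemble them into a
      // local temporary so the body can use the parameter normally.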
1302       llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
1303       CharUnits Align = getContext().getDeclAlign(Arg);
1304       Alloca->setAlignment(Align.getQuantity());
1305       LValue LV = MakeAddrLValue(Alloca, Ty, Align);
1306       llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
1307       EmitParmDecl(*Arg, Alloca, ArgNo);
1308 
1309       // Name the arguments used in expansion and increment AI.
1310       unsigned Index = 0;
1311       for (; AI != End; ++AI, ++Index)
1312         AI->setName(Arg->getName() + "." + Twine(Index));
1313       continue;
1314     }
1315 
1316     case ABIArgInfo::Ignore:
1317       // Initialize the local variable appropriately.
1318       if (hasAggregateLLVMType(Ty))
1319         EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
1320       else
1321         EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
1322                      ArgNo);
1323 
1324       // Skip increment, no matching LLVM parameter.
1325       continue;
1326     }
1327 
1328     ++AI;
1329   }
1330   assert(AI == Fn->arg_end() && "Argument mismatch!");
1331 }
1332 
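/// Starting at 'insn', erase unused bitcasts: while the current instruction
/// has no uses and is a bitcast, erase it and continue with its operand.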
1333 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
1334   while (insn->use_empty()) {
1335     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
1336     if (!bitcast) return;
1337 
1338     // This is "safe" because we would have used a ConstantExpr otherwise.
1339     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
1340     bitcast->eraseFromParent();
1341   }
1342 }
1343 
1344 /// Try to emit a fused autorelease of a return result.
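/// For example, when the value being returned was just produced by
///   %1 = call i8* @objc_retain(i8* %x)
/// the separate retain + autorelease can be fused into a single
/// objc_retainAutoreleaseReturnValue call.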
1345 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
1346                                                     llvm::Value *result) {
  // The result must be the last instruction emitted in the current block.
1348   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
1349   if (BB->empty()) return 0;
1350   if (&BB->back() != result) return 0;
1351 
1352   llvm::Type *resultType = result->getType();
1353 
1354   // result is in a BasicBlock and is therefore an Instruction.
1355   llvm::Instruction *generator = cast<llvm::Instruction>(result);
1356 
1357   SmallVector<llvm::Instruction*,4> insnsToKill;
1358 
1359   // Look for:
1360   //  %generator = bitcast %type1* %generator2 to %type2*
1361   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
1362     // We would have emitted this as a constant if the operand weren't
1363     // an Instruction.
1364     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
1365 
1366     // Require the generator to be immediately followed by the cast.
1367     if (generator->getNextNode() != bitcast)
1368       return 0;
1369 
1370     insnsToKill.push_back(bitcast);
1371   }
1372 
1373   // Look for:
1374   //   %generator = call i8* @objc_retain(i8* %originalResult)
1375   // or
1376   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
1377   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
1378   if (!call) return 0;
1379 
1380   bool doRetainAutorelease;
1381 
1382   if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
1383     doRetainAutorelease = true;
1384   } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
1385                                           .objc_retainAutoreleasedReturnValue) {
1386     doRetainAutorelease = false;
1387 
1388     // If we emitted an assembly marker for this call (and the
1389     // ARCEntrypoints field should have been set if so), go looking
1390     // for that call.  If we can't find it, we can't do this
1391     // optimization.  But it should always be the immediately previous
1392     // instruction, unless we needed bitcasts around the call.
1393     if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
1394       llvm::Instruction *prev = call->getPrevNode();
1395       assert(prev);
1396       if (isa<llvm::BitCastInst>(prev)) {
1397         prev = prev->getPrevNode();
1398         assert(prev);
1399       }
1400       assert(isa<llvm::CallInst>(prev));
1401       assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
1402                CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
1403       insnsToKill.push_back(prev);
1404     }
1405   } else {
1406     return 0;
1407   }
1408 
1409   result = call->getArgOperand(0);
1410   insnsToKill.push_back(call);
1411 
1412   // Keep killing bitcasts, for sanity.  Note that we no longer care
1413   // about precise ordering as long as there's exactly one use.
1414   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
1415     if (!bitcast->hasOneUse()) break;
1416     insnsToKill.push_back(bitcast);
1417     result = bitcast->getOperand(0);
1418   }
1419 
1420   // Delete all the unnecessary instructions, from latest to earliest.
1421   for (SmallVectorImpl<llvm::Instruction*>::iterator
1422          i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
1423     (*i)->eraseFromParent();
1424 
1425   // Do the fused retain/autorelease if we were asked to.
1426   if (doRetainAutorelease)
1427     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
1428 
1429   // Cast back to the result type.
1430   return CGF.Builder.CreateBitCast(result, resultType);
1431 }
1432 
1433 /// If this is a +1 of the value of an immutable 'self', remove it.
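/// This typically fires on 'return self;' in a method whose 'self' is never
/// reassigned (e.g. during -dealloc), letting us drop the retain instead of
/// pairing it with an autorelease.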
1434 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
1435                                           llvm::Value *result) {
1436   // This is only applicable to a method with an immutable 'self'.
1437   const ObjCMethodDecl *method =
1438     dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
1439   if (!method) return 0;
1440   const VarDecl *self = method->getSelfDecl();
1441   if (!self->getType().isConstQualified()) return 0;
1442 
1443   // Look for a retain call.
1444   llvm::CallInst *retainCall =
1445     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
1446   if (!retainCall ||
1447       retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
1448     return 0;
1449 
1450   // Look for an ordinary load of 'self'.
1451   llvm::Value *retainedValue = retainCall->getArgOperand(0);
1452   llvm::LoadInst *load =
1453     dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
1454   if (!load || load->isAtomic() || load->isVolatile() ||
1455       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
1456     return 0;
1457 
1458   // Okay!  Burn it all down.  This relies for correctness on the
1459   // assumption that the retain is emitted as part of the return and
1460   // that thereafter everything is used "linearly".
1461   llvm::Type *resultType = result->getType();
1462   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
1463   assert(retainCall->use_empty());
1464   retainCall->eraseFromParent();
1465   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
1466 
1467   return CGF.Builder.CreateBitCast(load, resultType);
1468 }
1469 
1470 /// Emit an ARC autorelease of the result of a function.
1471 ///
1472 /// \return the value to actually return from the function
1473 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
1474                                             llvm::Value *result) {
1475   // If we're returning 'self', kill the initial retain.  This is a
1476   // heuristic attempt to "encourage correctness" in the really unfortunate
1477   // case where we have a return of self during a dealloc and we desperately
1478   // need to avoid the possible autorelease.
1479   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
1480     return self;
1481 
1482   // At -O0, try to emit a fused retain/autorelease.
1483   if (CGF.shouldUseFusedARCCalls())
1484     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
1485       return fused;
1486 
1487   return CGF.EmitARCAutoreleaseReturnValue(result);
1488 }
1489 
1490 /// Heuristically search for a dominating store to the return-value slot.
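/// If one is found, the epilogue can return the stored value directly,
/// delete the now-dead store, and usually delete the return-value alloca,
/// instead of emitting a load of the alloca.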
1491 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
1492   // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.  This can happen because of
  // how we generate implicit returns; it can also happen with noreturn
  // cleanups.
1496   if (!CGF.ReturnValue->hasOneUse()) {
1497     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1498     if (IP->empty()) return 0;
1499     llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
1500     if (!store) return 0;
1501     if (store->getPointerOperand() != CGF.ReturnValue) return 0;
1502     assert(!store->isAtomic() && !store->isVolatile()); // see below
1503     return store;
1504   }
1505 
1506   llvm::StoreInst *store =
1507     dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
1508   if (!store) return 0;
1509 
1510   // These aren't actually possible for non-coerced returns, and we
1511   // only care about non-coerced returns on this code path.
1512   assert(!store->isAtomic() && !store->isVolatile());
1513 
  // Now do a quick-and-dirty dominance check: just walk up the
1515   // single-predecessors chain from the current insertion point.
1516   llvm::BasicBlock *StoreBB = store->getParent();
1517   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1518   while (IP != StoreBB) {
1519     if (!(IP = IP->getSinglePredecessor()))
1520       return 0;
1521   }
1522 
1523   // Okay, the store's basic block dominates the insertion point; we
1524   // can do our thing.
1525   return store;
1526 }
1527 
1528 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
1529   // Functions with no result always return void.
1530   if (ReturnValue == 0) {
1531     Builder.CreateRetVoid();
1532     return;
1533   }
1534 
1535   llvm::DebugLoc RetDbgLoc;
1536   llvm::Value *RV = 0;
1537   QualType RetTy = FI.getReturnType();
1538   const ABIArgInfo &RetAI = FI.getReturnInfo();
1539 
1540   switch (RetAI.getKind()) {
1541   case ABIArgInfo::Indirect: {
1542     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
1543     if (RetTy->isAnyComplexType()) {
1544       ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
1545       StoreComplexToAddr(RT, CurFn->arg_begin(), false);
1546     } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
1548     } else {
1549       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
1550                         false, Alignment, RetTy);
1551     }
1552     break;
1553   }
1554 
1555   case ABIArgInfo::Extend:
1556   case ABIArgInfo::Direct:
1557     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
1558         RetAI.getDirectOffset() == 0) {
      // The internal return value temp always has pointer-to-return-type
      // type; just do a load.
1561 
1562       // If there is a dominating store to ReturnValue, we can elide
1563       // the load, zap the store, and usually zap the alloca.
1564       if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
1565         // Get the stored value and nuke the now-dead store.
1566         RetDbgLoc = SI->getDebugLoc();
1567         RV = SI->getValueOperand();
1568         SI->eraseFromParent();
1569 
1570         // If that was the only use of the return value, nuke it as well now.
1571         if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
1572           cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
1573           ReturnValue = 0;
1574         }
1575 
1576       // Otherwise, we have to do a simple load.
1577       } else {
1578         RV = Builder.CreateLoad(ReturnValue);
1579       }
1580     } else {
1581       llvm::Value *V = ReturnValue;
1582       // If the value is offset in memory, apply the offset now.
1583       if (unsigned Offs = RetAI.getDirectOffset()) {
1584         V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
1585         V = Builder.CreateConstGEP1_32(V, Offs);
1586         V = Builder.CreateBitCast(V,
1587                          llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
1588       }
1589 
1590       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
1591     }
1592 
1593     // In ARC, end functions that return a retainable type with a call
1594     // to objc_autoreleaseReturnValue.
1595     if (AutoreleaseResult) {
1596       assert(getLangOpts().ObjCAutoRefCount &&
1597              !FI.isReturnsRetained() &&
1598              RetTy->isObjCRetainableType());
1599       RV = emitAutoreleaseOfResult(*this, RV);
1600     }
1601 
1602     break;
1603 
1604   case ABIArgInfo::Ignore:
1605     break;
1606 
1607   case ABIArgInfo::Expand:
1608     llvm_unreachable("Invalid ABI kind for return argument");
1609   }
1610 
1611   llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
1612   if (!RetDbgLoc.isUnknown())
1613     Ret->setDebugLoc(RetDbgLoc);
1614 }
1615 
1616 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
1617                                           const VarDecl *param) {
1618   // StartFunction converted the ABI-lowered parameter(s) into a
1619   // local alloca.  We need to turn that into an r-value suitable
1620   // for EmitCall.
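  // (Delegate calls arise when a function simply forwards its own parameters
  // to another call, e.g. a delegating constructor forwarding to another
  // constructor.)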
1621   llvm::Value *local = GetAddrOfLocalVar(param);
1622 
1623   QualType type = param->getType();
1624 
1625   // For the most part, we just need to load the alloca, except:
1626   // 1) aggregate r-values are actually pointers to temporaries, and
1627   // 2) references to aggregates are pointers directly to the aggregate.
1628   // I don't know why references to non-aggregates are different here.
1629   if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
1630     if (hasAggregateLLVMType(ref->getPointeeType()))
1631       return args.add(RValue::getAggregate(local), type);
1632 
1633     // Locals which are references to scalars are represented
1634     // with allocas holding the pointer.
1635     return args.add(RValue::get(Builder.CreateLoad(local)), type);
1636   }
1637 
1638   if (type->isAnyComplexType()) {
1639     ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
1640     return args.add(RValue::getComplex(complex), type);
1641   }
1642 
1643   if (hasAggregateLLVMType(type))
1644     return args.add(RValue::getAggregate(local), type);
1645 
1646   unsigned alignment = getContext().getDeclAlign(param).getQuantity();
1647   llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
1648   return args.add(RValue::get(value), type);
1649 }
1650 
1651 static bool isProvablyNull(llvm::Value *addr) {
1652   return isa<llvm::ConstantPointerNull>(addr);
1653 }
1654 
1655 static bool isProvablyNonNull(llvm::Value *addr) {
1656   return isa<llvm::AllocaInst>(addr);
1657 }
1658 
1659 /// Emit the actual writing-back of a writeback.
1660 static void emitWriteback(CodeGenFunction &CGF,
1661                           const CallArgList::Writeback &writeback) {
1662   llvm::Value *srcAddr = writeback.Address;
1663   assert(!isProvablyNull(srcAddr) &&
1664          "shouldn't have writeback for provably null argument");
1665 
1666   llvm::BasicBlock *contBB = 0;
1667 
1668   // If the argument wasn't provably non-null, we need to null check
1669   // before doing the store.
1670   bool provablyNonNull = isProvablyNonNull(srcAddr);
1671   if (!provablyNonNull) {
1672     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
1673     contBB = CGF.createBasicBlock("icr.done");
1674 
1675     llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1676     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
1677     CGF.EmitBlock(writebackBB);
1678   }
1679 
  // Load the value to write back.
1681   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
1682 
1683   // Cast it back, in case we're writing an id to a Foo* or something.
1684   value = CGF.Builder.CreateBitCast(value,
1685                cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
1686                             "icr.writeback-cast");
1687 
1688   // Perform the writeback.
1689   QualType srcAddrType = writeback.AddressType;
1690   CGF.EmitStoreThroughLValue(RValue::get(value),
1691                              CGF.MakeAddrLValue(srcAddr, srcAddrType));
1692 
1693   // Jump to the continuation block.
1694   if (!provablyNonNull)
1695     CGF.EmitBlock(contBB);
1696 }
1697 
1698 static void emitWritebacks(CodeGenFunction &CGF,
1699                            const CallArgList &args) {
1700   for (CallArgList::writeback_iterator
1701          i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
1702     emitWriteback(CGF, *i);
1703 }
1704 
/// Emit an argument that's being passed call-by-writeback.  That is,
/// we pass the address of a temporary rather than the given l-value, and
/// copy the temporary's final value back into the l-value after the call.
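/// A typical case is passing '&err', where 'err' is NSError * __strong but
/// the parameter is NSError * __autoreleasing *: we pass the address of a
/// temporary and write its value back into 'err' after the call.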
1707 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
1708                              const ObjCIndirectCopyRestoreExpr *CRE) {
1709   llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
1710 
1711   // The dest and src types don't necessarily match in LLVM terms
1712   // because of the crazy ObjC compatibility rules.
1713 
1714   llvm::PointerType *destType =
1715     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
1716 
1717   // If the address is a constant null, just pass the appropriate null.
1718   if (isProvablyNull(srcAddr)) {
1719     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
1720              CRE->getType());
1721     return;
1722   }
1723 
1724   QualType srcAddrType =
1725     CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
1726 
1727   // Create the temporary.
1728   llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
1729                                            "icr.temp");
1730 
1731   // Zero-initialize it if we're not doing a copy-initialization.
1732   bool shouldCopy = CRE->shouldCopy();
1733   if (!shouldCopy) {
1734     llvm::Value *null =
1735       llvm::ConstantPointerNull::get(
1736         cast<llvm::PointerType>(destType->getElementType()));
1737     CGF.Builder.CreateStore(null, temp);
1738   }
1739 
1740   llvm::BasicBlock *contBB = 0;
1741 
  // If the address is *not* known to be non-null, we have to null-check it
  // and select between passing null and passing the temporary.
  llvm::Value *finalArgument;
1744 
1745   bool provablyNonNull = isProvablyNonNull(srcAddr);
1746   if (provablyNonNull) {
1747     finalArgument = temp;
1748   } else {
1749     llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1750 
1751     finalArgument = CGF.Builder.CreateSelect(isNull,
1752                                    llvm::ConstantPointerNull::get(destType),
1753                                              temp, "icr.argument");
1754 
1755     // If we need to copy, then the load has to be conditional, which
1756     // means we need control flow.
1757     if (shouldCopy) {
1758       contBB = CGF.createBasicBlock("icr.cont");
1759       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
1760       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
1761       CGF.EmitBlock(copyBB);
1762     }
1763   }
1764 
1765   // Perform a copy if necessary.
1766   if (shouldCopy) {
1767     LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
1768     RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
1769     assert(srcRV.isScalar());
1770 
1771     llvm::Value *src = srcRV.getScalarVal();
1772     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
1773                                     "icr.cast");
1774 
1775     // Use an ordinary store, not a store-to-lvalue.
1776     CGF.Builder.CreateStore(src, temp);
1777   }
1778 
1779   // Finish the control flow if we needed it.
1780   if (shouldCopy && !provablyNonNull)
1781     CGF.EmitBlock(contBB);
1782 
1783   args.addWriteback(srcAddr, srcAddrType, temp);
1784   args.add(RValue::get(finalArgument), CRE->getType());
1785 }
1786 
1787 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
1788                                   QualType type) {
1789   if (const ObjCIndirectCopyRestoreExpr *CRE
1790         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
1791     assert(getContext().getLangOpts().ObjCAutoRefCount);
1792     assert(getContext().hasSameType(E->getType(), type));
1793     return emitWritebackArg(*this, args, CRE);
1794   }
1795 
1796   assert(type->isReferenceType() == E->isGLValue() &&
1797          "reference binding to unmaterialized r-value!");
1798 
1799   if (E->isGLValue()) {
1800     assert(E->getObjectKind() == OK_Ordinary);
1801     return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
1802                     type);
1803   }
1804 
1805   if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
1806       isa<ImplicitCastExpr>(E) &&
1807       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
1808     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
1809     assert(L.isSimple());
1810     args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
1811     return;
1812   }
1813 
1814   args.add(EmitAnyExprToTemp(E), type);
1815 }
1816 
1817 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
1818 // optimizer it can aggressively ignore unwind edges.
1819 void
1820 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
1821   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
1822       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
1823     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
1824                       CGM.getNoObjCARCExceptionsMetadata());
1825 }
1826 
1827 /// Emits a call or invoke instruction to the given function, depending
1828 /// on the current state of the EH stack.
1829 llvm::CallSite
1830 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
1831                                   ArrayRef<llvm::Value *> Args,
1832                                   const Twine &Name) {
1833   llvm::BasicBlock *InvokeDest = getInvokeDest();
1834 
1835   llvm::Instruction *Inst;
1836   if (!InvokeDest)
1837     Inst = Builder.CreateCall(Callee, Args, Name);
1838   else {
1839     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
1840     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
1841     EmitBlock(ContBB);
1842   }
1843 
1844   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
1845   // optimizer it can aggressively ignore unwind edges.
1846   if (CGM.getLangOpts().ObjCAutoRefCount)
1847     AddObjCARCExceptionMetadata(Inst);
1848 
1849   return Inst;
1850 }
1851 
1852 llvm::CallSite
1853 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
1854                                   const Twine &Name) {
1855   return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
1856 }
1857 
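/// In builds with assertions enabled, check that the IR value about to be
/// passed has the type the callee expects at this position (or that the
/// callee is varargs); advance the IR argument index either way.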
1858 static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
1859                             llvm::FunctionType *FTy) {
1860   if (ArgNo < FTy->getNumParams())
1861     assert(Elt->getType() == FTy->getParamType(ArgNo));
1862   else
1863     assert(FTy->isVarArg());
1864   ++ArgNo;
1865 }
1866 
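/// Recursively expand an Expand-classified argument into individual IR
/// arguments: constant arrays element-by-element, records field-by-field
/// (unions via their largest field only), complex values as two scalars,
/// and anything else as a single (possibly bitcast) scalar.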
1867 void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
1868                                        SmallVector<llvm::Value*,16> &Args,
1869                                        llvm::FunctionType *IRFuncTy) {
1870   if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
1871     unsigned NumElts = AT->getSize().getZExtValue();
1872     QualType EltTy = AT->getElementType();
1873     llvm::Value *Addr = RV.getAggregateAddr();
1874     for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
1875       llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
1876       LValue LV = MakeAddrLValue(EltAddr, EltTy);
1877       RValue EltRV;
1878       if (EltTy->isAnyComplexType())
1879         // FIXME: Volatile?
1880         EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
1881       else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
1882         EltRV = LV.asAggregateRValue();
1883       else
1884         EltRV = EmitLoadOfLValue(LV);
1885       ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
1886     }
1887   } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
1888     RecordDecl *RD = RT->getDecl();
1889     assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
1890     LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);
1891 
1892     if (RD->isUnion()) {
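      // Only the largest field of the union is passed; its storage covers
      // every other member.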
1893       const FieldDecl *LargestFD = 0;
1894       CharUnits UnionSize = CharUnits::Zero();
1895 
1896       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1897            i != e; ++i) {
1898         const FieldDecl *FD = *i;
1899         assert(!FD->isBitField() &&
1900                "Cannot expand structure with bit-field members.");
1901         CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
1902         if (UnionSize < FieldSize) {
1903           UnionSize = FieldSize;
1904           LargestFD = FD;
1905         }
1906       }
1907       if (LargestFD) {
1908         RValue FldRV = EmitRValueForField(LV, LargestFD);
1909         ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
1910       }
1911     } else {
1912       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1913            i != e; ++i) {
1914         FieldDecl *FD = *i;
1915 
1916         RValue FldRV = EmitRValueForField(LV, FD);
1917         ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
1918       }
1919     }
1920   } else if (Ty->isAnyComplexType()) {
1921     ComplexPairTy CV = RV.getComplexVal();
1922     Args.push_back(CV.first);
1923     Args.push_back(CV.second);
1924   } else {
1925     assert(RV.isScalar() &&
1926            "Unexpected non-scalar rvalue during struct expansion.");
1927 
1928     // Insert a bitcast as needed.
1929     llvm::Value *V = RV.getScalarVal();
1930     if (Args.size() < IRFuncTy->getNumParams() &&
1931         V->getType() != IRFuncTy->getParamType(Args.size()))
1932       V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
1933 
1934     Args.push_back(V);
1935   }
1936 }
1937 
1939 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
1940                                  llvm::Value *Callee,
1941                                  ReturnValueSlot ReturnValue,
1942                                  const CallArgList &CallArgs,
1943                                  const Decl *TargetDecl,
1944                                  llvm::Instruction **callOrInvoke) {
1945   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
1946   SmallVector<llvm::Value*, 16> Args;
1947 
1948   // Handle struct-return functions by passing a pointer to the
1949   // location that we would like to return into.
1950   QualType RetTy = CallInfo.getReturnType();
1951   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
1952 
1953   // IRArgNo - Keep track of the argument number in the callee we're looking at.
1954   unsigned IRArgNo = 0;
1955   llvm::FunctionType *IRFuncTy =
1956     cast<llvm::FunctionType>(
1957                   cast<llvm::PointerType>(Callee->getType())->getElementType());
1958 
1959   // If the call returns a temporary with struct return, create a temporary
1960   // alloca to hold the result, unless one is given to us.
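  // For example, a function returning a large struct by value takes a hidden
  // leading parameter that points at the result slot; we pass either the
  // slot the caller gave us or a fresh temporary.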
1961   if (CGM.ReturnTypeUsesSRet(CallInfo)) {
1962     llvm::Value *Value = ReturnValue.getValue();
1963     if (!Value)
1964       Value = CreateMemTemp(RetTy);
1965     Args.push_back(Value);
1966     checkArgMatches(Value, IRArgNo, IRFuncTy);
1967   }
1968 
1969   assert(CallInfo.arg_size() == CallArgs.size() &&
1970          "Mismatch between function signature & arguments.");
1971   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
1972   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
1973        I != E; ++I, ++info_it) {
1974     const ABIArgInfo &ArgInfo = info_it->info;
1975     RValue RV = I->RV;
1976 
1977     unsigned TypeAlign =
1978       getContext().getTypeAlignInChars(I->Ty).getQuantity();
1979     switch (ArgInfo.getKind()) {
1980     case ABIArgInfo::Indirect: {
1981       if (RV.isScalar() || RV.isComplex()) {
1982         // Make a temporary alloca to pass the argument.
1983         llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
1984         if (ArgInfo.getIndirectAlign() > AI->getAlignment())
1985           AI->setAlignment(ArgInfo.getIndirectAlign());
1986         Args.push_back(AI);
1987 
1988         if (RV.isScalar())
1989           EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
1990                             TypeAlign, I->Ty);
1991         else
1992           StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
1993 
1994         // Validate argument match.
1995         checkArgMatches(AI, IRArgNo, IRFuncTy);
1996       } else {
1997         // We want to avoid creating an unnecessary temporary+copy here;
1998         // however, we need one in two cases:
1999         // 1. If the argument is not byval, and we are required to copy the
2000         //    source.  (This case doesn't occur on any common architecture.)
2001         // 2. If the argument is byval, RV is not sufficiently aligned, and
2002         //    we cannot force it to be sufficiently aligned.
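        // (Case 2 can arise, for example, when a byval argument requires
        // 16-byte alignment but the source aggregate is only known to be
        // 4-byte aligned and cannot be realigned in place.)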
2003         llvm::Value *Addr = RV.getAggregateAddr();
2004         unsigned Align = ArgInfo.getIndirectAlign();
2005         const llvm::DataLayout *TD = &CGM.getDataLayout();
2006         if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
2007             (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
2008              llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
2009           // Create an aligned temporary, and copy to it.
2010           llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
2011           if (Align > AI->getAlignment())
2012             AI->setAlignment(Align);
2013           Args.push_back(AI);
2014           EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
2015 
2016           // Validate argument match.
2017           checkArgMatches(AI, IRArgNo, IRFuncTy);
2018         } else {
2019           // Skip the extra memcpy call.
2020           Args.push_back(Addr);
2021 
2022           // Validate argument match.
2023           checkArgMatches(Addr, IRArgNo, IRFuncTy);
2024         }
2025       }
2026       break;
2027     }
2028 
2029     case ABIArgInfo::Ignore:
2030       break;
2031 
2032     case ABIArgInfo::Extend:
2033     case ABIArgInfo::Direct: {
2034       // Insert a padding argument to ensure proper alignment.
2035       if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
2036         Args.push_back(llvm::UndefValue::get(PaddingType));
2037         ++IRArgNo;
2038       }
2039 
2040       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
2041           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
2042           ArgInfo.getDirectOffset() == 0) {
2043         llvm::Value *V;
2044         if (RV.isScalar())
2045           V = RV.getScalarVal();
2046         else
2047           V = Builder.CreateLoad(RV.getAggregateAddr());
2048 
2049         // If the argument doesn't match, perform a bitcast to coerce it.  This
2050         // can happen due to trivial type mismatches.
2051         if (IRArgNo < IRFuncTy->getNumParams() &&
2052             V->getType() != IRFuncTy->getParamType(IRArgNo))
2053           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
2054         Args.push_back(V);
2055 
2056         checkArgMatches(V, IRArgNo, IRFuncTy);
2057         break;
2058       }
2059 
2060       // FIXME: Avoid the conversion through memory if possible.
2061       llvm::Value *SrcPtr;
2062       if (RV.isScalar()) {
2063         SrcPtr = CreateMemTemp(I->Ty, "coerce");
2064         EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
2065       } else if (RV.isComplex()) {
2066         SrcPtr = CreateMemTemp(I->Ty, "coerce");
2067         StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
2068       } else
2069         SrcPtr = RV.getAggregateAddr();
2070 
2071       // If the value is offset in memory, apply the offset now.
2072       if (unsigned Offs = ArgInfo.getDirectOffset()) {
2073         SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
2074         SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
2075         SrcPtr = Builder.CreateBitCast(SrcPtr,
2076                        llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
2077 
2078       }
2079 
2080       // If the coerce-to type is a first class aggregate, we flatten it and
2081       // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally prefer scalar values to FCAs.
2083       if (llvm::StructType *STy =
2084             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
2085         llvm::Type *SrcTy =
2086           cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
2087         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
2088         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
2089 
2090         // If the source type is smaller than the destination type of the
2091         // coerce-to logic, copy the source value into a temp alloca the size
2092         // of the destination type to allow loading all of it. The bits past
2093         // the source value are left undef.
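        // For example, a 12-byte struct coerced to { i64, i64 } (16 bytes)
        // is first copied into a 16-byte temporary so both i64 pieces can be
        // loaded without reading past the end of the original object.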
2094         if (SrcSize < DstSize) {
2095           llvm::AllocaInst *TempAlloca
2096             = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
2097           Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
2098           SrcPtr = TempAlloca;
2099         } else {
2100           SrcPtr = Builder.CreateBitCast(SrcPtr,
2101                                          llvm::PointerType::getUnqual(STy));
2102         }
2103 
2104         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2105           llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
2106           llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
2107           // We don't know what we're loading from.
2108           LI->setAlignment(1);
2109           Args.push_back(LI);
2110 
2111           // Validate argument match.
2112           checkArgMatches(LI, IRArgNo, IRFuncTy);
2113         }
2114       } else {
2115         // In the simple case, just pass the coerced loaded value.
2116         Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
2117                                          *this));
2118 
2119         // Validate argument match.
2120         checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
2121       }
2122 
2123       break;
2124     }
2125 
2126     case ABIArgInfo::Expand:
2127       ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
2128       IRArgNo = Args.size();
2129       break;
2130     }
2131   }
2132 
2133   // If the callee is a bitcast of a function to a varargs pointer to function
2134   // type, check to see if we can remove the bitcast.  This handles some cases
2135   // with unprototyped functions.
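  // For example, a call through the declaration 'void f();' to a function
  // defined as 'void f(int);' goes through a bitcast of the callee; if the
  // argument types happen to line up, we can call the definition directly.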
2136   if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
2137     if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
2138       llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
2139       llvm::FunctionType *CurFT =
2140         cast<llvm::FunctionType>(CurPT->getElementType());
2141       llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
2142 
2143       if (CE->getOpcode() == llvm::Instruction::BitCast &&
2144           ActualFT->getReturnType() == CurFT->getReturnType() &&
2145           ActualFT->getNumParams() == CurFT->getNumParams() &&
2146           ActualFT->getNumParams() == Args.size() &&
2147           (CurFT->isVarArg() || !ActualFT->isVarArg())) {
2148         bool ArgsMatch = true;
2149         for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
2150           if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
2151             ArgsMatch = false;
2152             break;
2153           }
2154 
2155         // Strip the cast if we can get away with it.  This is a nice cleanup,
2156         // but also allows us to inline the function at -O0 if it is marked
2157         // always_inline.
2158         if (ArgsMatch)
2159           Callee = CalleeF;
2160       }
2161     }
2162 
2163   unsigned CallingConv;
2164   CodeGen::AttributeListType AttributeList;
2165   CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
2166   llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList);
2167 
2168   llvm::BasicBlock *InvokeDest = 0;
2169   if (!Attrs.getFnAttributes().hasAttribute(llvm::Attributes::NoUnwind))
2170     InvokeDest = getInvokeDest();
2171 
2172   llvm::CallSite CS;
2173   if (!InvokeDest) {
2174     CS = Builder.CreateCall(Callee, Args);
2175   } else {
2176     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
2177     CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
2178     EmitBlock(Cont);
2179   }
2180   if (callOrInvoke)
2181     *callOrInvoke = CS.getInstruction();
2182 
2183   CS.setAttributes(Attrs);
2184   CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
2185 
2186   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2187   // optimizer it can aggressively ignore unwind edges.
2188   if (CGM.getLangOpts().ObjCAutoRefCount)
2189     AddObjCARCExceptionMetadata(CS.getInstruction());
2190 
2191   // If the call doesn't return, finish the basic block and clear the
2192   // insertion point; this allows the rest of IRgen to discard
2193   // unreachable code.
2194   if (CS.doesNotReturn()) {
2195     Builder.CreateUnreachable();
2196     Builder.ClearInsertionPoint();
2197 
    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
2201     EnsureInsertPoint();
2202 
2203     // Return a reasonable RValue.
2204     return GetUndefRValue(RetTy);
2205   }
2206 
2207   llvm::Instruction *CI = CS.getInstruction();
2208   if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
2209     CI->setName("call");
2210 
2211   // Emit any writebacks immediately.  Arguably this should happen
2212   // after any return-value munging.
2213   if (CallArgs.hasWritebacks())
2214     emitWritebacks(*this, CallArgs);
2215 
2216   switch (RetAI.getKind()) {
2217   case ABIArgInfo::Indirect: {
2218     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
2219     if (RetTy->isAnyComplexType())
2220       return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
2221     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
2222       return RValue::getAggregate(Args[0]);
2223     return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
2224   }
2225 
2226   case ABIArgInfo::Ignore:
    // Even though the return value is being ignored, we still need to
    // construct an appropriate (undef) return value for our caller.
2229     return GetUndefRValue(RetTy);
2230 
2231   case ABIArgInfo::Extend:
2232   case ABIArgInfo::Direct: {
2233     llvm::Type *RetIRTy = ConvertType(RetTy);
2234     if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
2235       if (RetTy->isAnyComplexType()) {
2236         llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
2237         llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
2238         return RValue::getComplex(std::make_pair(Real, Imag));
2239       }
2240       if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
2241         llvm::Value *DestPtr = ReturnValue.getValue();
2242         bool DestIsVolatile = ReturnValue.isVolatile();
2243 
2244         if (!DestPtr) {
2245           DestPtr = CreateMemTemp(RetTy, "agg.tmp");
2246           DestIsVolatile = false;
2247         }
2248         BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
2249         return RValue::getAggregate(DestPtr);
2250       }
2251 
      // If the return value's type doesn't match, perform a bitcast to
      // coerce it.  This can happen due to trivial type mismatches.
2254       llvm::Value *V = CI;
2255       if (V->getType() != RetIRTy)
2256         V = Builder.CreateBitCast(V, RetIRTy);
2257       return RValue::get(V);
2258     }
2259 
2260     llvm::Value *DestPtr = ReturnValue.getValue();
2261     bool DestIsVolatile = ReturnValue.isVolatile();
2262 
2263     if (!DestPtr) {
2264       DestPtr = CreateMemTemp(RetTy, "coerce");
2265       DestIsVolatile = false;
2266     }
2267 
2268     // If the value is offset in memory, apply the offset now.
2269     llvm::Value *StorePtr = DestPtr;
2270     if (unsigned Offs = RetAI.getDirectOffset()) {
2271       StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
2272       StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
2273       StorePtr = Builder.CreateBitCast(StorePtr,
2274                          llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
2275     }
2276     CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
2277 
2278     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
2279     if (RetTy->isAnyComplexType())
2280       return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
2281     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
2282       return RValue::getAggregate(DestPtr);
2283     return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
2284   }
2285 
2286   case ABIArgInfo::Expand:
2287     llvm_unreachable("Invalid ABI kind for return argument");
2288   }
2289 
2290   llvm_unreachable("Unhandled ABIArgInfo::Kind");
2291 }
2292 
2293 /* VarArg handling */
2294 
2295 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
2296   return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
2297 }
2298