1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
// definition used to handle ABI compliance.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "CGCall.h"
16 #include "ABIInfo.h"
17 #include "CGBlocks.h"
18 #include "CGCXXABI.h"
19 #include "CGCleanup.h"
20 #include "CodeGenFunction.h"
21 #include "CodeGenModule.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/Decl.h"
24 #include "clang/AST/DeclCXX.h"
25 #include "clang/AST/DeclObjC.h"
26 #include "clang/Basic/TargetBuiltins.h"
27 #include "clang/Basic/TargetInfo.h"
28 #include "clang/CodeGen/CGFunctionInfo.h"
29 #include "clang/CodeGen/SwiftCallingConv.h"
30 #include "clang/Frontend/CodeGenOptions.h"
31 #include "llvm/ADT/StringExtras.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/CallSite.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/InlineAsm.h"
36 #include "llvm/IR/Intrinsics.h"
37 #include "llvm/IR/IntrinsicInst.h"
38 #include "llvm/Transforms/Utils/Local.h"
39 using namespace clang;
40 using namespace CodeGen;
41 
42 /***/
43 
44 static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
45   switch (CC) {
46   default: return llvm::CallingConv::C;
47   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
48   case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
49   case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
50   case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
51   case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
52   case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
53   case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
54   case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
55   // TODO: Add support for __pascal to LLVM.
56   case CC_X86Pascal: return llvm::CallingConv::C;
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
59   case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
60   case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
61   case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
62   case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
63   case CC_Swift: return llvm::CallingConv::Swift;
64   }
65 }
66 
67 /// Derives the 'this' type for codegen purposes, i.e. ignoring method
68 /// qualification.
69 /// FIXME: address space qualification?
70 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
71   QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
72   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
73 }
74 
75 /// Returns the canonical formal type of the given C++ method.
76 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
77   return MD->getType()->getCanonicalTypeUnqualified()
78            .getAs<FunctionProtoType>();
79 }
80 
81 /// Returns the "extra-canonicalized" return type, which discards
82 /// qualifiers on the return type.  Codegen doesn't care about them,
83 /// and it makes ABI code a little easier to be able to assume that
84 /// all parameter and return types are top-level unqualified.
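/// For example, a 'const int' return type is treated here as plain 'int'.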
85 static CanQualType GetReturnType(QualType RetTy) {
86   return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
87 }
88 
89 /// Arrange the argument and result information for a value of the given
90 /// unprototyped freestanding function type.
91 const CGFunctionInfo &
92 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
93   // When translating an unprototyped function type, always use a
94   // variadic type.
95   return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
96                                  /*instanceMethod=*/false,
97                                  /*chainCall=*/false, None,
98                                  FTNP->getExtInfo(), {}, RequiredArgs(0));
99 }
100 
/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
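/// For example (a sketch), a parameter declared as
///   void *p __attribute__((pass_object_size(0)))
/// contributes both its own pointer type and an extra size_t parameter.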
103 static void appendParameterTypes(const CodeGenTypes &CGT,
104                                  SmallVectorImpl<CanQualType> &prefix,
105               SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
106                                  CanQual<FunctionProtoType> FPT,
107                                  const FunctionDecl *FD) {
108   // Fill out paramInfos.
109   if (FPT->hasExtParameterInfos() || !paramInfos.empty()) {
110     assert(paramInfos.size() <= prefix.size());
111     auto protoParamInfos = FPT->getExtParameterInfos();
112     paramInfos.reserve(prefix.size() + protoParamInfos.size());
113     paramInfos.resize(prefix.size());
114     paramInfos.append(protoParamInfos.begin(), protoParamInfos.end());
115   }
116 
  // Fast path: no FunctionDecl to inspect for pass_object_size attributes.
118   if (FD == nullptr) {
119     prefix.append(FPT->param_type_begin(), FPT->param_type_end());
120     return;
121   }
122 
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
126   prefix.reserve(prefix.size() + FPT->getNumParams());
127 
128   assert(FD->getNumParams() == FPT->getNumParams());
129   for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
130     prefix.push_back(FPT->getParamType(I));
131     if (FD->getParamDecl(I)->hasAttr<PassObjectSizeAttr>())
132       prefix.push_back(CGT.getContext().getSizeType());
133   }
134 }
135 
136 /// Arrange the LLVM function layout for a value of the given function
137 /// type, on top of any implicit parameters already stored.
138 static const CGFunctionInfo &
139 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
140                         SmallVectorImpl<CanQualType> &prefix,
141                         CanQual<FunctionProtoType> FTP,
142                         const FunctionDecl *FD) {
143   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
144   RequiredArgs Required =
145       RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
146   // FIXME: Kill copy.
147   appendParameterTypes(CGT, prefix, paramInfos, FTP, FD);
148   CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
149 
150   return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
151                                      /*chainCall=*/false, prefix,
152                                      FTP->getExtInfo(), paramInfos,
153                                      Required);
154 }
155 
156 /// Arrange the argument and result information for a value of the
157 /// given freestanding function type.
158 const CGFunctionInfo &
159 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
160                                       const FunctionDecl *FD) {
161   SmallVector<CanQualType, 16> argTypes;
162   return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
163                                    FTP, FD);
164 }
165 
166 static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
167   // Set the appropriate calling convention for the Function.
168   if (D->hasAttr<StdCallAttr>())
169     return CC_X86StdCall;
170 
171   if (D->hasAttr<FastCallAttr>())
172     return CC_X86FastCall;
173 
174   if (D->hasAttr<ThisCallAttr>())
175     return CC_X86ThisCall;
176 
177   if (D->hasAttr<VectorCallAttr>())
178     return CC_X86VectorCall;
179 
180   if (D->hasAttr<PascalAttr>())
181     return CC_X86Pascal;
182 
183   if (PcsAttr *PCS = D->getAttr<PcsAttr>())
184     return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
185 
186   if (D->hasAttr<IntelOclBiccAttr>())
187     return CC_IntelOclBicc;
188 
189   if (D->hasAttr<MSABIAttr>())
190     return IsWindows ? CC_C : CC_X86_64Win64;
191 
192   if (D->hasAttr<SysVABIAttr>())
193     return IsWindows ? CC_X86_64SysV : CC_C;
194 
195   if (D->hasAttr<PreserveMostAttr>())
196     return CC_PreserveMost;
197 
198   if (D->hasAttr<PreserveAllAttr>())
199     return CC_PreserveAll;
200 
201   return CC_C;
202 }
203 
204 /// Arrange the argument and result information for a call to an
205 /// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
///  so fall back to a generic pointer type).
208 /// The member function must be an ordinary function, i.e. not a
209 /// constructor or destructor.
210 const CGFunctionInfo &
211 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
212                                    const FunctionProtoType *FTP,
213                                    const CXXMethodDecl *MD) {
214   SmallVector<CanQualType, 16> argTypes;
215 
216   // Add the 'this' pointer.
217   if (RD)
218     argTypes.push_back(GetThisType(Context, RD));
219   else
220     argTypes.push_back(Context.VoidPtrTy);
221 
222   return ::arrangeLLVMFunctionInfo(
223       *this, true, argTypes,
224       FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
225 }
226 
227 /// Arrange the argument and result information for a declaration or
228 /// definition of the given C++ non-static member function.  The
229 /// member function must be an ordinary function, i.e. not a
230 /// constructor or destructor.
231 const CGFunctionInfo &
232 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
233   assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
234   assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
235 
236   CanQual<FunctionProtoType> prototype = GetFormalType(MD);
237 
238   if (MD->isInstance()) {
239     // The abstract case is perfectly fine.
240     const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
241     return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
242   }
243 
244   return arrangeFreeFunctionType(prototype, MD);
245 }
246 
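/// Arrange the argument and result information for a declaration or
/// definition of the given constructor or destructor variant.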
247 const CGFunctionInfo &
248 CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
249                                             StructorType Type) {
250 
251   SmallVector<CanQualType, 16> argTypes;
252   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
253   argTypes.push_back(GetThisType(Context, MD->getParent()));
254 
255   GlobalDecl GD;
256   if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
257     GD = GlobalDecl(CD, toCXXCtorType(Type));
258   } else {
259     auto *DD = dyn_cast<CXXDestructorDecl>(MD);
260     GD = GlobalDecl(DD, toCXXDtorType(Type));
261   }
262 
263   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
264 
265   // Add the formal parameters.
266   appendParameterTypes(*this, argTypes, paramInfos, FTP, MD);
267 
268   TheCXXABI.buildStructorSignature(MD, Type, argTypes);
269 
270   RequiredArgs required =
271       (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);
272 
273   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
274   CanQualType resultType = TheCXXABI.HasThisReturn(GD)
275                                ? argTypes.front()
276                                : TheCXXABI.hasMostDerivedReturn(GD)
277                                      ? CGM.getContext().VoidPtrTy
278                                      : Context.VoidTy;
279   return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
280                                  /*chainCall=*/false, argTypes, extInfo,
281                                  paramInfos, required);
282 }
283 
284 static SmallVector<CanQualType, 16>
285 getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
286   SmallVector<CanQualType, 16> argTypes;
287   for (auto &arg : args)
288     argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
289   return argTypes;
290 }
291 
292 static SmallVector<CanQualType, 16>
293 getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
294   SmallVector<CanQualType, 16> argTypes;
295   for (auto &arg : args)
296     argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
297   return argTypes;
298 }
299 
300 static void addExtParameterInfosForCall(
301          llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
302                                         const FunctionProtoType *proto,
303                                         unsigned prefixArgs,
304                                         unsigned totalArgs) {
305   assert(proto->hasExtParameterInfos());
306   assert(paramInfos.size() <= prefixArgs);
307   assert(proto->getNumParams() + prefixArgs <= totalArgs);
308 
309   // Add default infos for any prefix args that don't already have infos.
310   paramInfos.resize(prefixArgs);
311 
312   // Add infos for the prototype.
313   auto protoInfos = proto->getExtParameterInfos();
314   paramInfos.append(protoInfos.begin(), protoInfos.end());
315 
316   // Add default infos for the variadic arguments.
317   paramInfos.resize(totalArgs);
318 }
319 
320 static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
321 getExtParameterInfosForCall(const FunctionProtoType *proto,
322                             unsigned prefixArgs, unsigned totalArgs) {
323   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
324   if (proto->hasExtParameterInfos()) {
325     addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
326   }
327   return result;
328 }
329 
/// Arrange a call to a C++ constructor, passing the given arguments.
331 const CGFunctionInfo &
332 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
333                                         const CXXConstructorDecl *D,
334                                         CXXCtorType CtorKind,
335                                         unsigned ExtraArgs) {
336   // FIXME: Kill copy.
337   SmallVector<CanQualType, 16> ArgTypes;
338   for (const auto &Arg : args)
339     ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
340 
341   CanQual<FunctionProtoType> FPT = GetFormalType(D);
342   RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs, D);
343   GlobalDecl GD(D, CtorKind);
344   CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
345                                ? ArgTypes.front()
346                                : TheCXXABI.hasMostDerivedReturn(GD)
347                                      ? CGM.getContext().VoidPtrTy
348                                      : Context.VoidTy;
349 
350   FunctionType::ExtInfo Info = FPT->getExtInfo();
351   auto ParamInfos = getExtParameterInfosForCall(FPT.getTypePtr(), 1 + ExtraArgs,
352                                                 ArgTypes.size());
353   return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
354                                  /*chainCall=*/false, ArgTypes, Info,
355                                  ParamInfos, Required);
356 }
357 
358 /// Arrange the argument and result information for the declaration or
359 /// definition of the given function.
360 const CGFunctionInfo &
361 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
362   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
363     if (MD->isInstance())
364       return arrangeCXXMethodDeclaration(MD);
365 
366   CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
367 
368   assert(isa<FunctionType>(FTy));
369 
370   // When declaring a function without a prototype, always use a
371   // non-variadic type.
372   if (isa<FunctionNoProtoType>(FTy)) {
373     CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
374     return arrangeLLVMFunctionInfo(
375         noProto->getReturnType(), /*instanceMethod=*/false,
376         /*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All);
377   }
378 
379   assert(isa<FunctionProtoType>(FTy));
380   return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>(), FD);
381 }
382 
383 /// Arrange the argument and result information for the declaration or
384 /// definition of an Objective-C method.
385 const CGFunctionInfo &
386 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
387   // It happens that this is the same as a call with no optional
388   // arguments, except also using the formal 'self' type.
389   return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
390 }
391 
392 /// Arrange the argument and result information for the function type
393 /// through which to perform a send to the given Objective-C method,
394 /// using the given receiver type.  The receiver type is not always
395 /// the 'self' type of the method or even an Objective-C pointer type.
396 /// This is *not* the right method for actually performing such a
397 /// message send, due to the possibility of optional arguments.
398 const CGFunctionInfo &
399 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
400                                               QualType receiverType) {
401   SmallVector<CanQualType, 16> argTys;
402   argTys.push_back(Context.getCanonicalParamType(receiverType));
403   argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
404   // FIXME: Kill copy?
405   for (const auto *I : MD->params()) {
406     argTys.push_back(Context.getCanonicalParamType(I->getType()));
407   }
408 
409   FunctionType::ExtInfo einfo;
410   bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
411   einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
412 
413   if (getContext().getLangOpts().ObjCAutoRefCount &&
414       MD->hasAttr<NSReturnsRetainedAttr>())
415     einfo = einfo.withProducesResult(true);
416 
417   RequiredArgs required =
418     (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
419 
420   return arrangeLLVMFunctionInfo(
421       GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
422       /*chainCall=*/false, argTys, einfo, {}, required);
423 }
424 
425 const CGFunctionInfo &
426 CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
427                                                  const CallArgList &args) {
428   auto argTypes = getArgTypesForCall(Context, args);
429   FunctionType::ExtInfo einfo;
430 
431   return arrangeLLVMFunctionInfo(
432       GetReturnType(returnType), /*instanceMethod=*/false,
433       /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
434 }
435 
436 const CGFunctionInfo &
437 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
438   // FIXME: Do we need to handle ObjCMethodDecl?
439   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
440 
441   if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
442     return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
443 
444   if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
445     return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
446 
447   return arrangeFunctionDeclaration(FD);
448 }
449 
450 /// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs.  Return 'void', regardless of the actual return type.
452 /// The body of the thunk will end in a musttail call to a function of the
453 /// correct type, and the caller will bitcast the function to the correct
454 /// prototype.
455 const CGFunctionInfo &
456 CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
457   assert(MD->isVirtual() && "only virtual memptrs have thunks");
458   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
459   CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
460   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
461                                  /*chainCall=*/false, ArgTys,
462                                  FTP->getExtInfo(), {}, RequiredArgs(1));
463 }
464 
465 const CGFunctionInfo &
466 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
467                                    CXXCtorType CT) {
468   assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
469 
470   CanQual<FunctionProtoType> FTP = GetFormalType(CD);
471   SmallVector<CanQualType, 2> ArgTys;
472   const CXXRecordDecl *RD = CD->getParent();
473   ArgTys.push_back(GetThisType(Context, RD));
474   if (CT == Ctor_CopyingClosure)
475     ArgTys.push_back(*FTP->param_type_begin());
476   if (RD->getNumVBases() > 0)
477     ArgTys.push_back(Context.IntTy);
478   CallingConv CC = Context.getDefaultCallingConvention(
479       /*IsVariadic=*/false, /*IsCXXMethod=*/true);
480   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
481                                  /*chainCall=*/false, ArgTys,
482                                  FunctionType::ExtInfo(CC), {},
483                                  RequiredArgs::All);
484 }
485 
486 /// Arrange a call as unto a free function, except possibly with an
487 /// additional number of formal parameters considered required.
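/// For example (a sketch), a variadic prototype with one declared parameter
/// plus one extra required prefix argument yields RequiredArgs(2), leaving any
/// remaining call arguments as variadic.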
488 static const CGFunctionInfo &
489 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
490                             CodeGenModule &CGM,
491                             const CallArgList &args,
492                             const FunctionType *fnType,
493                             unsigned numExtraRequiredArgs,
494                             bool chainCall) {
495   assert(args.size() >= numExtraRequiredArgs);
496 
497   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
498 
499   // In most cases, there are no optional arguments.
500   RequiredArgs required = RequiredArgs::All;
501 
502   // If we have a variadic prototype, the required arguments are the
503   // extra prefix plus the arguments in the prototype.
504   if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
505     if (proto->isVariadic())
506       required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
507 
508     if (proto->hasExtParameterInfos())
509       addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
510                                   args.size());
511 
512   // If we don't have a prototype at all, but we're supposed to
513   // explicitly use the variadic convention for unprototyped calls,
514   // treat all of the arguments as required but preserve the nominal
515   // possibility of variadics.
516   } else if (CGM.getTargetCodeGenInfo()
517                 .isNoProtoCallVariadic(args,
518                                        cast<FunctionNoProtoType>(fnType))) {
519     required = RequiredArgs(args.size());
520   }
521 
522   // FIXME: Kill copy.
523   SmallVector<CanQualType, 16> argTypes;
524   for (const auto &arg : args)
525     argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
526   return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
527                                      /*instanceMethod=*/false, chainCall,
528                                      argTypes, fnType->getExtInfo(), paramInfos,
529                                      required);
530 }
531 
532 /// Figure out the rules for calling a function with the given formal
533 /// type using the given arguments.  The arguments are necessary
534 /// because the function might be unprototyped, in which case it's
535 /// target-dependent in crazy ways.
536 const CGFunctionInfo &
537 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
538                                       const FunctionType *fnType,
539                                       bool chainCall) {
540   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
541                                      chainCall ? 1 : 0, chainCall);
542 }
543 
544 /// A block function is essentially a free function with an
545 /// extra implicit argument.
546 const CGFunctionInfo &
547 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
548                                        const FunctionType *fnType) {
549   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
550                                      /*chainCall=*/false);
551 }
552 
553 const CGFunctionInfo &
554 CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
555                                               const FunctionArgList &params) {
556   auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
557   auto argTypes = getArgTypesForDeclaration(Context, params);
558 
559   return arrangeLLVMFunctionInfo(
560       GetReturnType(proto->getReturnType()),
561       /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
562       proto->getExtInfo(), paramInfos,
563       RequiredArgs::forPrototypePlus(proto, 1, nullptr));
564 }
565 
566 const CGFunctionInfo &
567 CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
568                                          const CallArgList &args) {
569   // FIXME: Kill copy.
570   SmallVector<CanQualType, 16> argTypes;
571   for (const auto &Arg : args)
572     argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
573   return arrangeLLVMFunctionInfo(
574       GetReturnType(resultType), /*instanceMethod=*/false,
575       /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
576       /*paramInfos=*/ {}, RequiredArgs::All);
577 }
578 
579 const CGFunctionInfo &
580 CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
581                                                 const FunctionArgList &args) {
582   auto argTypes = getArgTypesForDeclaration(Context, args);
583 
584   return arrangeLLVMFunctionInfo(
585       GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
586       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
587 }
588 
589 const CGFunctionInfo &
590 CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
591                                               ArrayRef<CanQualType> argTypes) {
592   return arrangeLLVMFunctionInfo(
593       resultType, /*instanceMethod=*/false, /*chainCall=*/false,
594       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
595 }
596 
597 /// Arrange a call to a C++ method, passing the given arguments.
598 const CGFunctionInfo &
599 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
600                                    const FunctionProtoType *proto,
601                                    RequiredArgs required) {
602   unsigned numRequiredArgs =
603     (proto->isVariadic() ? required.getNumRequiredArgs() : args.size());
604   unsigned numPrefixArgs = numRequiredArgs - proto->getNumParams();
605   auto paramInfos =
606     getExtParameterInfosForCall(proto, numPrefixArgs, args.size());
607 
608   // FIXME: Kill copy.
609   auto argTypes = getArgTypesForCall(Context, args);
610 
611   FunctionType::ExtInfo info = proto->getExtInfo();
612   return arrangeLLVMFunctionInfo(
613       GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
614       /*chainCall=*/false, argTypes, info, paramInfos, required);
615 }
616 
617 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
618   return arrangeLLVMFunctionInfo(
619       getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
620       None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
621 }
622 
623 const CGFunctionInfo &
624 CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
625                           const CallArgList &args) {
626   assert(signature.arg_size() <= args.size());
627   if (signature.arg_size() == args.size())
628     return signature;
629 
630   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
631   auto sigParamInfos = signature.getExtParameterInfos();
632   if (!sigParamInfos.empty()) {
633     paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
634     paramInfos.resize(args.size());
635   }
636 
637   auto argTypes = getArgTypesForCall(Context, args);
638 
639   assert(signature.getRequiredArgs().allowsOptionalArgs());
640   return arrangeLLVMFunctionInfo(signature.getReturnType(),
641                                  signature.isInstanceMethod(),
642                                  signature.isChainCall(),
643                                  argTypes,
644                                  signature.getExtInfo(),
645                                  paramInfos,
646                                  signature.getRequiredArgs());
647 }
648 
649 /// Arrange the argument and result information for an abstract value
650 /// of a given function type.  This is the method which all of the
651 /// above functions ultimately defer to.
652 const CGFunctionInfo &
653 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
654                                       bool instanceMethod,
655                                       bool chainCall,
656                                       ArrayRef<CanQualType> argTypes,
657                                       FunctionType::ExtInfo info,
658                      ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
659                                       RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     [](CanQualType T) { return T.isCanonicalAsParam(); }));
662 
663   // Lookup or create unique function info.
664   llvm::FoldingSetNodeID ID;
665   CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
666                           required, resultType, argTypes);
667 
668   void *insertPos = nullptr;
669   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
670   if (FI)
671     return *FI;
672 
673   unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
674 
675   // Construct the function info.  We co-allocate the ArgInfos.
676   FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
677                               paramInfos, resultType, argTypes, required);
678   FunctionInfos.InsertNode(FI, insertPos);
679 
680   bool inserted = FunctionsBeingProcessed.insert(FI).second;
681   (void)inserted;
682   assert(inserted && "Recursively being processed?");
683 
684   // Compute ABI information.
685   if (info.getCC() != CC_Swift) {
686     getABIInfo().computeInfo(*FI);
687   } else {
688     swiftcall::computeABIInfo(CGM, *FI);
689   }
690 
691   // Loop over all of the computed argument and return value info.  If any of
692   // them are direct or extend without a specified coerce type, specify the
693   // default now.
694   ABIArgInfo &retInfo = FI->getReturnInfo();
695   if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
696     retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
697 
698   for (auto &I : FI->arguments())
699     if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
700       I.info.setCoerceToType(ConvertType(I.type));
701 
702   bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
703   assert(erased && "Not in set?");
704 
705   return *FI;
706 }
707 
708 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
709                                        bool instanceMethod,
710                                        bool chainCall,
711                                        const FunctionType::ExtInfo &info,
712                                        ArrayRef<ExtParameterInfo> paramInfos,
713                                        CanQualType resultType,
714                                        ArrayRef<CanQualType> argTypes,
715                                        RequiredArgs required) {
716   assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
717 
718   void *buffer =
719     operator new(totalSizeToAlloc<ArgInfo,             ExtParameterInfo>(
720                                   argTypes.size() + 1, paramInfos.size()));
721 
722   CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
723   FI->CallingConvention = llvmCC;
724   FI->EffectiveCallingConvention = llvmCC;
725   FI->ASTCallingConvention = info.getCC();
726   FI->InstanceMethod = instanceMethod;
727   FI->ChainCall = chainCall;
728   FI->NoReturn = info.getNoReturn();
729   FI->ReturnsRetained = info.getProducesResult();
730   FI->Required = required;
731   FI->HasRegParm = info.getHasRegParm();
732   FI->RegParm = info.getRegParm();
733   FI->ArgStruct = nullptr;
734   FI->ArgStructAlign = 0;
735   FI->NumArgs = argTypes.size();
736   FI->HasExtParameterInfos = !paramInfos.empty();
737   FI->getArgsBuffer()[0].type = resultType;
738   for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
739     FI->getArgsBuffer()[i + 1].type = argTypes[i];
740   for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
741     FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
742   return FI;
743 }
744 
745 /***/
746 
747 namespace {
748 // ABIArgInfo::Expand implementation.
749 
750 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
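// For example (a sketch), a record such as 'struct { int a; float b; }'
// expands to its two fields, while a '_Complex double' expands to its real
// and imaginary parts.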
751 struct TypeExpansion {
752   enum TypeExpansionKind {
753     // Elements of constant arrays are expanded recursively.
754     TEK_ConstantArray,
755     // Record fields are expanded recursively (but if record is a union, only
756     // the field with the largest size is expanded).
757     TEK_Record,
758     // For complex types, real and imaginary parts are expanded recursively.
759     TEK_Complex,
760     // All other types are not expandable.
761     TEK_None
762   };
763 
764   const TypeExpansionKind Kind;
765 
766   TypeExpansion(TypeExpansionKind K) : Kind(K) {}
767   virtual ~TypeExpansion() {}
768 };
769 
770 struct ConstantArrayExpansion : TypeExpansion {
771   QualType EltTy;
772   uint64_t NumElts;
773 
774   ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
775       : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
776   static bool classof(const TypeExpansion *TE) {
777     return TE->Kind == TEK_ConstantArray;
778   }
779 };
780 
781 struct RecordExpansion : TypeExpansion {
782   SmallVector<const CXXBaseSpecifier *, 1> Bases;
783 
784   SmallVector<const FieldDecl *, 1> Fields;
785 
786   RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
787                   SmallVector<const FieldDecl *, 1> &&Fields)
788       : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
789         Fields(std::move(Fields)) {}
790   static bool classof(const TypeExpansion *TE) {
791     return TE->Kind == TEK_Record;
792   }
793 };
794 
795 struct ComplexExpansion : TypeExpansion {
796   QualType EltTy;
797 
798   ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
799   static bool classof(const TypeExpansion *TE) {
800     return TE->Kind == TEK_Complex;
801   }
802 };
803 
804 struct NoExpansion : TypeExpansion {
805   NoExpansion() : TypeExpansion(TEK_None) {}
806   static bool classof(const TypeExpansion *TE) {
807     return TE->Kind == TEK_None;
808   }
809 };
810 }  // namespace
811 
812 static std::unique_ptr<TypeExpansion>
813 getTypeExpansion(QualType Ty, const ASTContext &Context) {
814   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
815     return llvm::make_unique<ConstantArrayExpansion>(
816         AT->getElementType(), AT->getSize().getZExtValue());
817   }
818   if (const RecordType *RT = Ty->getAs<RecordType>()) {
819     SmallVector<const CXXBaseSpecifier *, 1> Bases;
820     SmallVector<const FieldDecl *, 1> Fields;
821     const RecordDecl *RD = RT->getDecl();
822     assert(!RD->hasFlexibleArrayMember() &&
823            "Cannot expand structure with flexible array.");
824     if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
827       const FieldDecl *LargestFD = nullptr;
828       CharUnits UnionSize = CharUnits::Zero();
829 
830       for (const auto *FD : RD->fields()) {
831         // Skip zero length bitfields.
832         if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
833           continue;
834         assert(!FD->isBitField() &&
835                "Cannot expand structure with bit-field members.");
836         CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
837         if (UnionSize < FieldSize) {
838           UnionSize = FieldSize;
839           LargestFD = FD;
840         }
841       }
842       if (LargestFD)
843         Fields.push_back(LargestFD);
844     } else {
845       if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
846         assert(!CXXRD->isDynamicClass() &&
847                "cannot expand vtable pointers in dynamic classes");
848         for (const CXXBaseSpecifier &BS : CXXRD->bases())
849           Bases.push_back(&BS);
850       }
851 
852       for (const auto *FD : RD->fields()) {
853         // Skip zero length bitfields.
854         if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
855           continue;
856         assert(!FD->isBitField() &&
857                "Cannot expand structure with bit-field members.");
858         Fields.push_back(FD);
859       }
860     }
861     return llvm::make_unique<RecordExpansion>(std::move(Bases),
862                                               std::move(Fields));
863   }
864   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
865     return llvm::make_unique<ComplexExpansion>(CT->getElementType());
866   }
867   return llvm::make_unique<NoExpansion>();
868 }
869 
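/// Returns the number of expanded leaf values for \p Ty: array elements and
/// record fields are counted recursively, a complex type counts as two, and
/// any other type counts as one.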
870 static int getExpansionSize(QualType Ty, const ASTContext &Context) {
871   auto Exp = getTypeExpansion(Ty, Context);
872   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
873     return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
874   }
875   if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
876     int Res = 0;
877     for (auto BS : RExp->Bases)
878       Res += getExpansionSize(BS->getType(), Context);
879     for (auto FD : RExp->Fields)
880       Res += getExpansionSize(FD->getType(), Context);
881     return Res;
882   }
883   if (isa<ComplexExpansion>(Exp.get()))
884     return 2;
885   assert(isa<NoExpansion>(Exp.get()));
886   return 1;
887 }
888 
889 void
890 CodeGenTypes::getExpandedTypes(QualType Ty,
891                                SmallVectorImpl<llvm::Type *>::iterator &TI) {
892   auto Exp = getTypeExpansion(Ty, Context);
893   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
894     for (int i = 0, n = CAExp->NumElts; i < n; i++) {
895       getExpandedTypes(CAExp->EltTy, TI);
896     }
897   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
898     for (auto BS : RExp->Bases)
899       getExpandedTypes(BS->getType(), TI);
900     for (auto FD : RExp->Fields)
901       getExpandedTypes(FD->getType(), TI);
902   } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
903     llvm::Type *EltTy = ConvertType(CExp->EltTy);
904     *TI++ = EltTy;
905     *TI++ = EltTy;
906   } else {
907     assert(isa<NoExpansion>(Exp.get()));
908     *TI++ = ConvertType(Ty);
909   }
910 }
911 
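/// Invokes \p Fn on the address of each element of the constant-array
/// expansion, deriving each element's address and alignment from \p BaseAddr.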
912 static void forConstantArrayExpansion(CodeGenFunction &CGF,
913                                       ConstantArrayExpansion *CAE,
914                                       Address BaseAddr,
915                                       llvm::function_ref<void(Address)> Fn) {
916   CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
917   CharUnits EltAlign =
918     BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
919 
920   for (int i = 0, n = CAE->NumElts; i < n; i++) {
921     llvm::Value *EltAddr =
922       CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
923     Fn(Address(EltAddr, EltAlign));
924   }
925 }
926 
927 void CodeGenFunction::ExpandTypeFromArgs(
928     QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
929   assert(LV.isSimple() &&
930          "Unexpected non-simple lvalue during struct expansion.");
931 
932   auto Exp = getTypeExpansion(Ty, getContext());
933   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
934     forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
935                               [&](Address EltAddr) {
936       LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
937       ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
938     });
939   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
940     Address This = LV.getAddress();
941     for (const CXXBaseSpecifier *BS : RExp->Bases) {
942       // Perform a single step derived-to-base conversion.
943       Address Base =
944           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
945                                 /*NullCheckValue=*/false, SourceLocation());
946       LValue SubLV = MakeAddrLValue(Base, BS->getType());
947 
948       // Recurse onto bases.
949       ExpandTypeFromArgs(BS->getType(), SubLV, AI);
950     }
951     for (auto FD : RExp->Fields) {
952       // FIXME: What are the right qualifiers here?
953       LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
954       ExpandTypeFromArgs(FD->getType(), SubLV, AI);
955     }
956   } else if (isa<ComplexExpansion>(Exp.get())) {
957     auto realValue = *AI++;
958     auto imagValue = *AI++;
959     EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
960   } else {
961     assert(isa<NoExpansion>(Exp.get()));
962     EmitStoreThroughLValue(RValue::get(*AI++), LV);
963   }
964 }
965 
966 void CodeGenFunction::ExpandTypeToArgs(
967     QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
968     SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
969   auto Exp = getTypeExpansion(Ty, getContext());
970   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
971     forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
972                               [&](Address EltAddr) {
973       RValue EltRV =
974           convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
975       ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
976     });
977   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
978     Address This = RV.getAggregateAddress();
979     for (const CXXBaseSpecifier *BS : RExp->Bases) {
980       // Perform a single step derived-to-base conversion.
981       Address Base =
982           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
983                                 /*NullCheckValue=*/false, SourceLocation());
984       RValue BaseRV = RValue::getAggregate(Base);
985 
986       // Recurse onto bases.
987       ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
988                        IRCallArgPos);
989     }
990 
991     LValue LV = MakeAddrLValue(This, Ty);
992     for (auto FD : RExp->Fields) {
993       RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
994       ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
995                        IRCallArgPos);
996     }
997   } else if (isa<ComplexExpansion>(Exp.get())) {
998     ComplexPairTy CV = RV.getComplexVal();
999     IRCallArgs[IRCallArgPos++] = CV.first;
1000     IRCallArgs[IRCallArgPos++] = CV.second;
1001   } else {
1002     assert(isa<NoExpansion>(Exp.get()));
1003     assert(RV.isScalar() &&
1004            "Unexpected non-scalar rvalue during struct expansion.");
1005 
1006     // Insert a bitcast as needed.
1007     llvm::Value *V = RV.getScalarVal();
1008     if (IRCallArgPos < IRFuncTy->getNumParams() &&
1009         V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1010       V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1011 
1012     IRCallArgs[IRCallArgPos++] = V;
1013   }
1014 }
1015 
1016 /// Create a temporary allocation for the purposes of coercion.
1017 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1018                                            CharUnits MinAlign) {
1019   // Don't use an alignment that's worse than what LLVM would prefer.
1020   auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1021   CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1022 
1023   return CGF.CreateTempAlloca(Ty, Align);
1024 }
1025 
1026 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1027 /// accessing some number of bytes out of it, try to gep into the struct to get
1028 /// at its inner goodness.  Dive as deep as possible without entering an element
1029 /// with an in-memory size smaller than DstSize.
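/// For example (a sketch), given a pointer to the LLVM type { { i32 } } and a
/// 4-byte access, this dives down to a pointer to the innermost i32.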
1030 static Address
1031 EnterStructPointerForCoercedAccess(Address SrcPtr,
1032                                    llvm::StructType *SrcSTy,
1033                                    uint64_t DstSize, CodeGenFunction &CGF) {
1034   // We can't dive into a zero-element struct.
1035   if (SrcSTy->getNumElements() == 0) return SrcPtr;
1036 
1037   llvm::Type *FirstElt = SrcSTy->getElementType(0);
1038 
1039   // If the first elt is at least as large as what we're looking for, or if the
1040   // first element is the same size as the whole struct, we can enter it. The
1041   // comparison must be made on the store size and not the alloca size. Using
1042   // the alloca size may overstate the size of the load.
1043   uint64_t FirstEltSize =
1044     CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1045   if (FirstEltSize < DstSize &&
1046       FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1047     return SrcPtr;
1048 
1049   // GEP into the first element.
1050   SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");
1051 
1052   // If the first element is a struct, recurse.
1053   llvm::Type *SrcTy = SrcPtr.getElementType();
1054   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1055     return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1056 
1057   return SrcPtr;
1058 }
1059 
/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the given Ty, where both
1061 /// are either integers or pointers.  This does a truncation of the value if it
1062 /// is too large or a zero extension if it is too small.
1063 ///
1064 /// This behaves as if the value were coerced through memory, so on big-endian
1065 /// targets the high bits are preserved in a truncation, while little-endian
1066 /// targets preserve the low bits.
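///
/// For example (a sketch), coercing an i32 holding 0xAABBCCDD to i16 yields
/// 0xAABB on a big-endian target and 0xCCDD on a little-endian target.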
1067 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1068                                              llvm::Type *Ty,
1069                                              CodeGenFunction &CGF) {
1070   if (Val->getType() == Ty)
1071     return Val;
1072 
1073   if (isa<llvm::PointerType>(Val->getType())) {
1074     // If this is Pointer->Pointer avoid conversion to and from int.
1075     if (isa<llvm::PointerType>(Ty))
1076       return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1077 
1078     // Convert the pointer to an integer so we can play with its width.
1079     Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1080   }
1081 
1082   llvm::Type *DestIntTy = Ty;
1083   if (isa<llvm::PointerType>(DestIntTy))
1084     DestIntTy = CGF.IntPtrTy;
1085 
1086   if (Val->getType() != DestIntTy) {
1087     const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1088     if (DL.isBigEndian()) {
1089       // Preserve the high bits on big-endian targets.
1090       // That is what memory coercion does.
1091       uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1092       uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1093 
1094       if (SrcSize > DstSize) {
1095         Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1096         Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1097       } else {
1098         Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1099         Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1100       }
1101     } else {
1102       // Little-endian targets preserve the low bits. No shifts required.
1103       Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1104     }
1105   }
1106 
1107   if (isa<llvm::PointerType>(Ty))
1108     Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1109   return Val;
1110 }
1111 
1112 
1113 
/// CreateCoercedLoad - Create a load from \arg Src interpreted as a pointer
/// to an object of type \arg Ty, using the alignment carried by \arg Src.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
1121 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1122                                       CodeGenFunction &CGF) {
1123   llvm::Type *SrcTy = Src.getElementType();
1124 
1125   // If SrcTy and Ty are the same, just do a load.
1126   if (SrcTy == Ty)
1127     return CGF.Builder.CreateLoad(Src);
1128 
1129   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1130 
1131   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1132     Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
1133     SrcTy = Src.getType()->getElementType();
1134   }
1135 
1136   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1137 
1138   // If the source and destination are integer or pointer types, just do an
1139   // extension or truncation to the desired type.
1140   if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1141       (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1142     llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1143     return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1144   }
1145 
1146   // If load is legal, just bitcast the src pointer.
1147   if (SrcSize >= DstSize) {
1148     // Generally SrcSize is never greater than DstSize, since this means we are
1149     // losing bits. However, this can happen in cases where the structure has
1150     // additional padding, for example due to a user specified alignment.
1151     //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
1154     Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
1155     return CGF.Builder.CreateLoad(Src);
1156   }
1157 
1158   // Otherwise do coercion through memory. This is stupid, but simple.
1159   Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
1160   Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
1161   Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
1162   CGF.Builder.CreateMemCpy(Casted, SrcCasted,
1163       llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
1164       false);
1165   return CGF.Builder.CreateLoad(Tmp);
1166 }
1167 
1168 // Function to store a first-class aggregate into memory.  We prefer to
1169 // store the elements rather than the aggregate to be more friendly to
1170 // fast-isel.
1171 // FIXME: Do we need to recurse here?
1172 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
1173                           Address Dest, bool DestIsVolatile) {
1174   // Prefer scalar stores to first-class aggregate stores.
1175   if (llvm::StructType *STy =
1176         dyn_cast<llvm::StructType>(Val->getType())) {
1177     const llvm::StructLayout *Layout =
1178       CGF.CGM.getDataLayout().getStructLayout(STy);
1179 
1180     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1181       auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
1182       Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
1183       llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
1184       CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1185     }
1186   } else {
1187     CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
1188   }
1189 }
1190 
/// CreateCoercedStore - Create a store to \arg Dst from \arg Src, where the
/// source and destination may have different types, using the alignment
/// carried by \arg Dst.
1194 ///
1195 /// This safely handles the case when the src type is larger than the
1196 /// destination type; the upper bits of the src will be lost.
1197 static void CreateCoercedStore(llvm::Value *Src,
1198                                Address Dst,
1199                                bool DstIsVolatile,
1200                                CodeGenFunction &CGF) {
1201   llvm::Type *SrcTy = Src->getType();
1202   llvm::Type *DstTy = Dst.getType()->getElementType();
1203   if (SrcTy == DstTy) {
1204     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1205     return;
1206   }
1207 
1208   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1209 
1210   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1211     Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
1212     DstTy = Dst.getType()->getElementType();
1213   }
1214 
1215   // If the source and destination are integer or pointer types, just do an
1216   // extension or truncation to the desired type.
1217   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1218       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1219     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1220     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1221     return;
1222   }
1223 
1224   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1225 
1226   // If store is legal, just bitcast the src pointer.
1227   if (SrcSize <= DstSize) {
1228     Dst = CGF.Builder.CreateBitCast(Dst, llvm::PointerType::getUnqual(SrcTy));
1229     BuildAggStore(CGF, Src, Dst, DstIsVolatile);
1230   } else {
1231     // Otherwise do coercion through memory. This is stupid, but
1232     // simple.
1233 
1234     // Generally SrcSize is never greater than DstSize, since this means we are
1235     // losing bits. However, this can happen in cases where the structure has
1236     // additional padding, for example due to a user specified alignment.
1237     //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
1240     Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1241     CGF.Builder.CreateStore(Src, Tmp);
1242     Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
1243     Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
1244     CGF.Builder.CreateMemCpy(DstCasted, Casted,
1245         llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
1246         false);
1247   }
1248 }
1249 
1250 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1251                                    const ABIArgInfo &info) {
1252   if (unsigned offset = info.getDirectOffset()) {
1253     addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1254     addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1255                                              CharUnits::fromQuantity(offset));
1256     addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1257   }
1258   return addr;
1259 }
1260 
1261 namespace {
1262 
/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
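///
/// For example (a sketch), an indirectly returned (sret) result introduces an
/// extra IR argument, and an Expand argument occupies several consecutive IR
/// arguments.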
1265 class ClangToLLVMArgMapping {
1266   static const unsigned InvalidIndex = ~0U;
1267   unsigned InallocaArgNo;
1268   unsigned SRetArgNo;
1269   unsigned TotalIRArgs;
1270 
  /// The LLVM IR arguments corresponding to a single Clang argument.
1272   struct IRArgs {
1273     unsigned PaddingArgIndex;
1274     // Argument is expanded to IR arguments at positions
1275     // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1276     unsigned FirstArgIndex;
1277     unsigned NumberOfArgs;
1278 
1279     IRArgs()
1280         : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1281           NumberOfArgs(0) {}
1282   };
1283 
1284   SmallVector<IRArgs, 8> ArgInfo;
1285 
1286 public:
1287   ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1288                         bool OnlyRequiredArgs = false)
1289       : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1290         ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1291     construct(Context, FI, OnlyRequiredArgs);
1292   }
1293 
1294   bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1295   unsigned getInallocaArgNo() const {
1296     assert(hasInallocaArg());
1297     return InallocaArgNo;
1298   }
1299 
1300   bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1301   unsigned getSRetArgNo() const {
1302     assert(hasSRetArg());
1303     return SRetArgNo;
1304   }
1305 
1306   unsigned totalIRArgs() const { return TotalIRArgs; }
1307 
1308   bool hasPaddingArg(unsigned ArgNo) const {
1309     assert(ArgNo < ArgInfo.size());
1310     return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1311   }
1312   unsigned getPaddingArgNo(unsigned ArgNo) const {
1313     assert(hasPaddingArg(ArgNo));
1314     return ArgInfo[ArgNo].PaddingArgIndex;
1315   }
1316 
  /// Returns the index of the first IR argument corresponding to ArgNo, and
  /// the number of IR arguments it expands to.
1319   std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1320     assert(ArgNo < ArgInfo.size());
1321     return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1322                           ArgInfo[ArgNo].NumberOfArgs);
1323   }
1324 
1325 private:
1326   void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1327                  bool OnlyRequiredArgs);
1328 };
1329 
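// Builds the mapping from Clang arguments to IR argument indices. Note that
// when the ABI places the sret pointer after 'this' (isSRetAfterThis), IR
// argument 0 is 'this' and the sret pointer is IR argument 1; the loop below
// skips over that slot when assigning IR indices.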
1330 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1331                                       const CGFunctionInfo &FI,
1332                                       bool OnlyRequiredArgs) {
1333   unsigned IRArgNo = 0;
1334   bool SwapThisWithSRet = false;
1335   const ABIArgInfo &RetAI = FI.getReturnInfo();
1336 
1337   if (RetAI.getKind() == ABIArgInfo::Indirect) {
1338     SwapThisWithSRet = RetAI.isSRetAfterThis();
1339     SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1340   }
1341 
1342   unsigned ArgNo = 0;
1343   unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1344   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1345        ++I, ++ArgNo) {
1346     assert(I != FI.arg_end());
1347     QualType ArgType = I->type;
1348     const ABIArgInfo &AI = I->info;
1349     // Collect data about IR arguments corresponding to Clang argument ArgNo.
1350     auto &IRArgs = ArgInfo[ArgNo];
1351 
1352     if (AI.getPaddingType())
1353       IRArgs.PaddingArgIndex = IRArgNo++;
1354 
1355     switch (AI.getKind()) {
1356     case ABIArgInfo::Extend:
1357     case ABIArgInfo::Direct: {
1358       // FIXME: handle sseregparm someday...
1359       llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1360       if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1361         IRArgs.NumberOfArgs = STy->getNumElements();
1362       } else {
1363         IRArgs.NumberOfArgs = 1;
1364       }
1365       break;
1366     }
1367     case ABIArgInfo::Indirect:
1368       IRArgs.NumberOfArgs = 1;
1369       break;
1370     case ABIArgInfo::Ignore:
1371     case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
1373       IRArgs.NumberOfArgs = 0;
1374       break;
1375     case ABIArgInfo::CoerceAndExpand:
1376       IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1377       break;
1378     case ABIArgInfo::Expand:
1379       IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1380       break;
1381     }
1382 
1383     if (IRArgs.NumberOfArgs > 0) {
1384       IRArgs.FirstArgIndex = IRArgNo;
1385       IRArgNo += IRArgs.NumberOfArgs;
1386     }
1387 
1388     // Skip over the sret parameter when it comes second.  We already handled it
1389     // above.
1390     if (IRArgNo == 1 && SwapThisWithSRet)
1391       IRArgNo++;
1392   }
1393   assert(ArgNo == ArgInfo.size());
1394 
1395   if (FI.usesInAlloca())
1396     InallocaArgNo = IRArgNo++;
1397 
1398   TotalIRArgs = IRArgNo;
1399 }
1400 }  // namespace
1401 
1402 /***/
1403 
1404 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1405   return FI.getReturnInfo().isIndirect();
1406 }
1407 
1408 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1409   return ReturnTypeUsesSRet(FI) &&
1410          getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1411 }
1412 
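// Whether a result of the given type should use an FP-returning variant of
// the Objective-C message send (e.g. objc_msgSend_fpret) on this target.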
1413 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1414   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1415     switch (BT->getKind()) {
1416     default:
1417       return false;
1418     case BuiltinType::Float:
1419       return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1420     case BuiltinType::Double:
1421       return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1422     case BuiltinType::LongDouble:
1423       return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1424     }
1425   }
1426 
1427   return false;
1428 }
1429 
1430 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1431   if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1432     if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1433       if (BT->getKind() == BuiltinType::LongDouble)
1434         return getTarget().useObjCFP2RetForComplexLongDouble();
1435     }
1436   }
1437 
1438   return false;
1439 }
1440 
1441 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1442   const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1443   return GetFunctionType(FI);
1444 }
1445 
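/// Builds the LLVM IR function type described by FI: the return type and the
/// flattened IR argument list, including any sret, inalloca, and padding
/// arguments required by the ABI.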
1446 llvm::FunctionType *
1447 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1448 
1449   bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1450   (void)Inserted;
1451   assert(Inserted && "Recursively being processed?");
1452 
1453   llvm::Type *resultType = nullptr;
1454   const ABIArgInfo &retAI = FI.getReturnInfo();
1455   switch (retAI.getKind()) {
1456   case ABIArgInfo::Expand:
1457     llvm_unreachable("Invalid ABI kind for return argument");
1458 
1459   case ABIArgInfo::Extend:
1460   case ABIArgInfo::Direct:
1461     resultType = retAI.getCoerceToType();
1462     break;
1463 
1464   case ABIArgInfo::InAlloca:
1465     if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void; they return the sret pointer.
1467       QualType ret = FI.getReturnType();
1468       llvm::Type *ty = ConvertType(ret);
1469       unsigned addressSpace = Context.getTargetAddressSpace(ret);
1470       resultType = llvm::PointerType::get(ty, addressSpace);
1471     } else {
1472       resultType = llvm::Type::getVoidTy(getLLVMContext());
1473     }
1474     break;
1475 
1476   case ABIArgInfo::Indirect:
1477   case ABIArgInfo::Ignore:
1478     resultType = llvm::Type::getVoidTy(getLLVMContext());
1479     break;
1480 
1481   case ABIArgInfo::CoerceAndExpand:
1482     resultType = retAI.getUnpaddedCoerceAndExpandType();
1483     break;
1484   }
1485 
1486   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1487   SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1488 
1489   // Add type for sret argument.
1490   if (IRFunctionArgs.hasSRetArg()) {
1491     QualType Ret = FI.getReturnType();
1492     llvm::Type *Ty = ConvertType(Ret);
1493     unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1494     ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1495         llvm::PointerType::get(Ty, AddressSpace);
1496   }
1497 
1498   // Add type for inalloca argument.
1499   if (IRFunctionArgs.hasInallocaArg()) {
1500     auto ArgStruct = FI.getArgStruct();
1501     assert(ArgStruct);
1502     ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1503   }
1504 
1505   // Add in all of the required arguments.
1506   unsigned ArgNo = 0;
1507   CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1508                                      ie = it + FI.getNumRequiredArgs();
1509   for (; it != ie; ++it, ++ArgNo) {
1510     const ABIArgInfo &ArgInfo = it->info;
1511 
1512     // Insert a padding type to ensure proper alignment.
1513     if (IRFunctionArgs.hasPaddingArg(ArgNo))
1514       ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1515           ArgInfo.getPaddingType();
1516 
1517     unsigned FirstIRArg, NumIRArgs;
1518     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1519 
1520     switch (ArgInfo.getKind()) {
1521     case ABIArgInfo::Ignore:
1522     case ABIArgInfo::InAlloca:
1523       assert(NumIRArgs == 0);
1524       break;
1525 
1526     case ABIArgInfo::Indirect: {
1527       assert(NumIRArgs == 1);
1528       // indirect arguments are always on the stack, which is addr space #0.
1529       llvm::Type *LTy = ConvertTypeForMem(it->type);
1530       ArgTypes[FirstIRArg] = LTy->getPointerTo();
1531       break;
1532     }
1533 
1534     case ABIArgInfo::Extend:
1535     case ABIArgInfo::Direct: {
1536       // Fast-isel and the optimizer generally like scalar values better than
1537       // FCAs, so we flatten them if this is safe to do for this argument.
1538       llvm::Type *argType = ArgInfo.getCoerceToType();
1539       llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1540       if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1541         assert(NumIRArgs == st->getNumElements());
1542         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1543           ArgTypes[FirstIRArg + i] = st->getElementType(i);
1544       } else {
1545         assert(NumIRArgs == 1);
1546         ArgTypes[FirstIRArg] = argType;
1547       }
1548       break;
1549     }
1550 
1551     case ABIArgInfo::CoerceAndExpand: {
1552       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1553       for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1554         *ArgTypesIter++ = EltTy;
1555       }
1556       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1557       break;
1558     }
1559 
1560     case ABIArgInfo::Expand:
1561       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1562       getExpandedTypes(it->type, ArgTypesIter);
1563       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1564       break;
1565     }
1566   }
1567 
1568   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1569   assert(Erased && "Not in set?");
1570 
1571   return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1572 }
1573 
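/// Returns the IR function type to use for a vtable slot for the given
/// method.  If the method's function type cannot be converted yet (e.g. it
/// involves an incomplete type), an empty struct type is returned instead.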
1574 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1575   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1576   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1577 
1578   if (!isFuncTypeConvertible(FPT))
1579     return llvm::StructType::get(getLLVMContext());
1580 
1581   const CGFunctionInfo *Info;
1582   if (isa<CXXDestructorDecl>(MD))
1583     Info =
1584         &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
1585   else
1586     Info = &arrangeCXXMethodDeclaration(MD);
1587   return GetFunctionType(*Info);
1588 }
1589 
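/// Adds function attributes that can be derived from the prototype alone;
/// currently this is just 'nounwind' for prototypes known not to throw.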
1590 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1591                                                llvm::AttrBuilder &FuncAttrs,
1592                                                const FunctionProtoType *FPT) {
1593   if (!FPT)
1594     return;
1595 
1596   if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1597       FPT->isNothrow(Ctx))
1598     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1599 }
1600 
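// Builds the attribute list for a function or call site: function-level
// attributes, return-value attributes, and per-parameter attributes, using
// the AttributeSet index convention (ReturnIndex for the return value, the IR
// argument index plus one for parameters, and FunctionIndex for the function).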
1601 void CodeGenModule::ConstructAttributeList(
1602     StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1603     AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) {
1604   llvm::AttrBuilder FuncAttrs;
1605   llvm::AttrBuilder RetAttrs;
1606   bool HasOptnone = false;
1607 
1608   CallingConv = FI.getEffectiveCallingConvention();
1609 
1610   if (FI.isNoReturn())
1611     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1612 
  // If we have information about the function prototype, we can learn
  // attributes from there.
1615   AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1616                                      CalleeInfo.getCalleeFunctionProtoType());
1617 
1618   const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
1619 
1620   bool HasAnyX86InterruptAttr = false;
1621   // FIXME: handle sseregparm someday...
1622   if (TargetDecl) {
1623     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1624       FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1625     if (TargetDecl->hasAttr<NoThrowAttr>())
1626       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1627     if (TargetDecl->hasAttr<NoReturnAttr>())
1628       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1629     if (TargetDecl->hasAttr<NoDuplicateAttr>())
1630       FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1631 
1632     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1633       AddAttributesFromFunctionProtoType(
1634           getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overriding methods.
1637       const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1638       if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1639         FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1640     }
1641 
1642     // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1643     if (TargetDecl->hasAttr<ConstAttr>()) {
1644       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1645       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1646     } else if (TargetDecl->hasAttr<PureAttr>()) {
1647       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1648       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1649     } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1650       FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1651       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1652     }
1653     if (TargetDecl->hasAttr<RestrictAttr>())
1654       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1655     if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
1656       RetAttrs.addAttribute(llvm::Attribute::NonNull);
1657 
1658     HasAnyX86InterruptAttr = TargetDecl->hasAttr<AnyX86InterruptAttr>();
1659     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1660   }
1661 
1662   // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1663   if (!HasOptnone) {
1664     if (CodeGenOpts.OptimizeSize)
1665       FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1666     if (CodeGenOpts.OptimizeSize == 2)
1667       FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1668   }
1669 
1670   if (CodeGenOpts.DisableRedZone)
1671     FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1672   if (CodeGenOpts.NoImplicitFloat)
1673     FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1674   if (CodeGenOpts.EnableSegmentedStacks &&
1675       !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1676     FuncAttrs.addAttribute("split-stack");
1677 
1678   if (AttrOnCallSite) {
1679     // Attributes that should go on the call site only.
1680     if (!CodeGenOpts.SimplifyLibCalls ||
1681         CodeGenOpts.isNoBuiltinFunc(Name.data()))
1682       FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1683     if (!CodeGenOpts.TrapFuncName.empty())
1684       FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1685   } else {
1686     // Attributes that should go on the function, but not the call site.
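    // Translate the frame-pointer options into the string attributes the
    // backend understands: allow elimination everywhere, allow it only in
    // leaf functions, or disable it entirely.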
1687     if (!CodeGenOpts.DisableFPElim) {
1688       FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1689     } else if (CodeGenOpts.OmitLeafFramePointer) {
1690       FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1691       FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1692     } else {
1693       FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1694       FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1695     }
1696 
1697     bool DisableTailCalls =
1698         CodeGenOpts.DisableTailCalls || HasAnyX86InterruptAttr ||
1699         (TargetDecl && TargetDecl->hasAttr<DisableTailCallsAttr>());
1700     FuncAttrs.addAttribute(
1701         "disable-tail-calls",
1702         llvm::toStringRef(DisableTailCalls));
1703 
1704     FuncAttrs.addAttribute("less-precise-fpmad",
1705                            llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1706     FuncAttrs.addAttribute("no-infs-fp-math",
1707                            llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1708     FuncAttrs.addAttribute("no-nans-fp-math",
1709                            llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1710     FuncAttrs.addAttribute("unsafe-fp-math",
1711                            llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1712     FuncAttrs.addAttribute("use-soft-float",
1713                            llvm::toStringRef(CodeGenOpts.SoftFloat));
1714     FuncAttrs.addAttribute("stack-protector-buffer-size",
1715                            llvm::utostr(CodeGenOpts.SSPBufferSize));
1716 
1717     if (CodeGenOpts.StackRealignment)
1718       FuncAttrs.addAttribute("stackrealign");
1719     if (CodeGenOpts.Backchain)
1720       FuncAttrs.addAttribute("backchain");
1721 
1722     // Add target-cpu and target-features attributes to functions. If
1723     // we have a decl for the function and it has a target attribute then
1724     // parse that and add it to the feature set.
1725     StringRef TargetCPU = getTarget().getTargetOpts().CPU;
1726     const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
1727     if (FD && FD->hasAttr<TargetAttr>()) {
1728       llvm::StringMap<bool> FeatureMap;
1729       getFunctionFeatureMap(FeatureMap, FD);
1730 
1731       // Produce the canonical string for this set of features.
1732       std::vector<std::string> Features;
1733       for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
1734                                                  ie = FeatureMap.end();
1735            it != ie; ++it)
1736         Features.push_back((it->second ? "+" : "-") + it->first().str());
1737 
1738       // Now add the target-cpu and target-features to the function.
1739       // While we populated the feature map above, we still need to
1740       // get and parse the target attribute so we can get the cpu for
1741       // the function.
1742       const auto *TD = FD->getAttr<TargetAttr>();
1743       TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
1744       if (ParsedAttr.second != "")
1745         TargetCPU = ParsedAttr.second;
1746       if (TargetCPU != "")
1747         FuncAttrs.addAttribute("target-cpu", TargetCPU);
1748       if (!Features.empty()) {
1749         std::sort(Features.begin(), Features.end());
1750         FuncAttrs.addAttribute(
1751             "target-features",
1752             llvm::join(Features.begin(), Features.end(), ","));
1753       }
1754     } else {
1755       // Otherwise just add the existing target cpu and target features to the
1756       // function.
1757       std::vector<std::string> &Features = getTarget().getTargetOpts().Features;
1758       if (TargetCPU != "")
1759         FuncAttrs.addAttribute("target-cpu", TargetCPU);
1760       if (!Features.empty()) {
1761         std::sort(Features.begin(), Features.end());
1762         FuncAttrs.addAttribute(
1763             "target-features",
1764             llvm::join(Features.begin(), Features.end(), ","));
1765       }
1766     }
1767   }
1768 
1769   if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1770     // Conservatively, mark all functions and calls in CUDA as convergent
1771     // (meaning, they may call an intrinsically convergent op, such as
1772     // __syncthreads(), and so can't have certain optimizations applied around
1773     // them).  LLVM will remove this attribute where it safely can.
1774     FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1775 
1776     // Respect -fcuda-flush-denormals-to-zero.
1777     if (getLangOpts().CUDADeviceFlushDenormalsToZero)
1778       FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1779   }
1780 
1781   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1782 
1783   QualType RetTy = FI.getReturnType();
1784   const ABIArgInfo &RetAI = FI.getReturnInfo();
1785   switch (RetAI.getKind()) {
1786   case ABIArgInfo::Extend:
1787     if (RetTy->hasSignedIntegerRepresentation())
1788       RetAttrs.addAttribute(llvm::Attribute::SExt);
1789     else if (RetTy->hasUnsignedIntegerRepresentation())
1790       RetAttrs.addAttribute(llvm::Attribute::ZExt);
1791     // FALL THROUGH
1792   case ABIArgInfo::Direct:
1793     if (RetAI.getInReg())
1794       RetAttrs.addAttribute(llvm::Attribute::InReg);
1795     break;
1796   case ABIArgInfo::Ignore:
1797     break;
1798 
1799   case ABIArgInfo::InAlloca:
1800   case ABIArgInfo::Indirect: {
1801     // inalloca and sret disable readnone and readonly
1802     FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1803       .removeAttribute(llvm::Attribute::ReadNone);
1804     break;
1805   }
1806 
1807   case ABIArgInfo::CoerceAndExpand:
1808     break;
1809 
1810   case ABIArgInfo::Expand:
1811     llvm_unreachable("Invalid ABI kind for return argument");
1812   }
1813 
1814   if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1815     QualType PTy = RefTy->getPointeeType();
1816     if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1817       RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1818                                         .getQuantity());
1819     else if (getContext().getTargetAddressSpace(PTy) == 0)
1820       RetAttrs.addAttribute(llvm::Attribute::NonNull);
1821   }
1822 
1823   // Attach return attributes.
1824   if (RetAttrs.hasAttributes()) {
1825     PAL.push_back(llvm::AttributeSet::get(
1826         getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
1827   }
1828 
1829   bool hasUsedSRet = false;
1830 
1831   // Attach attributes to sret.
1832   if (IRFunctionArgs.hasSRetArg()) {
1833     llvm::AttrBuilder SRETAttrs;
1834     SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1835     hasUsedSRet = true;
1836     if (RetAI.getInReg())
1837       SRETAttrs.addAttribute(llvm::Attribute::InReg);
1838     PAL.push_back(llvm::AttributeSet::get(
1839         getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
1840   }
1841 
1842   // Attach attributes to inalloca argument.
1843   if (IRFunctionArgs.hasInallocaArg()) {
1844     llvm::AttrBuilder Attrs;
1845     Attrs.addAttribute(llvm::Attribute::InAlloca);
1846     PAL.push_back(llvm::AttributeSet::get(
1847         getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
1848   }
1849 
1850   unsigned ArgNo = 0;
1851   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
1852                                           E = FI.arg_end();
1853        I != E; ++I, ++ArgNo) {
1854     QualType ParamType = I->type;
1855     const ABIArgInfo &AI = I->info;
1856     llvm::AttrBuilder Attrs;
1857 
1858     // Add attribute for padding argument, if necessary.
1859     if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
1860       if (AI.getPaddingInReg())
1861         PAL.push_back(llvm::AttributeSet::get(
1862             getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
1863             llvm::Attribute::InReg));
1864     }
1865 
1866     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1867     // have the corresponding parameter variable.  It doesn't make
1868     // sense to do it here because parameters are so messed up.
1869     switch (AI.getKind()) {
1870     case ABIArgInfo::Extend:
1871       if (ParamType->isSignedIntegerOrEnumerationType())
1872         Attrs.addAttribute(llvm::Attribute::SExt);
1873       else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
1874         if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
1875           Attrs.addAttribute(llvm::Attribute::SExt);
1876         else
1877           Attrs.addAttribute(llvm::Attribute::ZExt);
1878       }
1879       // FALL THROUGH
1880     case ABIArgInfo::Direct:
1881       if (ArgNo == 0 && FI.isChainCall())
1882         Attrs.addAttribute(llvm::Attribute::Nest);
1883       else if (AI.getInReg())
1884         Attrs.addAttribute(llvm::Attribute::InReg);
1885       break;
1886 
1887     case ABIArgInfo::Indirect: {
1888       if (AI.getInReg())
1889         Attrs.addAttribute(llvm::Attribute::InReg);
1890 
1891       if (AI.getIndirectByVal())
1892         Attrs.addAttribute(llvm::Attribute::ByVal);
1893 
1894       CharUnits Align = AI.getIndirectAlign();
1895 
1896       // In a byval argument, it is important that the required
1897       // alignment of the type is honored, as LLVM might be creating a
1898       // *new* stack object, and needs to know what alignment to give
1899       // it. (Sometimes it can deduce a sensible alignment on its own,
1900       // but not if clang decides it must emit a packed struct, or the
1901       // user specifies increased alignment requirements.)
1902       //
1903       // This is different from indirect *not* byval, where the object
1904       // exists already, and the align attribute is purely
1905       // informative.
1906       assert(!Align.isZero());
1907 
1908       // For now, only add this when we have a byval argument.
1909       // TODO: be less lazy about updating test cases.
1910       if (AI.getIndirectByVal())
1911         Attrs.addAlignmentAttr(Align.getQuantity());
1912 
1913       // byval disables readnone and readonly.
1914       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1915         .removeAttribute(llvm::Attribute::ReadNone);
1916       break;
1917     }
1918     case ABIArgInfo::Ignore:
1919     case ABIArgInfo::Expand:
1920     case ABIArgInfo::CoerceAndExpand:
1921       break;
1922 
1923     case ABIArgInfo::InAlloca:
1924       // inalloca disables readnone and readonly.
1925       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1926           .removeAttribute(llvm::Attribute::ReadNone);
1927       continue;
1928     }
1929 
1930     if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
1931       QualType PTy = RefTy->getPointeeType();
1932       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1933         Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1934                                        .getQuantity());
1935       else if (getContext().getTargetAddressSpace(PTy) == 0)
1936         Attrs.addAttribute(llvm::Attribute::NonNull);
1937     }
1938 
1939     switch (FI.getExtParameterInfo(ArgNo).getABI()) {
1940     case ParameterABI::Ordinary:
1941       break;
1942 
1943     case ParameterABI::SwiftIndirectResult: {
1944       // Add 'sret' if we haven't already used it for something, but
1945       // only if the result is void.
1946       if (!hasUsedSRet && RetTy->isVoidType()) {
1947         Attrs.addAttribute(llvm::Attribute::StructRet);
1948         hasUsedSRet = true;
1949       }
1950 
1951       // Add 'noalias' in either case.
1952       Attrs.addAttribute(llvm::Attribute::NoAlias);
1953 
1954       // Add 'dereferenceable' and 'alignment'.
1955       auto PTy = ParamType->getPointeeType();
1956       if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
1957         auto info = getContext().getTypeInfoInChars(PTy);
1958         Attrs.addDereferenceableAttr(info.first.getQuantity());
1959         Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
1960                                                  info.second.getQuantity()));
1961       }
1962       break;
1963     }
1964 
1965     case ParameterABI::SwiftErrorResult:
1966       Attrs.addAttribute(llvm::Attribute::SwiftError);
1967       break;
1968 
1969     case ParameterABI::SwiftContext:
1970       Attrs.addAttribute(llvm::Attribute::SwiftSelf);
1971       break;
1972     }
1973 
1974     if (Attrs.hasAttributes()) {
1975       unsigned FirstIRArg, NumIRArgs;
1976       std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1977       for (unsigned i = 0; i < NumIRArgs; i++)
1978         PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
1979                                               FirstIRArg + i + 1, Attrs));
1980     }
1981   }
1982   assert(ArgNo == FI.arg_size());
1983 
1984   if (FuncAttrs.hasAttributes())
1985     PAL.push_back(llvm::
1986                   AttributeSet::get(getLLVMContext(),
1987                                     llvm::AttributeSet::FunctionIndex,
1988                                     FuncAttrs));
1989 }
1990 
1991 /// An argument came in as a promoted argument; demote it back to its
1992 /// declared type.
1993 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
1994                                          const VarDecl *var,
1995                                          llvm::Value *value) {
1996   llvm::Type *varType = CGF.ConvertType(var->getType());
1997 
1998   // This can happen with promotions that actually don't change the
1999   // underlying type, like the enum promotions.
2000   if (value->getType() == varType) return value;
2001 
2002   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2003          && "unexpected promotion type");
2004 
2005   if (isa<llvm::IntegerType>(varType))
2006     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2007 
2008   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2009 }
2010 
/// Returns the attribute (either parameter attribute or function attribute)
/// that declares argument ArgNo to be non-null.
2013 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2014                                          QualType ArgType, unsigned ArgNo) {
2015   // FIXME: __attribute__((nonnull)) can also be applied to:
2016   //   - references to pointers, where the pointee is known to be
2017   //     nonnull (apparently a Clang extension)
2018   //   - transparent unions containing pointers
2019   // In the former case, LLVM IR cannot represent the constraint. In
2020   // the latter case, we have no guarantee that the transparent union
2021   // is in fact passed as a pointer.
2022   if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2023     return nullptr;
2024   // First, check attribute on parameter itself.
2025   if (PVD) {
2026     if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2027       return ParmNNAttr;
2028   }
2029   // Check function attributes.
2030   if (!FD)
2031     return nullptr;
2032   for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2033     if (NNAttr->isNonNull(ArgNo))
2034       return NNAttr;
2035   }
2036   return nullptr;
2037 }
2038 
2039 namespace {
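  // Cleanup that copies the current value of a swifterror temporary back into
  // the original swifterror argument slot when the function exits.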
2040   struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2041     Address Temp;
2042     Address Arg;
2043     CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2044     void Emit(CodeGenFunction &CGF, Flags flags) override {
2045       llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2046       CGF.Builder.CreateStore(errorValue, Arg);
2047     }
2048   };
2049 }
2050 
2051 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2052                                          llvm::Function *Fn,
2053                                          const FunctionArgList &Args) {
2054   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2055     // Naked functions don't have prologues.
2056     return;
2057 
2058   // If this is an implicit-return-zero function, go ahead and
2059   // initialize the return value.  TODO: it might be nice to have
2060   // a more general mechanism for this that didn't require synthesized
2061   // return statements.
2062   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2063     if (FD->hasImplicitReturnZero()) {
2064       QualType RetTy = FD->getReturnType().getUnqualifiedType();
2065       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2066       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2067       Builder.CreateStore(Zero, ReturnValue);
2068     }
2069   }
2070 
2071   // FIXME: We no longer need the types from FunctionArgList; lift up and
2072   // simplify.
2073 
2074   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2075   // Flattened function arguments.
2076   SmallVector<llvm::Value *, 16> FnArgs;
2077   FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2078   for (auto &Arg : Fn->args()) {
2079     FnArgs.push_back(&Arg);
2080   }
2081   assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2082 
2083   // If we're using inalloca, all the memory arguments are GEPs off of the last
2084   // parameter, which is a pointer to the complete memory area.
2085   Address ArgStruct = Address::invalid();
2086   const llvm::StructLayout *ArgStructLayout = nullptr;
2087   if (IRFunctionArgs.hasInallocaArg()) {
2088     ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2089     ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2090                         FI.getArgStructAlignment());
2091 
2092     assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2093   }
2094 
2095   // Name the struct return parameter.
2096   if (IRFunctionArgs.hasSRetArg()) {
2097     auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2098     AI->setName("agg.result");
2099     AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
2100                                         llvm::Attribute::NoAlias));
2101   }
2102 
  // Track if we received the parameter as a pointer (indirect, byval, or
  // inalloca).  If we already have a pointer, EmitParmDecl doesn't need to
  // copy it into a local alloca for us.
2106   SmallVector<ParamValue, 16> ArgVals;
2107   ArgVals.reserve(Args.size());
2108 
2109   // Create a pointer value for every parameter declaration.  This usually
2110   // entails copying one or more LLVM IR arguments into an alloca.  Don't push
2111   // any cleanups or do anything that might unwind.  We do that separately, so
2112   // we can push the cleanups in the correct order for the ABI.
2113   assert(FI.arg_size() == Args.size() &&
2114          "Mismatch between function signature & arguments.");
2115   unsigned ArgNo = 0;
2116   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2117   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2118        i != e; ++i, ++info_it, ++ArgNo) {
2119     const VarDecl *Arg = *i;
2120     QualType Ty = info_it->type;
2121     const ABIArgInfo &ArgI = info_it->info;
2122 
2123     bool isPromoted =
2124       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2125 
2126     unsigned FirstIRArg, NumIRArgs;
2127     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2128 
2129     switch (ArgI.getKind()) {
2130     case ABIArgInfo::InAlloca: {
2131       assert(NumIRArgs == 0);
2132       auto FieldIndex = ArgI.getInAllocaFieldIndex();
2133       CharUnits FieldOffset =
2134         CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
2135       Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2136                                           Arg->getName());
2137       ArgVals.push_back(ParamValue::forIndirect(V));
2138       break;
2139     }
2140 
2141     case ABIArgInfo::Indirect: {
2142       assert(NumIRArgs == 1);
2143       Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2144 
2145       if (!hasScalarEvaluationKind(Ty)) {
2146         // Aggregates and complex variables are accessed by reference.  All we
2147         // need to do is realign the value, if requested.
2148         Address V = ParamAddr;
2149         if (ArgI.getIndirectRealign()) {
2150           Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2151 
2152           // Copy from the incoming argument pointer to the temporary with the
2153           // appropriate alignment.
2154           //
2155           // FIXME: We should have a common utility for generating an aggregate
2156           // copy.
2157           CharUnits Size = getContext().getTypeSizeInChars(Ty);
2158           auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2159           Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2160           Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2161           Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2162           V = AlignedTemp;
2163         }
2164         ArgVals.push_back(ParamValue::forIndirect(V));
2165       } else {
2166         // Load scalar value from indirect argument.
2167         llvm::Value *V =
2168           EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
2169 
2170         if (isPromoted)
2171           V = emitArgumentDemotion(*this, Arg, V);
2172         ArgVals.push_back(ParamValue::forDirect(V));
2173       }
2174       break;
2175     }
2176 
2177     case ABIArgInfo::Extend:
2178     case ABIArgInfo::Direct: {
2179 
2180       // If we have the trivial case, handle it with no muss and fuss.
2181       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2182           ArgI.getCoerceToType() == ConvertType(Ty) &&
2183           ArgI.getDirectOffset() == 0) {
2184         assert(NumIRArgs == 1);
2185         llvm::Value *V = FnArgs[FirstIRArg];
2186         auto AI = cast<llvm::Argument>(V);
2187 
2188         if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2189           if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2190                              PVD->getFunctionScopeIndex()))
2191             AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2192                                                 AI->getArgNo() + 1,
2193                                                 llvm::Attribute::NonNull));
2194 
2195           QualType OTy = PVD->getOriginalType();
2196           if (const auto *ArrTy =
2197               getContext().getAsConstantArrayType(OTy)) {
2198             // A C99 array parameter declaration with the static keyword also
2199             // indicates dereferenceability, and if the size is constant we can
2200             // use the dereferenceable attribute (which requires the size in
2201             // bytes).
2202             if (ArrTy->getSizeModifier() == ArrayType::Static) {
2203               QualType ETy = ArrTy->getElementType();
2204               uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2205               if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2206                   ArrSize) {
2207                 llvm::AttrBuilder Attrs;
2208                 Attrs.addDereferenceableAttr(
2209                   getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2210                 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2211                                                     AI->getArgNo() + 1, Attrs));
2212               } else if (getContext().getTargetAddressSpace(ETy) == 0) {
2213                 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2214                                                     AI->getArgNo() + 1,
2215                                                     llvm::Attribute::NonNull));
2216               }
2217             }
2218           } else if (const auto *ArrTy =
2219                      getContext().getAsVariableArrayType(OTy)) {
2220             // For C99 VLAs with the static keyword, we don't know the size so
2221             // we can't use the dereferenceable attribute, but in addrspace(0)
2222             // we know that it must be nonnull.
2223             if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2224                 !getContext().getTargetAddressSpace(ArrTy->getElementType()))
2225               AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2226                                                   AI->getArgNo() + 1,
2227                                                   llvm::Attribute::NonNull));
2228           }
2229 
2230           const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2231           if (!AVAttr)
2232             if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2233               AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2234           if (AVAttr) {
2235             llvm::Value *AlignmentValue =
2236               EmitScalarExpr(AVAttr->getAlignment());
2237             llvm::ConstantInt *AlignmentCI =
2238               cast<llvm::ConstantInt>(AlignmentValue);
2239             unsigned Alignment =
2240               std::min((unsigned) AlignmentCI->getZExtValue(),
2241                        +llvm::Value::MaximumAlignment);
2242 
2243             llvm::AttrBuilder Attrs;
2244             Attrs.addAlignmentAttr(Alignment);
2245             AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2246                                                 AI->getArgNo() + 1, Attrs));
2247           }
2248         }
2249 
2250         if (Arg->getType().isRestrictQualified())
2251           AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2252                                               AI->getArgNo() + 1,
2253                                               llvm::Attribute::NoAlias));
2254 
2255         // LLVM expects swifterror parameters to be used in very restricted
2256         // ways.  Copy the value into a less-restricted temporary.
2257         if (FI.getExtParameterInfo(ArgNo).getABI()
2258               == ParameterABI::SwiftErrorResult) {
2259           QualType pointeeTy = Ty->getPointeeType();
2260           assert(pointeeTy->isPointerType());
2261           Address temp =
2262             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2263           Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2264           llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2265           Builder.CreateStore(incomingErrorValue, temp);
2266           V = temp.getPointer();
2267 
2268           // Push a cleanup to copy the value back at the end of the function.
2269           // The convention does not guarantee that the value will be written
2270           // back if the function exits with an unwind exception.
2271           EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2272         }
2273 
2274         // Ensure the argument is the correct type.
2275         if (V->getType() != ArgI.getCoerceToType())
2276           V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2277 
2278         if (isPromoted)
2279           V = emitArgumentDemotion(*this, Arg, V);
2280 
2281         if (const CXXMethodDecl *MD =
2282             dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
2283           if (MD->isVirtual() && Arg == CXXABIThisDecl)
2284             V = CGM.getCXXABI().
2285                 adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
2286         }
2287 
2288         // Because of merging of function types from multiple decls it is
2289         // possible for the type of an argument to not match the corresponding
2290         // type in the function type. Since we are codegening the callee
2291         // in here, add a cast to the argument type.
2292         llvm::Type *LTy = ConvertType(Arg->getType());
2293         if (V->getType() != LTy)
2294           V = Builder.CreateBitCast(V, LTy);
2295 
2296         ArgVals.push_back(ParamValue::forDirect(V));
2297         break;
2298       }
2299 
2300       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2301                                      Arg->getName());
2302 
2303       // Pointer to store into.
2304       Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2305 
2306       // Fast-isel and the optimizer generally like scalar values better than
2307       // FCAs, so we flatten them if this is safe to do for this argument.
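      // For example (illustrative): a parameter coerced to { double, double }
      // arrives as two scalar IR arguments, which the code below stores back
      // into the parameter's memory element by element.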
2308       llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2309       if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2310           STy->getNumElements() > 1) {
2311         auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2312         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2313         llvm::Type *DstTy = Ptr.getElementType();
2314         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2315 
2316         Address AddrToStoreInto = Address::invalid();
2317         if (SrcSize <= DstSize) {
2318           AddrToStoreInto =
2319             Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
2320         } else {
2321           AddrToStoreInto =
2322             CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2323         }
2324 
2325         assert(STy->getNumElements() == NumIRArgs);
2326         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2327           auto AI = FnArgs[FirstIRArg + i];
2328           AI->setName(Arg->getName() + ".coerce" + Twine(i));
2329           auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2330           Address EltPtr =
2331             Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2332           Builder.CreateStore(AI, EltPtr);
2333         }
2334 
2335         if (SrcSize > DstSize) {
2336           Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2337         }
2338 
2339       } else {
2340         // Simple case, just do a coerced store of the argument into the alloca.
2341         assert(NumIRArgs == 1);
2342         auto AI = FnArgs[FirstIRArg];
2343         AI->setName(Arg->getName() + ".coerce");
2344         CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2345       }
2346 
2347       // Match to what EmitParmDecl is expecting for this type.
2348       if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2349         llvm::Value *V =
2350           EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
2351         if (isPromoted)
2352           V = emitArgumentDemotion(*this, Arg, V);
2353         ArgVals.push_back(ParamValue::forDirect(V));
2354       } else {
2355         ArgVals.push_back(ParamValue::forIndirect(Alloca));
2356       }
2357       break;
2358     }
2359 
2360     case ABIArgInfo::CoerceAndExpand: {
2361       // Reconstruct into a temporary.
2362       Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2363       ArgVals.push_back(ParamValue::forIndirect(alloca));
2364 
2365       auto coercionType = ArgI.getCoerceAndExpandType();
2366       alloca = Builder.CreateElementBitCast(alloca, coercionType);
2367       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2368 
2369       unsigned argIndex = FirstIRArg;
2370       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2371         llvm::Type *eltType = coercionType->getElementType(i);
2372         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2373           continue;
2374 
2375         auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2376         auto elt = FnArgs[argIndex++];
2377         Builder.CreateStore(elt, eltAddr);
2378       }
2379       assert(argIndex == FirstIRArg + NumIRArgs);
2380       break;
2381     }
2382 
2383     case ABIArgInfo::Expand: {
2384       // If this structure was expanded into multiple arguments then
2385       // we need to create a temporary and reconstruct it from the
2386       // arguments.
2387       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2388       LValue LV = MakeAddrLValue(Alloca, Ty);
2389       ArgVals.push_back(ParamValue::forIndirect(Alloca));
2390 
2391       auto FnArgIter = FnArgs.begin() + FirstIRArg;
2392       ExpandTypeFromArgs(Ty, LV, FnArgIter);
2393       assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2394       for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2395         auto AI = FnArgs[FirstIRArg + i];
2396         AI->setName(Arg->getName() + "." + Twine(i));
2397       }
2398       break;
2399     }
2400 
2401     case ABIArgInfo::Ignore:
2402       assert(NumIRArgs == 0);
2403       // Initialize the local variable appropriately.
2404       if (!hasScalarEvaluationKind(Ty)) {
2405         ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2406       } else {
2407         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2408         ArgVals.push_back(ParamValue::forDirect(U));
2409       }
2410       break;
2411     }
2412   }
2413 
2414   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2415     for (int I = Args.size() - 1; I >= 0; --I)
2416       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2417   } else {
2418     for (unsigned I = 0, E = Args.size(); I != E; ++I)
2419       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2420   }
2421 }
2422 
2423 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2424   while (insn->use_empty()) {
2425     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2426     if (!bitcast) return;
2427 
2428     // This is "safe" because we would have used a ConstantExpr otherwise.
2429     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2430     bitcast->eraseFromParent();
2431   }
2432 }
2433 
2434 /// Try to emit a fused autorelease of a return result.
2435 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2436                                                     llvm::Value *result) {
  // The result must be the last instruction in the current insertion block.
2438   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2439   if (BB->empty()) return nullptr;
2440   if (&BB->back() != result) return nullptr;
2441 
2442   llvm::Type *resultType = result->getType();
2443 
2444   // result is in a BasicBlock and is therefore an Instruction.
2445   llvm::Instruction *generator = cast<llvm::Instruction>(result);
2446 
2447   SmallVector<llvm::Instruction*,4> insnsToKill;
2448 
2449   // Look for:
2450   //  %generator = bitcast %type1* %generator2 to %type2*
2451   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2452     // We would have emitted this as a constant if the operand weren't
2453     // an Instruction.
2454     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2455 
2456     // Require the generator to be immediately followed by the cast.
2457     if (generator->getNextNode() != bitcast)
2458       return nullptr;
2459 
2460     insnsToKill.push_back(bitcast);
2461   }
2462 
2463   // Look for:
2464   //   %generator = call i8* @objc_retain(i8* %originalResult)
2465   // or
2466   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2467   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2468   if (!call) return nullptr;
2469 
2470   bool doRetainAutorelease;
2471 
2472   if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2473     doRetainAutorelease = true;
2474   } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2475                                           .objc_retainAutoreleasedReturnValue) {
2476     doRetainAutorelease = false;
2477 
2478     // If we emitted an assembly marker for this call (and the
    // ObjCEntrypoints field should have been set if so), go looking
2480     // for that call.  If we can't find it, we can't do this
2481     // optimization.  But it should always be the immediately previous
2482     // instruction, unless we needed bitcasts around the call.
2483     if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2484       llvm::Instruction *prev = call->getPrevNode();
2485       assert(prev);
2486       if (isa<llvm::BitCastInst>(prev)) {
2487         prev = prev->getPrevNode();
2488         assert(prev);
2489       }
2490       assert(isa<llvm::CallInst>(prev));
2491       assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2492                CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2493       insnsToKill.push_back(prev);
2494     }
2495   } else {
2496     return nullptr;
2497   }
2498 
2499   result = call->getArgOperand(0);
2500   insnsToKill.push_back(call);
2501 
2502   // Keep killing bitcasts, for sanity.  Note that we no longer care
2503   // about precise ordering as long as there's exactly one use.
2504   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2505     if (!bitcast->hasOneUse()) break;
2506     insnsToKill.push_back(bitcast);
2507     result = bitcast->getOperand(0);
2508   }
2509 
2510   // Delete all the unnecessary instructions, from latest to earliest.
2511   for (SmallVectorImpl<llvm::Instruction*>::iterator
2512          i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
2513     (*i)->eraseFromParent();
2514 
2515   // Do the fused retain/autorelease if we were asked to.
2516   if (doRetainAutorelease)
2517     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2518 
2519   // Cast back to the result type.
2520   return CGF.Builder.CreateBitCast(result, resultType);
2521 }
2522 
2523 /// If this is a +1 of the value of an immutable 'self', remove it.
2524 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2525                                           llvm::Value *result) {
2526   // This is only applicable to a method with an immutable 'self'.
2527   const ObjCMethodDecl *method =
2528     dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2529   if (!method) return nullptr;
2530   const VarDecl *self = method->getSelfDecl();
2531   if (!self->getType().isConstQualified()) return nullptr;
2532 
2533   // Look for a retain call.
2534   llvm::CallInst *retainCall =
2535     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2536   if (!retainCall ||
2537       retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2538     return nullptr;
2539 
2540   // Look for an ordinary load of 'self'.
2541   llvm::Value *retainedValue = retainCall->getArgOperand(0);
2542   llvm::LoadInst *load =
2543     dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2544   if (!load || load->isAtomic() || load->isVolatile() ||
2545       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2546     return nullptr;
2547 
2548   // Okay!  Burn it all down.  This relies for correctness on the
2549   // assumption that the retain is emitted as part of the return and
2550   // that thereafter everything is used "linearly".
2551   llvm::Type *resultType = result->getType();
2552   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2553   assert(retainCall->use_empty());
2554   retainCall->eraseFromParent();
2555   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2556 
2557   return CGF.Builder.CreateBitCast(load, resultType);
2558 }
2559 
2560 /// Emit an ARC autorelease of the result of a function.
2561 ///
2562 /// \return the value to actually return from the function
2563 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2564                                             llvm::Value *result) {
2565   // If we're returning 'self', kill the initial retain.  This is a
2566   // heuristic attempt to "encourage correctness" in the really unfortunate
2567   // case where we have a return of self during a dealloc and we desperately
2568   // need to avoid the possible autorelease.
2569   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2570     return self;
2571 
2572   // At -O0, try to emit a fused retain/autorelease.
2573   if (CGF.shouldUseFusedARCCalls())
2574     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2575       return fused;
2576 
2577   return CGF.EmitARCAutoreleaseReturnValue(result);
2578 }
2579 
2580 /// Heuristically search for a dominating store to the return-value slot.
2581 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // Check if a User is a store whose pointer operand is the ReturnValue.
  // We are looking for stores to the ReturnValue, not for stores of the
  // ReturnValue to some other location.
2585   auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2586     auto *SI = dyn_cast<llvm::StoreInst>(U);
2587     if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2588       return nullptr;
2589     // These aren't actually possible for non-coerced returns, and we
2590     // only care about non-coerced returns on this code path.
2591     assert(!SI->isAtomic() && !SI->isVolatile());
2592     return SI;
2593   };
2594   // If there are multiple uses of the return-value slot, just check
2595   // for something immediately preceding the IP.  Sometimes this can
2596   // happen with how we generate implicit-returns; it can also happen
2597   // with noreturn cleanups.
2598   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2599     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2600     if (IP->empty()) return nullptr;
2601     llvm::Instruction *I = &IP->back();
2602 
    // Skip lifetime markers (and the bitcasts computing their address
    // operand) while scanning backwards for the instruction to check.
2604     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2605                                             IE = IP->rend();
2606          II != IE; ++II) {
2607       if (llvm::IntrinsicInst *Intrinsic =
2608               dyn_cast<llvm::IntrinsicInst>(&*II)) {
2609         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2610           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2611           ++II;
2612           if (II == IE)
2613             break;
2614           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2615             continue;
2616         }
2617       }
2618       I = &*II;
2619       break;
2620     }
2621 
2622     return GetStoreIfValid(I);
2623   }
2624 
2625   llvm::StoreInst *store =
2626       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2627   if (!store) return nullptr;
2628 
  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessor chain from the current insertion point.
2631   llvm::BasicBlock *StoreBB = store->getParent();
2632   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2633   while (IP != StoreBB) {
2634     if (!(IP = IP->getSinglePredecessor()))
2635       return nullptr;
2636   }
2637 
2638   // Okay, the store's basic block dominates the insertion point; we
2639   // can do our thing.
2640   return store;
2641 }
2642 
2643 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2644                                          bool EmitRetDbgLoc,
2645                                          SourceLocation EndLoc) {
2646   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2647     // Naked functions don't have epilogues.
2648     Builder.CreateUnreachable();
2649     return;
2650   }
2651 
2652   // Functions with no result always return void.
2653   if (!ReturnValue.isValid()) {
2654     Builder.CreateRetVoid();
2655     return;
2656   }
2657 
2658   llvm::DebugLoc RetDbgLoc;
2659   llvm::Value *RV = nullptr;
2660   QualType RetTy = FI.getReturnType();
2661   const ABIArgInfo &RetAI = FI.getReturnInfo();
2662 
2663   switch (RetAI.getKind()) {
2664   case ABIArgInfo::InAlloca:
    // Aggregates get evaluated directly into the destination.  Sometimes we
    // need to return the sret value in a register, though.
2667     assert(hasAggregateEvaluationKind(RetTy));
2668     if (RetAI.getInAllocaSRet()) {
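      // The caller passed the sret pointer to us inside the inalloca argument
      // block (the function's last IR argument); load it back out so it can
      // be returned in a register.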
2669       llvm::Function::arg_iterator EI = CurFn->arg_end();
2670       --EI;
2671       llvm::Value *ArgStruct = &*EI;
2672       llvm::Value *SRet = Builder.CreateStructGEP(
2673           nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2674       RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2675     }
2676     break;
2677 
2678   case ABIArgInfo::Indirect: {
2679     auto AI = CurFn->arg_begin();
2680     if (RetAI.isSRetAfterThis())
2681       ++AI;
2682     switch (getEvaluationKind(RetTy)) {
2683     case TEK_Complex: {
2684       ComplexPairTy RT =
2685         EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2686       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2687                          /*isInit*/ true);
2688       break;
2689     }
2690     case TEK_Aggregate:
      // Do nothing; aggregates get evaluated directly into the destination.
2692       break;
2693     case TEK_Scalar:
2694       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2695                         MakeNaturalAlignAddrLValue(&*AI, RetTy),
2696                         /*isInit*/ true);
2697       break;
2698     }
2699     break;
2700   }
2701 
2702   case ABIArgInfo::Extend:
2703   case ABIArgInfo::Direct:
2704     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2705         RetAI.getDirectOffset() == 0) {
      // The internal return value temp always has pointer-to-return-type
      // type; just do a load.
2708 
2709       // If there is a dominating store to ReturnValue, we can elide
2710       // the load, zap the store, and usually zap the alloca.
2711       if (llvm::StoreInst *SI =
2712               findDominatingStoreToReturnValue(*this)) {
2713         // Reuse the debug location from the store unless there is
2714         // cleanup code to be emitted between the store and return
2715         // instruction.
2716         if (EmitRetDbgLoc && !AutoreleaseResult)
2717           RetDbgLoc = SI->getDebugLoc();
2718         // Get the stored value and nuke the now-dead store.
2719         RV = SI->getValueOperand();
2720         SI->eraseFromParent();
2721 
2722         // If that was the only use of the return value, nuke it as well now.
2723         auto returnValueInst = ReturnValue.getPointer();
2724         if (returnValueInst->use_empty()) {
2725           if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2726             alloca->eraseFromParent();
2727             ReturnValue = Address::invalid();
2728           }
2729         }
2730 
2731       // Otherwise, we have to do a simple load.
2732       } else {
2733         RV = Builder.CreateLoad(ReturnValue);
2734       }
2735     } else {
2736       // If the value is offset in memory, apply the offset now.
2737       Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2738 
2739       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2740     }
2741 
2742     // In ARC, end functions that return a retainable type with a call
2743     // to objc_autoreleaseReturnValue.
2744     if (AutoreleaseResult) {
2745 #ifndef NDEBUG
      // Type::isObjCRetainableType has to be called on a QualType that hasn't
      // been stripped of its typedefs, so we cannot use RetTy here. Get the
      // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
      // from CurCodeDecl or BlockInfo.
2750       QualType RT;
2751 
2752       if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2753         RT = FD->getReturnType();
2754       else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2755         RT = MD->getReturnType();
2756       else if (isa<BlockDecl>(CurCodeDecl))
2757         RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2758       else
2759         llvm_unreachable("Unexpected function/method type");
2760 
2761       assert(getLangOpts().ObjCAutoRefCount &&
2762              !FI.isReturnsRetained() &&
2763              RT->isObjCRetainableType());
2764 #endif
2765       RV = emitAutoreleaseOfResult(*this, RV);
2766     }
2767 
2768     break;
2769 
2770   case ABIArgInfo::Ignore:
2771     break;
2772 
2773   case ABIArgInfo::CoerceAndExpand: {
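    // The return value was evaluated into ReturnValue with the layout of the
    // coercion struct; reload its non-padding elements and return them either
    // as a single scalar or as a first-class aggregate.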
2774     auto coercionType = RetAI.getCoerceAndExpandType();
2775     auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2776 
2777     // Load all of the coerced elements out into results.
2778     llvm::SmallVector<llvm::Value*, 4> results;
2779     Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2780     for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2781       auto coercedEltType = coercionType->getElementType(i);
2782       if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2783         continue;
2784 
2785       auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2786       auto elt = Builder.CreateLoad(eltAddr);
2787       results.push_back(elt);
2788     }
2789 
2790     // If we have one result, it's the single direct result type.
2791     if (results.size() == 1) {
2792       RV = results[0];
2793 
2794     // Otherwise, we need to make a first-class aggregate.
2795     } else {
2796       // Construct a return type that lacks padding elements.
2797       llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2798 
2799       RV = llvm::UndefValue::get(returnType);
2800       for (unsigned i = 0, e = results.size(); i != e; ++i) {
2801         RV = Builder.CreateInsertValue(RV, results[i], i);
2802       }
2803     }
2804     break;
2805   }
2806 
2807   case ABIArgInfo::Expand:
2808     llvm_unreachable("Invalid ABI kind for return argument");
2809   }
2810 
2811   llvm::Instruction *Ret;
2812   if (RV) {
2813     if (CurCodeDecl && SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
2814       if (auto RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>()) {
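        // With -fsanitize=returns-nonnull-attribute, emit a run-time check
        // that a function declared 'returns_nonnull' does not return null.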
2815         SanitizerScope SanScope(this);
2816         llvm::Value *Cond = Builder.CreateICmpNE(
2817             RV, llvm::Constant::getNullValue(RV->getType()));
2818         llvm::Constant *StaticData[] = {
2819             EmitCheckSourceLocation(EndLoc),
2820             EmitCheckSourceLocation(RetNNAttr->getLocation()),
2821         };
2822         EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
2823                   "nonnull_return", StaticData, None);
2824       }
2825     }
2826     Ret = Builder.CreateRet(RV);
2827   } else {
2828     Ret = Builder.CreateRetVoid();
2829   }
2830 
2831   if (RetDbgLoc)
2832     Ret->setDebugLoc(std::move(RetDbgLoc));
2833 }
2834 
2835 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
2836   const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2837   return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
2838 }
2839 
2840 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
2841                                           QualType Ty) {
2842   // FIXME: Generate IR in one pass, rather than going back and fixing up these
2843   // placeholders.
2844   llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
2845   llvm::Value *Placeholder =
2846     llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
2847   Placeholder = CGF.Builder.CreateDefaultAlignedLoad(Placeholder);
2848 
2849   // FIXME: When we generate this IR in one pass, we shouldn't need
2850   // this win32-specific alignment hack.
2851   CharUnits Align = CharUnits::fromQuantity(4);
2852 
2853   return AggValueSlot::forAddr(Address(Placeholder, Align),
2854                                Ty.getQualifiers(),
2855                                AggValueSlot::IsNotDestructed,
2856                                AggValueSlot::DoesNotNeedGCBarriers,
2857                                AggValueSlot::IsNotAliased);
2858 }
2859 
2860 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
2861                                           const VarDecl *param,
2862                                           SourceLocation loc) {
2863   // StartFunction converted the ABI-lowered parameter(s) into a
2864   // local alloca.  We need to turn that into an r-value suitable
2865   // for EmitCall.
2866   Address local = GetAddrOfLocalVar(param);
2867 
2868   QualType type = param->getType();
2869 
2870   assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
2871          "cannot emit delegate call arguments for inalloca arguments!");
2872 
2873   // For the most part, we just need to load the alloca, except that
2874   // aggregate r-values are actually pointers to temporaries.
2875   if (type->isReferenceType())
2876     args.add(RValue::get(Builder.CreateLoad(local)), type);
2877   else
2878     args.add(convertTempToRValue(local, type, loc), type);
2879 }
2880 
2881 static bool isProvablyNull(llvm::Value *addr) {
2882   return isa<llvm::ConstantPointerNull>(addr);
2883 }
2884 
/// A local alloca can never be null, so it is the only kind of address we
/// treat as provably non-null here.
static bool isProvablyNonNull(llvm::Value *addr) {
  return isa<llvm::AllocaInst>(addr);
}
2888 
2889 /// Emit the actual writing-back of a writeback.
2890 static void emitWriteback(CodeGenFunction &CGF,
2891                           const CallArgList::Writeback &writeback) {
2892   const LValue &srcLV = writeback.Source;
2893   Address srcAddr = srcLV.getAddress();
2894   assert(!isProvablyNull(srcAddr.getPointer()) &&
2895          "shouldn't have writeback for provably null argument");
2896 
2897   llvm::BasicBlock *contBB = nullptr;
2898 
2899   // If the argument wasn't provably non-null, we need to null check
2900   // before doing the store.
2901   bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
2902   if (!provablyNonNull) {
2903     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
2904     contBB = CGF.createBasicBlock("icr.done");
2905 
2906     llvm::Value *isNull =
2907       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
2908     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
2909     CGF.EmitBlock(writebackBB);
2910   }
2911 
2912   // Load the value to writeback.
2913   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
2914 
2915   // Cast it back, in case we're writing an id to a Foo* or something.
2916   value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
2917                                     "icr.writeback-cast");
2918 
2919   // Perform the writeback.
2920 
2921   // If we have a "to use" value, it's something we need to emit a use
2922   // of.  This has to be carefully threaded in: if it's done after the
2923   // release it's potentially undefined behavior (and the optimizer
2924   // will ignore it), and if it happens before the retain then the
2925   // optimizer could move the release there.
2926   if (writeback.ToUse) {
2927     assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
2928 
2929     // Retain the new value.  No need to block-copy here:  the block's
2930     // being passed up the stack.
2931     value = CGF.EmitARCRetainNonBlock(value);
2932 
2933     // Emit the intrinsic use here.
2934     CGF.EmitARCIntrinsicUse(writeback.ToUse);
2935 
2936     // Load the old value (primitively).
2937     llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
2938 
2939     // Put the new value in place (primitively).
2940     CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
2941 
2942     // Release the old value.
2943     CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
2944 
2945   // Otherwise, we can just do a normal lvalue store.
2946   } else {
2947     CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
2948   }
2949 
2950   // Jump to the continuation block.
2951   if (!provablyNonNull)
2952     CGF.EmitBlock(contBB);
2953 }
2954 
2955 static void emitWritebacks(CodeGenFunction &CGF,
2956                            const CallArgList &args) {
2957   for (const auto &I : args.writebacks())
2958     emitWriteback(CGF, I);
2959 }
2960 
2961 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
2962                                             const CallArgList &CallArgs) {
2963   assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
2964   ArrayRef<CallArgList::CallArgCleanup> Cleanups =
2965     CallArgs.getCleanupsToDeactivate();
2966   // Iterate in reverse to increase the likelihood of popping the cleanup.
2967   for (const auto &I : llvm::reverse(Cleanups)) {
2968     CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
2969     I.IsActiveIP->eraseFromParent();
2970   }
2971 }
2972 
2973 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
2974   if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
2975     if (uop->getOpcode() == UO_AddrOf)
2976       return uop->getSubExpr();
2977   return nullptr;
2978 }
2979 
2980 /// Emit an argument that's being passed call-by-writeback.  That is,
2981 /// we are passing the address of an __autoreleased temporary; it
2982 /// might be copy-initialized with the current value of the given
2983 /// address, but it will definitely be copied out of after the call.
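/// For example (illustrative, under ARC):
///   __strong NSError *err = nil;
///   [obj doSomethingAndReturnError:&err];
/// where the parameter is 'NSError * __autoreleasing *': we pass the address
/// of a fresh autoreleasing temporary and copy its value back into 'err'
/// after the call.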
2984 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
2985                              const ObjCIndirectCopyRestoreExpr *CRE) {
2986   LValue srcLV;
2987 
2988   // Make an optimistic effort to emit the address as an l-value.
2989   // This can fail if the argument expression is more complicated.
2990   if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
2991     srcLV = CGF.EmitLValue(lvExpr);
2992 
2993   // Otherwise, just emit it as a scalar.
2994   } else {
2995     Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
2996 
2997     QualType srcAddrType =
2998       CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
2999     srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3000   }
3001   Address srcAddr = srcLV.getAddress();
3002 
3003   // The dest and src types don't necessarily match in LLVM terms
3004   // because of the crazy ObjC compatibility rules.
3005 
3006   llvm::PointerType *destType =
3007     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3008 
3009   // If the address is a constant null, just pass the appropriate null.
3010   if (isProvablyNull(srcAddr.getPointer())) {
3011     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3012              CRE->getType());
3013     return;
3014   }
3015 
3016   // Create the temporary.
3017   Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3018                                       CGF.getPointerAlign(),
3019                                       "icr.temp");
3020   // Loading an l-value can introduce a cleanup if the l-value is __weak,
3021   // and that cleanup will be conditional if we can't prove that the l-value
3022   // isn't null, so we need to register a dominating point so that the cleanups
3023   // system will make valid IR.
3024   CodeGenFunction::ConditionalEvaluation condEval(CGF);
3025 
3026   // Zero-initialize it if we're not doing a copy-initialization.
3027   bool shouldCopy = CRE->shouldCopy();
3028   if (!shouldCopy) {
3029     llvm::Value *null =
3030       llvm::ConstantPointerNull::get(
3031         cast<llvm::PointerType>(destType->getElementType()));
3032     CGF.Builder.CreateStore(null, temp);
3033   }
3034 
3035   llvm::BasicBlock *contBB = nullptr;
3036   llvm::BasicBlock *originBB = nullptr;
3037 
3038   // If the address is *not* known to be non-null, we need to switch.
3039   llvm::Value *finalArgument;
3040 
3041   bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
3042   if (provablyNonNull) {
3043     finalArgument = temp.getPointer();
3044   } else {
3045     llvm::Value *isNull =
3046       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3047 
3048     finalArgument = CGF.Builder.CreateSelect(isNull,
3049                                    llvm::ConstantPointerNull::get(destType),
3050                                              temp.getPointer(), "icr.argument");
3051 
3052     // If we need to copy, then the load has to be conditional, which
3053     // means we need control flow.
3054     if (shouldCopy) {
3055       originBB = CGF.Builder.GetInsertBlock();
3056       contBB = CGF.createBasicBlock("icr.cont");
3057       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3058       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3059       CGF.EmitBlock(copyBB);
3060       condEval.begin(CGF);
3061     }
3062   }
3063 
3064   llvm::Value *valueToUse = nullptr;
3065 
3066   // Perform a copy if necessary.
3067   if (shouldCopy) {
3068     RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3069     assert(srcRV.isScalar());
3070 
3071     llvm::Value *src = srcRV.getScalarVal();
3072     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3073                                     "icr.cast");
3074 
3075     // Use an ordinary store, not a store-to-lvalue.
3076     CGF.Builder.CreateStore(src, temp);
3077 
3078     // If optimization is enabled, and the value was held in a
3079     // __strong variable, we need to tell the optimizer that this
3080     // value has to stay alive until we're doing the store back.
3081     // This is because the temporary is effectively unretained,
3082     // and so otherwise we can violate the high-level semantics.
3083     if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3084         srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3085       valueToUse = src;
3086     }
3087   }
3088 
3089   // Finish the control flow if we needed it.
3090   if (shouldCopy && !provablyNonNull) {
3091     llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3092     CGF.EmitBlock(contBB);
3093 
3094     // Make a phi for the value to intrinsically use.
3095     if (valueToUse) {
3096       llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3097                                                       "icr.to-use");
3098       phiToUse->addIncoming(valueToUse, copyBB);
3099       phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3100                             originBB);
3101       valueToUse = phiToUse;
3102     }
3103 
3104     condEval.end(CGF);
3105   }
3106 
3107   args.addWriteback(srcLV, temp, valueToUse);
3108   args.add(RValue::get(finalArgument), CRE->getType());
3109 }
3110 
3111 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3112   assert(!StackBase && !StackCleanup.isValid());
3113 
3114   // Save the stack.
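  // The matching stackrestore in freeArgumentMemory releases the inalloca
  // argument block after the call.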
3115   llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3116   StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3117 }
3118 
3119 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3120   if (StackBase) {
3121     // Restore the stack after the call.
3122     llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3123     CGF.Builder.CreateCall(F, StackBase);
3124   }
3125 }
3126 
3127 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3128                                           SourceLocation ArgLoc,
3129                                           const FunctionDecl *FD,
3130                                           unsigned ParmNum) {
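  // With -fsanitize=nonnull-attribute, emit a run-time check that an argument
  // bound to a parameter declared with the 'nonnull' attribute is not null.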
3131   if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
3132     return;
3133   auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
3134   unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3135   auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
3136   if (!NNAttr)
3137     return;
3138   SanitizerScope SanScope(this);
3139   assert(RV.isScalar());
3140   llvm::Value *V = RV.getScalarVal();
3141   llvm::Value *Cond =
3142       Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3143   llvm::Constant *StaticData[] = {
3144       EmitCheckSourceLocation(ArgLoc),
3145       EmitCheckSourceLocation(NNAttr->getLocation()),
3146       llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3147   };
3148   EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
3149                 "nonnull_arg", StaticData, None);
3150 }
3151 
3152 void CodeGenFunction::EmitCallArgs(
3153     CallArgList &Args, ArrayRef<QualType> ArgTypes,
3154     llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3155     const FunctionDecl *CalleeDecl, unsigned ParamsToSkip) {
3156   assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3157 
3158   auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg) {
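    // If the corresponding parameter carries a pass_object_size attribute,
    // pass an extra implicit size_t argument right after this one, computed
    // (or constant-folded) from __builtin_object_size on the argument.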
3159     if (CalleeDecl == nullptr || I >= CalleeDecl->getNumParams())
3160       return;
3161     auto *PS = CalleeDecl->getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3162     if (PS == nullptr)
3163       return;
3164 
3165     const auto &Context = getContext();
3166     auto SizeTy = Context.getSizeType();
3167     auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3168     llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T);
3169     Args.add(RValue::get(V), SizeTy);
3170   };
3171 
3172   // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3173   // because arguments are destroyed left to right in the callee.
3174   if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3175     // Insert a stack save if we're going to need any inalloca args.
3176     bool HasInAllocaArgs = false;
3177     for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3178          I != E && !HasInAllocaArgs; ++I)
3179       HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3180     if (HasInAllocaArgs) {
3181       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3182       Args.allocateArgumentMemory(*this);
3183     }
3184 
3185     // Evaluate each argument.
3186     size_t CallArgsStart = Args.size();
3187     for (int I = ArgTypes.size() - 1; I >= 0; --I) {
3188       CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
3189       EmitCallArg(Args, *Arg, ArgTypes[I]);
3190       EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
3191                           CalleeDecl, ParamsToSkip + I);
3192       MaybeEmitImplicitObjectSize(I, *Arg);
3193     }
3194 
3195     // Un-reverse the arguments we just evaluated so they match up with the LLVM
3196     // IR function.
3197     std::reverse(Args.begin() + CallArgsStart, Args.end());
3198     return;
3199   }
3200 
3201   for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3202     CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
3203     assert(Arg != ArgRange.end());
3204     EmitCallArg(Args, *Arg, ArgTypes[I]);
3205     EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
3206                         CalleeDecl, ParamsToSkip + I);
3207     MaybeEmitImplicitObjectSize(I, *Arg);
3208   }
3209 }
3210 
3211 namespace {
3212 
/// An EH-only cleanup that runs the destructor on an aggregate argument that
/// was evaluated for a callee-destroyed parameter but never actually handed
/// off, because we unwound before reaching the call.
struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
  DestroyUnpassedArg(Address Addr, QualType Ty)
      : Addr(Addr), Ty(Ty) {}
3216 
3217   Address Addr;
3218   QualType Ty;
3219 
3220   void Emit(CodeGenFunction &CGF, Flags flags) override {
3221     const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3222     assert(!Dtor->isTrivial());
3223     CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3224                               /*Delegating=*/false, Addr);
3225   }
3226 };
3227 
3228 struct DisableDebugLocationUpdates {
3229   CodeGenFunction &CGF;
3230   bool disabledDebugInfo;
3231   DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3232     if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3233       CGF.disableDebugInfo();
3234   }
3235   ~DisableDebugLocationUpdates() {
3236     if (disabledDebugInfo)
3237       CGF.enableDebugInfo();
3238   }
3239 };
3240 
3241 } // end anonymous namespace
3242 
3243 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3244                                   QualType type) {
3245   DisableDebugLocationUpdates Dis(*this, E);
3246   if (const ObjCIndirectCopyRestoreExpr *CRE
3247         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3248     assert(getLangOpts().ObjCAutoRefCount);
3249     assert(getContext().hasSameType(E->getType(), type));
3250     return emitWritebackArg(*this, args, CRE);
3251   }
3252 
3253   assert(type->isReferenceType() == E->isGLValue() &&
3254          "reference binding to unmaterialized r-value!");
3255 
3256   if (E->isGLValue()) {
3257     assert(E->getObjectKind() == OK_Ordinary);
3258     return args.add(EmitReferenceBindingToExpr(E), type);
3259   }
3260 
3261   bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3262 
3263   // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3264   // However, we still have to push an EH-only cleanup in case we unwind before
3265   // we make it to the call.
3266   if (HasAggregateEvalKind &&
3267       CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3268     // If we're using inalloca, use the argument memory.  Otherwise, use a
3269     // temporary.
3270     AggValueSlot Slot;
3271     if (args.isUsingInAlloca())
3272       Slot = createPlaceholderSlot(*this, type);
3273     else
3274       Slot = CreateAggTemp(type, "agg.tmp");
3275 
3276     const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3277     bool DestroyedInCallee =
3278         RD && RD->hasNonTrivialDestructor() &&
3279         CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
3280     if (DestroyedInCallee)
3281       Slot.setExternallyDestructed();
3282 
3283     EmitAggExpr(E, Slot);
3284     RValue RV = Slot.asRValue();
3285     args.add(RV, type);
3286 
3287     if (DestroyedInCallee) {
3288       // Create a no-op GEP between the placeholder and the cleanup so we can
3289       // RAUW it successfully.  It also serves as a marker of the first
3290       // instruction where the cleanup is active.
3291       pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3292                                               type);
3293       // This unreachable is a temporary marker which will be removed later.
3294       llvm::Instruction *IsActive = Builder.CreateUnreachable();
3295       args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3296     }
3297     return;
3298   }
3299 
3300   if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3301       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3302     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3303     assert(L.isSimple());
3304     if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
3305       args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
3306     } else {
3307       // We can't represent a misaligned lvalue in the CallArgList, so copy
3308       // to an aligned temporary now.
3309       Address tmp = CreateMemTemp(type);
3310       EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
3311       args.add(RValue::getAggregate(tmp), type);
3312     }
3313     return;
3314   }
3315 
3316   args.add(EmitAnyExprToTemp(E), type);
3317 }
3318 
3319 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3320   // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3321   // implicitly widens null pointer constants that are arguments to varargs
3322   // functions to pointer-sized ints.
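  // For example (illustrative): in 'printf("%p", NULL)' on Win64 the literal
  // 0 is widened to a 64-bit integer so that the callee reads a full
  // pointer-sized argument.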
3323   if (!getTarget().getTriple().isOSWindows())
3324     return Arg->getType();
3325 
3326   if (Arg->getType()->isIntegerType() &&
3327       getContext().getTypeSize(Arg->getType()) <
3328           getContext().getTargetInfo().getPointerWidth(0) &&
3329       Arg->isNullPointerConstant(getContext(),
3330                                  Expr::NPC_ValueDependentIsNotNull)) {
3331     return getContext().getIntPtrType();
3332   }
3333 
3334   return Arg->getType();
3335 }
3336 
3337 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3338 // optimizer it can aggressively ignore unwind edges.
3339 void
3340 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3341   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3342       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3343     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3344                       CGM.getNoObjCARCExceptionsMetadata());
3345 }
3346 
3347 /// Emits a call to the given no-arguments nounwind runtime function.
3348 llvm::CallInst *
3349 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3350                                          const llvm::Twine &name) {
3351   return EmitNounwindRuntimeCall(callee, None, name);
3352 }
3353 
3354 /// Emits a call to the given nounwind runtime function.
3355 llvm::CallInst *
3356 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3357                                          ArrayRef<llvm::Value*> args,
3358                                          const llvm::Twine &name) {
3359   llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3360   call->setDoesNotThrow();
3361   return call;
3362 }
3363 
3364 /// Emits a simple call (never an invoke) to the given no-arguments
3365 /// runtime function.
3366 llvm::CallInst *
3367 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3368                                  const llvm::Twine &name) {
3369   return EmitRuntimeCall(callee, None, name);
3370 }
3371 
3372 // Calls which may throw must have operand bundles indicating which funclet
3373 // they are nested within.
3374 static void
3375 getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad,
3376                      SmallVectorImpl<llvm::OperandBundleDef> &BundleList) {
3377   // There is no need for a funclet operand bundle if we aren't inside a
3378   // funclet.
3379   if (!CurrentFuncletPad)
3380     return;
3381 
3382   // Skip intrinsics which cannot throw.
3383   auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3384   if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3385     return;
3386 
3387   BundleList.emplace_back("funclet", CurrentFuncletPad);
3388 }
3389 
3390 /// Emits a simple call (never an invoke) to the given runtime function.
3391 llvm::CallInst *
3392 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3393                                  ArrayRef<llvm::Value*> args,
3394                                  const llvm::Twine &name) {
3395   SmallVector<llvm::OperandBundleDef, 1> BundleList;
3396   getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
3397 
3398   llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList, name);
3399   call->setCallingConv(getRuntimeCC());
3400   return call;
3401 }
3402 
3403 /// Emits a call or invoke to the given noreturn runtime function.
3404 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3405                                                ArrayRef<llvm::Value*> args) {
3406   SmallVector<llvm::OperandBundleDef, 1> BundleList;
3407   getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
3408 
3409   if (getInvokeDest()) {
3410     llvm::InvokeInst *invoke =
3411       Builder.CreateInvoke(callee,
3412                            getUnreachableBlock(),
3413                            getInvokeDest(),
3414                            args,
3415                            BundleList);
3416     invoke->setDoesNotReturn();
3417     invoke->setCallingConv(getRuntimeCC());
3418   } else {
3419     llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3420     call->setDoesNotReturn();
3421     call->setCallingConv(getRuntimeCC());
3422     Builder.CreateUnreachable();
3423   }
3424 }
3425 
3426 /// Emits a call or invoke instruction to the given nullary runtime function.
3427 llvm::CallSite
3428 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3429                                          const Twine &name) {
3430   return EmitRuntimeCallOrInvoke(callee, None, name);
3431 }
3432 
3433 /// Emits a call or invoke instruction to the given runtime function.
3434 llvm::CallSite
3435 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3436                                          ArrayRef<llvm::Value*> args,
3437                                          const Twine &name) {
3438   llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3439   callSite.setCallingConv(getRuntimeCC());
3440   return callSite;
3441 }
3442 
3443 /// Emits a call or invoke instruction to the given function, depending
3444 /// on the current state of the EH stack.
3445 llvm::CallSite
3446 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3447                                   ArrayRef<llvm::Value *> Args,
3448                                   const Twine &Name) {
3449   llvm::BasicBlock *InvokeDest = getInvokeDest();
3450   SmallVector<llvm::OperandBundleDef, 1> BundleList;
3451   getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
3452 
3453   llvm::Instruction *Inst;
3454   if (!InvokeDest)
3455     Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3456   else {
3457     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3458     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3459                                 Name);
3460     EmitBlock(ContBB);
3461   }
3462 
3463   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3464   // optimizer it can aggressively ignore unwind edges.
3465   if (CGM.getLangOpts().ObjCAutoRefCount)
3466     AddObjCARCExceptionMetadata(Inst);
3467 
3468   return llvm::CallSite(Inst);
3469 }
3470 
3471 /// \brief Store a non-aggregate value to an address to initialize it.  For
3472 /// initialization, a non-atomic store will be used.
3473 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
3474                                         LValue Dst) {
3475   if (Src.isScalar())
3476     CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
3477   else
3478     CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
3479 }
3480 
3481 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3482                                                   llvm::Value *New) {
3483   DeferredReplacements.push_back(std::make_pair(Old, New));
3484 }
3485 
3486 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3487                                  llvm::Value *Callee,
3488                                  ReturnValueSlot ReturnValue,
3489                                  const CallArgList &CallArgs,
3490                                  CGCalleeInfo CalleeInfo,
3491                                  llvm::Instruction **callOrInvoke) {
3492   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3493 
3494   // Handle struct-return functions by passing a pointer to the
3495   // location that we would like to return into.
3496   QualType RetTy = CallInfo.getReturnType();
3497   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3498 
3499   llvm::FunctionType *IRFuncTy =
3500     cast<llvm::FunctionType>(
3501                   cast<llvm::PointerType>(Callee->getType())->getElementType());
3502 
3503   // If we're using inalloca, insert the allocation after the stack save.
3504   // FIXME: Do this earlier rather than hacking it in here!
3505   Address ArgMemory = Address::invalid();
3506   const llvm::StructLayout *ArgMemoryLayout = nullptr;
3507   if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3508     ArgMemoryLayout = CGM.getDataLayout().getStructLayout(ArgStruct);
3509     llvm::Instruction *IP = CallArgs.getStackBase();
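    // If allocateArgumentMemory emitted a stacksave, place the argument-memory
    // alloca immediately after it so the matching stackrestore frees it;
    // otherwise fall back to an ordinary temporary alloca.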
3510     llvm::AllocaInst *AI;
3511     if (IP) {
3512       IP = IP->getNextNode();
3513       AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
3514     } else {
3515       AI = CreateTempAlloca(ArgStruct, "argmem");
3516     }
3517     auto Align = CallInfo.getArgStructAlignment();
3518     AI->setAlignment(Align.getQuantity());
3519     AI->setUsedWithInAlloca(true);
3520     assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3521     ArgMemory = Address(AI, Align);
3522   }
3523 
3524   // Helper function to drill into the inalloca allocation.
3525   auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3526     auto FieldOffset =
3527       CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3528     return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3529   };
3530 
3531   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3532   SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3533 
3534   // If the call returns a temporary with struct return, create a temporary
3535   // alloca to hold the result, unless one is given to us.
3536   Address SRetPtr = Address::invalid();
3537   size_t UnusedReturnSize = 0;
3538   if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3539     if (!ReturnValue.isNull()) {
3540       SRetPtr = ReturnValue.getValue();
3541     } else {
3542       SRetPtr = CreateMemTemp(RetTy);
3543       if (HaveInsertPoint() && ReturnValue.isUnused()) {
3544         uint64_t size =
3545             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3546         if (EmitLifetimeStart(size, SRetPtr.getPointer()))
3547           UnusedReturnSize = size;
3548       }
3549     }
3550     if (IRFunctionArgs.hasSRetArg()) {
3551       IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3552     } else if (RetAI.isInAlloca()) {
3553       Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3554       Builder.CreateStore(SRetPtr.getPointer(), Addr);
3555     }
3556   }
3557 
3558   Address swiftErrorTemp = Address::invalid();
3559   Address swiftErrorArg = Address::invalid();
3560 
3561   assert(CallInfo.arg_size() == CallArgs.size() &&
3562          "Mismatch between function signature & arguments.");
3563   unsigned ArgNo = 0;
3564   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3565   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3566        I != E; ++I, ++info_it, ++ArgNo) {
3567     const ABIArgInfo &ArgInfo = info_it->info;
3568     RValue RV = I->RV;
3569 
3570     // Insert a padding argument to ensure proper alignment.
3571     if (IRFunctionArgs.hasPaddingArg(ArgNo))
3572       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3573           llvm::UndefValue::get(ArgInfo.getPaddingType());
3574 
3575     unsigned FirstIRArg, NumIRArgs;
3576     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3577 
3578     switch (ArgInfo.getKind()) {
3579     case ABIArgInfo::InAlloca: {
3580       assert(NumIRArgs == 0);
3581       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3582       if (RV.isAggregate()) {
3583         // Replace the placeholder with the appropriate argument slot GEP.
3584         llvm::Instruction *Placeholder =
3585             cast<llvm::Instruction>(RV.getAggregatePointer());
3586         CGBuilderTy::InsertPoint IP = Builder.saveIP();
3587         Builder.SetInsertPoint(Placeholder);
3588         Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3589         Builder.restoreIP(IP);
3590         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3591       } else {
3592         // Store the RValue into the argument struct.
3593         Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3594         unsigned AS = Addr.getType()->getPointerAddressSpace();
3595         llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is unavoidable.  The
        // definition of a type later in a translation unit may change its type
        // from {}* to (%struct.foo*)*.
3599         if (Addr.getType() != MemType)
3600           Addr = Builder.CreateBitCast(Addr, MemType);
3601         LValue argLV = MakeAddrLValue(Addr, I->Ty);
3602         EmitInitStoreOfNonAggregate(*this, RV, argLV);
3603       }
3604       break;
3605     }
3606 
3607     case ABIArgInfo::Indirect: {
3608       assert(NumIRArgs == 1);
3609       if (RV.isScalar() || RV.isComplex()) {
3610         // Make a temporary alloca to pass the argument.
3611         Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
3612         IRCallArgs[FirstIRArg] = Addr.getPointer();
3613 
3614         LValue argLV = MakeAddrLValue(Addr, I->Ty);
3615         EmitInitStoreOfNonAggregate(*this, RV, argLV);
3616       } else {
3617         // We want to avoid creating an unnecessary temporary+copy here;
3618         // however, we need one in three cases:
3619         // 1. If the argument is not byval, and we are required to copy the
3620         //    source.  (This case doesn't occur on any common architecture.)
3621         // 2. If the argument is byval, RV is not sufficiently aligned, and
3622         //    we cannot force it to be sufficiently aligned.
3623         // 3. If the argument is byval, but RV is located in an address space
3624         //    different than that of the argument (0).
3625         Address Addr = RV.getAggregateAddress();
3626         CharUnits Align = ArgInfo.getIndirectAlign();
3627         const llvm::DataLayout *TD = &CGM.getDataLayout();
3628         const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
3629         const unsigned ArgAddrSpace =
3630             (FirstIRArg < IRFuncTy->getNumParams()
3631                  ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
3632                  : 0);
3633         if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
3634             (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
3635              llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
3636                                               Align.getQuantity(), *TD)
3637                < Align.getQuantity()) ||
3638             (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
3639           // Create an aligned temporary, and copy to it.
3640           Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
3641           IRCallArgs[FirstIRArg] = AI.getPointer();
3642           EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
3643         } else {
3644           // Skip the extra memcpy call.
3645           IRCallArgs[FirstIRArg] = Addr.getPointer();
3646         }
3647       }
3648       break;
3649     }
3650 
3651     case ABIArgInfo::Ignore:
3652       assert(NumIRArgs == 0);
3653       break;
3654 
3655     case ABIArgInfo::Extend:
3656     case ABIArgInfo::Direct: {
3657       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
3658           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
3659           ArgInfo.getDirectOffset() == 0) {
3660         assert(NumIRArgs == 1);
3661         llvm::Value *V;
3662         if (RV.isScalar())
3663           V = RV.getScalarVal();
3664         else
3665           V = Builder.CreateLoad(RV.getAggregateAddress());
3666 
3667         // Implement swifterror by copying into a new swifterror argument.
3668         // We'll write back in the normal path out of the call.
3669         if (CallInfo.getExtParameterInfo(ArgNo).getABI()
3670               == ParameterABI::SwiftErrorResult) {
3671           assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
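          // Seed a fresh swifterror alloca with the caller's current error
          // value and pass that instead; the (possibly updated) value is
          // copied back to the original error address after the call.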
3672 
3673           QualType pointeeTy = I->Ty->getPointeeType();
3674           swiftErrorArg =
3675             Address(V, getContext().getTypeAlignInChars(pointeeTy));
3676 
3677           swiftErrorTemp =
3678             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
3679           V = swiftErrorTemp.getPointer();
3680           cast<llvm::AllocaInst>(V)->setSwiftError(true);
3681 
3682           llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
3683           Builder.CreateStore(errorValue, swiftErrorTemp);
3684         }
3685 
3686         // We might have to widen integers, but we should never truncate.
3687         if (ArgInfo.getCoerceToType() != V->getType() &&
3688             V->getType()->isIntegerTy())
3689           V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
3690 
3691         // If the argument doesn't match, perform a bitcast to coerce it.  This
3692         // can happen due to trivial type mismatches.
3693         if (FirstIRArg < IRFuncTy->getNumParams() &&
3694             V->getType() != IRFuncTy->getParamType(FirstIRArg))
3695           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
3696 
3697         IRCallArgs[FirstIRArg] = V;
3698         break;
3699       }
3700 
3701       // FIXME: Avoid the conversion through memory if possible.
3702       Address Src = Address::invalid();
3703       if (RV.isScalar() || RV.isComplex()) {
3704         Src = CreateMemTemp(I->Ty, "coerce");
3705         LValue SrcLV = MakeAddrLValue(Src, I->Ty);
3706         EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
3707       } else {
3708         Src = RV.getAggregateAddress();
3709       }
3710 
3711       // If the value is offset in memory, apply the offset now.
3712       Src = emitAddressAtOffset(*this, Src, ArgInfo);
3713 
3714       // Fast-isel and the optimizer generally like scalar values better than
3715       // FCAs, so we flatten them if this is safe to do for this argument.
3716       llvm::StructType *STy =
3717             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
3718       if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
3719         llvm::Type *SrcTy = Src.getType()->getElementType();
3720         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
3721         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
3722 
3723         // If the source type is smaller than the destination type of the
3724         // coerce-to logic, copy the source value into a temp alloca the size
3725         // of the destination type to allow loading all of it. The bits past
3726         // the source value are left undef.
3727         if (SrcSize < DstSize) {
3728           Address TempAlloca
3729             = CreateTempAlloca(STy, Src.getAlignment(),
3730                                Src.getName() + ".coerce");
3731           Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
3732           Src = TempAlloca;
3733         } else {
3734           Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy));
3735         }
3736 
3737         auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
3738         assert(NumIRArgs == STy->getNumElements());
3739         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3740           auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
3741           Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
3742           llvm::Value *LI = Builder.CreateLoad(EltPtr);
3743           IRCallArgs[FirstIRArg + i] = LI;
3744         }
3745       } else {
3746         // In the simple case, just pass the coerced loaded value.
3747         assert(NumIRArgs == 1);
3748         IRCallArgs[FirstIRArg] =
3749           CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
3750       }
3751 
3752       break;
3753     }
3754 
3755     case ABIArgInfo::CoerceAndExpand: {
3756       auto coercionType = ArgInfo.getCoerceAndExpandType();
3757       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
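      // Pass the non-padding elements of the coercion struct as the
      // individual IR arguments, materializing a scalar to a temporary first
      // if needed.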
3758 
3759       llvm::Value *tempSize = nullptr;
3760       Address addr = Address::invalid();
3761       if (RV.isAggregate()) {
3762         addr = RV.getAggregateAddress();
3763       } else {
3764         assert(RV.isScalar()); // complex should always just be direct
3765 
3766         llvm::Type *scalarType = RV.getScalarVal()->getType();
3767         auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
3768         auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
3769 
3770         tempSize = llvm::ConstantInt::get(CGM.Int64Ty, scalarSize);
3771 
3772         // Materialize to a temporary.
3773         addr = CreateTempAlloca(RV.getScalarVal()->getType(),
3774                  CharUnits::fromQuantity(std::max(layout->getAlignment(),
3775                                                   scalarAlign)));
3776         EmitLifetimeStart(scalarSize, addr.getPointer());
3777 
3778         Builder.CreateStore(RV.getScalarVal(), addr);
3779       }
3780 
3781       addr = Builder.CreateElementBitCast(addr, coercionType);
3782 
3783       unsigned IRArgPos = FirstIRArg;
3784       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3785         llvm::Type *eltType = coercionType->getElementType(i);
3786         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
3787         Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
3788         llvm::Value *elt = Builder.CreateLoad(eltAddr);
3789         IRCallArgs[IRArgPos++] = elt;
3790       }
3791       assert(IRArgPos == FirstIRArg + NumIRArgs);
3792 
3793       if (tempSize) {
3794         EmitLifetimeEnd(tempSize, addr.getPointer());
3795       }
3796 
3797       break;
3798     }
3799 
3800     case ABIArgInfo::Expand:
3801       unsigned IRArgPos = FirstIRArg;
3802       ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
3803       assert(IRArgPos == FirstIRArg + NumIRArgs);
3804       break;
3805     }
3806   }
3807 
3808   if (ArgMemory.isValid()) {
3809     llvm::Value *Arg = ArgMemory.getPointer();
3810     if (CallInfo.isVariadic()) {
3811       // When passing non-POD arguments by value to variadic functions, we will
3812       // end up with a variadic prototype and an inalloca call site.  In such
3813       // cases, we can't do any parameter mismatch checks.  Give up and bitcast
3814       // the callee.
3815       unsigned CalleeAS =
3816           cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
3817       Callee = Builder.CreateBitCast(
3818           Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
3819     } else {
3820       llvm::Type *LastParamTy =
3821           IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
3822       if (Arg->getType() != LastParamTy) {
3823 #ifndef NDEBUG
3824         // Assert that these structs have equivalent element types.
3825         llvm::StructType *FullTy = CallInfo.getArgStruct();
3826         llvm::StructType *DeclaredTy = cast<llvm::StructType>(
3827             cast<llvm::PointerType>(LastParamTy)->getElementType());
3828         assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
3829         for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
3830                                                 DE = DeclaredTy->element_end(),
3831                                                 FI = FullTy->element_begin();
3832              DI != DE; ++DI, ++FI)
3833           assert(*DI == *FI);
3834 #endif
3835         Arg = Builder.CreateBitCast(Arg, LastParamTy);
3836       }
3837     }
3838     assert(IRFunctionArgs.hasInallocaArg());
3839     IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
3840   }
3841 
3842   if (!CallArgs.getCleanupsToDeactivate().empty())
3843     deactivateArgCleanupsBeforeCall(*this, CallArgs);
3844 
3845   // If the callee is a bitcast of a function to a varargs pointer to function
3846   // type, check to see if we can remove the bitcast.  This handles some cases
3847   // with unprototyped functions.
3848   if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
3849     if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
3850       llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
3851       llvm::FunctionType *CurFT =
3852         cast<llvm::FunctionType>(CurPT->getElementType());
3853       llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
3854 
3855       if (CE->getOpcode() == llvm::Instruction::BitCast &&
3856           ActualFT->getReturnType() == CurFT->getReturnType() &&
3857           ActualFT->getNumParams() == CurFT->getNumParams() &&
3858           ActualFT->getNumParams() == IRCallArgs.size() &&
3859           (CurFT->isVarArg() || !ActualFT->isVarArg())) {
3860         bool ArgsMatch = true;
3861         for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
3862           if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
3863             ArgsMatch = false;
3864             break;
3865           }
3866 
3867         // Strip the cast if we can get away with it.  This is a nice cleanup,
3868         // but also allows us to inline the function at -O0 if it is marked
3869         // always_inline.
3870         if (ArgsMatch)
3871           Callee = CalleeF;
3872       }
3873     }
3874 
3875   assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
3876   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // The inalloca argument can have a different type.
3878     if (IRFunctionArgs.hasInallocaArg() &&
3879         i == IRFunctionArgs.getInallocaArgNo())
3880       continue;
3881     if (i < IRFuncTy->getNumParams())
3882       assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
3883   }
3884 
3885   unsigned CallingConv;
3886   CodeGen::AttributeListType AttributeList;
3887   CGM.ConstructAttributeList(Callee->getName(), CallInfo, CalleeInfo,
3888                              AttributeList, CallingConv,
3889                              /*AttrOnCallSite=*/true);
3890   llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
3891                                                      AttributeList);
3892 
3893   bool CannotThrow;
3894   if (currentFunctionUsesSEHTry()) {
3895     // SEH cares about asynchronous exceptions, so everything can "throw."
3896     CannotThrow = false;
3897   } else if (isCleanupPadScope() &&
3898              EHPersonality::get(*this).isMSVCXXPersonality()) {
3899     // The MSVC++ personality will implicitly terminate the program if an
3900     // exception is thrown.  An unwind edge cannot be reached.
3901     CannotThrow = true;
3902   } else {
3903     // Otherwise, nounwind call sites will never throw.
3904     CannotThrow = Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
3905                                      llvm::Attribute::NoUnwind);
3906   }
3907   llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
3908 
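       // If this call is lexically inside an EH funclet (e.g. a catchpad on
       // Windows), attach the "funclet" operand bundle so the call stays
       // associated with its enclosing pad.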
3909   SmallVector<llvm::OperandBundleDef, 1> BundleList;
3910   getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
3911 
3912   llvm::CallSite CS;
3913   if (!InvokeDest) {
3914     CS = Builder.CreateCall(Callee, IRCallArgs, BundleList);
3915   } else {
3916     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
3917     CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs,
3918                               BundleList);
3919     EmitBlock(Cont);
3920   }
3921   if (callOrInvoke)
3922     *callOrInvoke = CS.getInstruction();
3923 
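       // If the caller carries the 'flatten' attribute and the callee is not
       // explicitly marked noinline, force-inline this call site.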
3924   if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
3925       !CS.hasFnAttr(llvm::Attribute::NoInline))
3926     Attrs =
3927         Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
3928                            llvm::Attribute::AlwaysInline);
3929 
3930   // Disable inlining inside SEH __try blocks.
3931   if (isSEHTryScope())
3932     Attrs =
3933         Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
3934                            llvm::Attribute::NoInline);
3935 
3936   CS.setAttributes(Attrs);
3937   CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
3938 
3939   // Insert instrumentation or attach profile metadata at indirect call sites.
3940   // For more details, see the comment before the definition of
3941   // IPVK_IndirectCallTarget in InstrProfData.inc.
3942   if (!CS.getCalledFunction())
3943     PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
3944                      CS.getInstruction(), Callee);
3945 
3946   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3947   // optimizer it can aggressively ignore unwind edges.
3948   if (CGM.getLangOpts().ObjCAutoRefCount)
3949     AddObjCARCExceptionMetadata(CS.getInstruction());
3950 
3951   // If the call doesn't return, finish the basic block and clear the
3952   // insertion point; this allows the rest of IRgen to discard
3953   // unreachable code.
3954   if (CS.doesNotReturn()) {
3955     if (UnusedReturnSize)
3956       EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
3957                       SRetPtr.getPointer());
3958 
3959     Builder.CreateUnreachable();
3960     Builder.ClearInsertionPoint();
3961 
3962     // FIXME: For now, emit a dummy basic block because expression emitters
3963     // in general are not ready to handle emitting expressions at unreachable
3964     // points.
3965     EnsureInsertPoint();
3966 
3967     // Return a reasonable RValue.
3968     return GetUndefRValue(RetTy);
3969   }
3970 
3971   llvm::Instruction *CI = CS.getInstruction();
3972   if (!CI->getType()->isVoidTy())
3973     CI->setName("call");
3974 
3975   // Perform the swifterror writeback.
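       // The callee returned its error state through the temporary; copy it back
       // into the caller-provided swifterror variable.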
3976   if (swiftErrorTemp.isValid()) {
3977     llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
3978     Builder.CreateStore(errorResult, swiftErrorArg);
3979   }
3980 
3981   // Emit any writebacks immediately.  Arguably this should happen
3982   // after any return-value munging.
3983   if (CallArgs.hasWritebacks())
3984     emitWritebacks(*this, CallArgs);
3985 
3986   // The stack cleanup for inalloca arguments has to run outside the normal
3987   // lexical order, so deactivate it and run it manually here.
3988   CallArgs.freeArgumentMemory(*this);
3989 
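       // Honor the 'not_tail_called' attribute by marking the call so it will
       // never be turned into a tail call.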
3990   if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
3991     const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
3992     if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
3993       Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
3994   }
3995 
3996   RValue Ret = [&] {
3997     switch (RetAI.getKind()) {
3998     case ABIArgInfo::CoerceAndExpand: {
3999       auto coercionType = RetAI.getCoerceAndExpandType();
4000       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4001 
4002       Address addr = SRetPtr;
4003       addr = Builder.CreateElementBitCast(addr, coercionType);
4004 
4005       assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4006       bool requiresExtract = isa<llvm::StructType>(CI->getType());
4007 
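           // Walk the coercion struct and store each non-padding element of
           // the call result into the corresponding slot of the sret buffer.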
4008       unsigned unpaddedIndex = 0;
4009       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4010         llvm::Type *eltType = coercionType->getElementType(i);
4011         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4012         Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4013         llvm::Value *elt = CI;
4014         if (requiresExtract)
4015           elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4016         else
4017           assert(unpaddedIndex == 0);
4018         Builder.CreateStore(elt, eltAddr);
4019       }
4020       // FALLTHROUGH: the full value is then reloaded from SRetPtr below.
4021     }
4022 
4023     case ABIArgInfo::InAlloca:
4024     case ABIArgInfo::Indirect: {
4025       RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4026       if (UnusedReturnSize)
4027         EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
4028                         SRetPtr.getPointer());
4029       return ret;
4030     }
4031 
4032     case ABIArgInfo::Ignore:
4033       // Even though the call's result is being ignored, make sure to
4034       // construct an appropriate return value for our caller.
4035       return GetUndefRValue(RetTy);
4036 
4037     case ABIArgInfo::Extend:
4038     case ABIArgInfo::Direct: {
4039       llvm::Type *RetIRTy = ConvertType(RetTy);
4040       if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4041         switch (getEvaluationKind(RetTy)) {
4042         case TEK_Complex: {
4043           llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4044           llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4045           return RValue::getComplex(std::make_pair(Real, Imag));
4046         }
4047         case TEK_Aggregate: {
4048           Address DestPtr = ReturnValue.getValue();
4049           bool DestIsVolatile = ReturnValue.isVolatile();
4050 
4051           if (!DestPtr.isValid()) {
4052             DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4053             DestIsVolatile = false;
4054           }
4055           BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4056           return RValue::getAggregate(DestPtr);
4057         }
4058         case TEK_Scalar: {
4059           // If the returned value doesn't match RetIRTy, perform a bitcast to
4060           // coerce it.  This can happen due to trivial type mismatches.
4061           llvm::Value *V = CI;
4062           if (V->getType() != RetIRTy)
4063             V = Builder.CreateBitCast(V, RetIRTy);
4064           return RValue::get(V);
4065         }
4066         }
4067         llvm_unreachable("bad evaluation kind");
4068       }
4069 
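           // The return value was coerced to a different IR type (or lives at
           // an offset within it); spill it to a temporary and convert that to
           // the destination type.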
4070       Address DestPtr = ReturnValue.getValue();
4071       bool DestIsVolatile = ReturnValue.isVolatile();
4072 
4073       if (!DestPtr.isValid()) {
4074         DestPtr = CreateMemTemp(RetTy, "coerce");
4075         DestIsVolatile = false;
4076       }
4077 
4078       // If the value is offset in memory, apply the offset now.
4079       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4080       CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4081 
4082       return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4083     }
4084 
4085     case ABIArgInfo::Expand:
4086       llvm_unreachable("Invalid ABI kind for return argument");
4087     }
4088 
4089     llvm_unreachable("Unhandled ABIArgInfo::Kind");
4090   } ();
4091 
4092   const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
4093 
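       // If the callee was declared with the 'assume_aligned' attribute, e.g.
       //   void *my_alloc(size_t n) __attribute__((assume_aligned(16)));
       // emit an llvm.assume conveying the alignment of the returned pointer.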
4094   if (Ret.isScalar() && TargetDecl) {
4095     if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4096       llvm::Value *OffsetValue = nullptr;
4097       if (const auto *Offset = AA->getOffset())
4098         OffsetValue = EmitScalarExpr(Offset);
4099 
4100       llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4101       llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4102       EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
4103                               OffsetValue);
4104     }
4105   }
4106 
4107   return Ret;
4108 }
4109 
4110 /* VarArg handling */
4111 
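     // Lower a va_arg expression.  The Microsoft ABI uses its own va_list
     // representation, so it is dispatched through a separate ABIInfo hook.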
4112 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4113   VAListAddr = VE->isMicrosoftABI()
4114                  ? EmitMSVAListRef(VE->getSubExpr())
4115                  : EmitVAListRef(VE->getSubExpr());
4116   QualType Ty = VE->getType();
4117   if (VE->isMicrosoftABI())
4118     return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4119   return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4120 }
4121