1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // These classes wrap the information about a call or function
10 // definition used to handle ABI compliancy.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGCall.h"
15 #include "ABIInfo.h"
16 #include "CGBlocks.h"
17 #include "CGCXXABI.h"
18 #include "CGCleanup.h"
19 #include "CodeGenFunction.h"
20 #include "CodeGenModule.h"
21 #include "TargetInfo.h"
22 #include "clang/AST/Decl.h"
23 #include "clang/AST/DeclCXX.h"
24 #include "clang/AST/DeclObjC.h"
25 #include "clang/Basic/CodeGenOptions.h"
26 #include "clang/Basic/TargetBuiltins.h"
27 #include "clang/Basic/TargetInfo.h"
28 #include "clang/CodeGen/CGFunctionInfo.h"
29 #include "clang/CodeGen/SwiftCallingConv.h"
30 #include "llvm/ADT/StringExtras.h"
31 #include "llvm/Transforms/Utils/Local.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/IR/Attributes.h"
34 #include "llvm/IR/CallingConv.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/InlineAsm.h"
37 #include "llvm/IR/IntrinsicInst.h"
38 #include "llvm/IR/Intrinsics.h"
39 using namespace clang;
40 using namespace CodeGen;
41 
42 /***/
43 
44 unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
45   switch (CC) {
46   default: return llvm::CallingConv::C;
47   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
48   case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
49   case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
50   case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
51   case CC_Win64: return llvm::CallingConv::Win64;
52   case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
53   case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
54   case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
55   case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
56   // TODO: Add support for __pascal to LLVM.
57   case CC_X86Pascal: return llvm::CallingConv::C;
58   // TODO: Add support for __vectorcall to LLVM.
59   case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
60   case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
61   case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
62   case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
63   case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
64   case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
65   case CC_Swift: return llvm::CallingConv::Swift;
66   }
67 }
68 
69 /// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
70 /// qualification. Either or both of RD and MD may be null. A null RD indicates
71 /// that there is no meaningful 'this' type, and a null MD can occur when
72 /// calling a method pointer.
73 CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
74                                          const CXXMethodDecl *MD) {
75   QualType RecTy;
76   if (RD)
77     RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
78   else
79     RecTy = Context.VoidTy;
80 
81   if (MD)
82     RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace());
83   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
84 }
85 
86 /// Returns the canonical formal type of the given C++ method.
87 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
88   return MD->getType()->getCanonicalTypeUnqualified()
89            .getAs<FunctionProtoType>();
90 }
91 
92 /// Returns the "extra-canonicalized" return type, which discards
93 /// qualifiers on the return type.  Codegen doesn't care about them,
94 /// and it makes ABI code a little easier to be able to assume that
95 /// all parameter and return types are top-level unqualified.
96 static CanQualType GetReturnType(QualType RetTy) {
97   return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
98 }
99 
100 /// Arrange the argument and result information for a value of the given
101 /// unprototyped freestanding function type.
102 const CGFunctionInfo &
103 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
104   // When translating an unprototyped function type, always use a
105   // variadic type.
106   return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
107                                  /*instanceMethod=*/false,
108                                  /*chainCall=*/false, None,
109                                  FTNP->getExtInfo(), {}, RequiredArgs(0));
110 }
111 
112 static void addExtParameterInfosForCall(
113          llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
114                                         const FunctionProtoType *proto,
115                                         unsigned prefixArgs,
116                                         unsigned totalArgs) {
117   assert(proto->hasExtParameterInfos());
118   assert(paramInfos.size() <= prefixArgs);
119   assert(proto->getNumParams() + prefixArgs <= totalArgs);
120 
121   paramInfos.reserve(totalArgs);
122 
123   // Add default infos for any prefix args that don't already have infos.
124   paramInfos.resize(prefixArgs);
125 
126   // Add infos for the prototype.
127   for (const auto &ParamInfo : proto->getExtParameterInfos()) {
128     paramInfos.push_back(ParamInfo);
129     // pass_object_size params have no parameter info.
130     if (ParamInfo.hasPassObjectSize())
131       paramInfos.emplace_back();
132   }
133 
134   assert(paramInfos.size() <= totalArgs &&
135          "Did we forget to insert pass_object_size args?");
136   // Add default infos for the variadic and/or suffix arguments.
137   paramInfos.resize(totalArgs);
138 }
139 
140 /// Adds the formal parameters in FPT to the given prefix. If any parameter in
141 /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
142 static void appendParameterTypes(const CodeGenTypes &CGT,
143                                  SmallVectorImpl<CanQualType> &prefix,
144               SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
145                                  CanQual<FunctionProtoType> FPT) {
146   // Fast path: don't touch param info if we don't need to.
147   if (!FPT->hasExtParameterInfos()) {
148     assert(paramInfos.empty() &&
149            "We have paramInfos, but the prototype doesn't?");
150     prefix.append(FPT->param_type_begin(), FPT->param_type_end());
151     return;
152   }
153 
154   unsigned PrefixSize = prefix.size();
155   // In the vast majority of cases, we'll have precisely FPT->getNumParams()
156   // parameters; the only thing that can change this is the presence of
157   // pass_object_size. So, we preallocate for the common case.
158   prefix.reserve(prefix.size() + FPT->getNumParams());
159 
160   auto ExtInfos = FPT->getExtParameterInfos();
161   assert(ExtInfos.size() == FPT->getNumParams());
162   for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
163     prefix.push_back(FPT->getParamType(I));
164     if (ExtInfos[I].hasPassObjectSize())
165       prefix.push_back(CGT.getContext().getSizeType());
166   }
167 
168   addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
169                               prefix.size());
170 }
171 
172 /// Arrange the LLVM function layout for a value of the given function
173 /// type, on top of any implicit parameters already stored.
174 static const CGFunctionInfo &
175 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
176                         SmallVectorImpl<CanQualType> &prefix,
177                         CanQual<FunctionProtoType> FTP) {
178   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
179   RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
180   // FIXME: Kill copy.
181   appendParameterTypes(CGT, prefix, paramInfos, FTP);
182   CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
183 
184   return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
185                                      /*chainCall=*/false, prefix,
186                                      FTP->getExtInfo(), paramInfos,
187                                      Required);
188 }
189 
190 /// Arrange the argument and result information for a value of the
191 /// given freestanding function type.
192 const CGFunctionInfo &
193 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
194   SmallVector<CanQualType, 16> argTypes;
195   return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
196                                    FTP);
197 }
198 
199 static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
200   // Set the appropriate calling convention for the Function.
201   if (D->hasAttr<StdCallAttr>())
202     return CC_X86StdCall;
203 
204   if (D->hasAttr<FastCallAttr>())
205     return CC_X86FastCall;
206 
207   if (D->hasAttr<RegCallAttr>())
208     return CC_X86RegCall;
209 
210   if (D->hasAttr<ThisCallAttr>())
211     return CC_X86ThisCall;
212 
213   if (D->hasAttr<VectorCallAttr>())
214     return CC_X86VectorCall;
215 
216   if (D->hasAttr<PascalAttr>())
217     return CC_X86Pascal;
218 
219   if (PcsAttr *PCS = D->getAttr<PcsAttr>())
220     return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
221 
222   if (D->hasAttr<AArch64VectorPcsAttr>())
223     return CC_AArch64VectorCall;
224 
225   if (D->hasAttr<IntelOclBiccAttr>())
226     return CC_IntelOclBicc;
227 
228   if (D->hasAttr<MSABIAttr>())
229     return IsWindows ? CC_C : CC_Win64;
230 
231   if (D->hasAttr<SysVABIAttr>())
232     return IsWindows ? CC_X86_64SysV : CC_C;
233 
234   if (D->hasAttr<PreserveMostAttr>())
235     return CC_PreserveMost;
236 
237   if (D->hasAttr<PreserveAllAttr>())
238     return CC_PreserveAll;
239 
240   return CC_C;
241 }
242 
243 /// Arrange the argument and result information for a call to an
244 /// unknown C++ non-static member function of the given abstract type.
245 /// (A null RD means we don't have any meaningful "this" argument type,
246 ///  so fall back to a generic pointer type).
247 /// The member function must be an ordinary function, i.e. not a
248 /// constructor or destructor.
249 const CGFunctionInfo &
250 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
251                                    const FunctionProtoType *FTP,
252                                    const CXXMethodDecl *MD) {
253   SmallVector<CanQualType, 16> argTypes;
254 
255   // Add the 'this' pointer.
256   argTypes.push_back(DeriveThisType(RD, MD));
257 
258   return ::arrangeLLVMFunctionInfo(
259       *this, true, argTypes,
260       FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
261 }
262 
263 /// Set calling convention for CUDA/HIP kernel.
264 static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
265                                            const FunctionDecl *FD) {
266   if (FD->hasAttr<CUDAGlobalAttr>()) {
267     const FunctionType *FT = FTy->getAs<FunctionType>();
268     CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
269     FTy = FT->getCanonicalTypeUnqualified();
270   }
271 }
272 
273 /// Arrange the argument and result information for a declaration or
274 /// definition of the given C++ non-static member function.  The
275 /// member function must be an ordinary function, i.e. not a
276 /// constructor or destructor.
277 const CGFunctionInfo &
278 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
279   assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
280   assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
281 
282   CanQualType FT = GetFormalType(MD).getAs<Type>();
283   setCUDAKernelCallingConvention(FT, CGM, MD);
284   auto prototype = FT.getAs<FunctionProtoType>();
285 
286   if (MD->isInstance()) {
287     // The abstract case is perfectly fine.
288     const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
289     return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
290   }
291 
292   return arrangeFreeFunctionType(prototype);
293 }
294 
295 bool CodeGenTypes::inheritingCtorHasParams(
296     const InheritedConstructor &Inherited, CXXCtorType Type) {
297   // Parameters are unnecessary if we're constructing a base class subobject
298   // and the inherited constructor lives in a virtual base.
299   return Type == Ctor_Complete ||
300          !Inherited.getShadowDecl()->constructsVirtualBase() ||
301          !Target.getCXXABI().hasConstructorVariants();
302 }
303 
304 const CGFunctionInfo &
305 CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
306   auto *MD = cast<CXXMethodDecl>(GD.getDecl());
307 
308   SmallVector<CanQualType, 16> argTypes;
309   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
310   argTypes.push_back(DeriveThisType(MD->getParent(), MD));
311 
312   bool PassParams = true;
313 
314   if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
315     // A base class inheriting constructor doesn't get forwarded arguments
316     // needed to construct a virtual base (or base class thereof).
317     if (auto Inherited = CD->getInheritedConstructor())
318       PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
319   }
320 
321   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
322 
323   // Add the formal parameters.
324   if (PassParams)
325     appendParameterTypes(*this, argTypes, paramInfos, FTP);
326 
327   CGCXXABI::AddedStructorArgs AddedArgs =
328       TheCXXABI.buildStructorSignature(GD, argTypes);
329   if (!paramInfos.empty()) {
330     // Note: prefix implies after the first param.
331     if (AddedArgs.Prefix)
332       paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
333                         FunctionProtoType::ExtParameterInfo{});
334     if (AddedArgs.Suffix)
335       paramInfos.append(AddedArgs.Suffix,
336                         FunctionProtoType::ExtParameterInfo{});
337   }
338 
339   RequiredArgs required =
340       (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
341                                       : RequiredArgs::All);
342 
343   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
344   CanQualType resultType = TheCXXABI.HasThisReturn(GD)
345                                ? argTypes.front()
346                                : TheCXXABI.hasMostDerivedReturn(GD)
347                                      ? CGM.getContext().VoidPtrTy
348                                      : Context.VoidTy;
349   return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
350                                  /*chainCall=*/false, argTypes, extInfo,
351                                  paramInfos, required);
352 }
353 
354 static SmallVector<CanQualType, 16>
355 getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
356   SmallVector<CanQualType, 16> argTypes;
357   for (auto &arg : args)
358     argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
359   return argTypes;
360 }
361 
362 static SmallVector<CanQualType, 16>
363 getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
364   SmallVector<CanQualType, 16> argTypes;
365   for (auto &arg : args)
366     argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
367   return argTypes;
368 }
369 
370 static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
371 getExtParameterInfosForCall(const FunctionProtoType *proto,
372                             unsigned prefixArgs, unsigned totalArgs) {
373   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
374   if (proto->hasExtParameterInfos()) {
375     addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
376   }
377   return result;
378 }
379 
380 /// Arrange a call to a C++ method, passing the given arguments.
381 ///
382 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
383 /// parameter.
384 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
385 /// args.
386 /// PassProtoArgs indicates whether `args` has args for the parameters in the
387 /// given CXXConstructorDecl.
388 const CGFunctionInfo &
389 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
390                                         const CXXConstructorDecl *D,
391                                         CXXCtorType CtorKind,
392                                         unsigned ExtraPrefixArgs,
393                                         unsigned ExtraSuffixArgs,
394                                         bool PassProtoArgs) {
395   // FIXME: Kill copy.
396   SmallVector<CanQualType, 16> ArgTypes;
397   for (const auto &Arg : args)
398     ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
399 
400   // +1 for implicit this, which should always be args[0].
401   unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
402 
403   CanQual<FunctionProtoType> FPT = GetFormalType(D);
404   RequiredArgs Required = PassProtoArgs
405                               ? RequiredArgs::forPrototypePlus(
406                                     FPT, TotalPrefixArgs + ExtraSuffixArgs)
407                               : RequiredArgs::All;
408 
409   GlobalDecl GD(D, CtorKind);
410   CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
411                                ? ArgTypes.front()
412                                : TheCXXABI.hasMostDerivedReturn(GD)
413                                      ? CGM.getContext().VoidPtrTy
414                                      : Context.VoidTy;
415 
416   FunctionType::ExtInfo Info = FPT->getExtInfo();
417   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
418   // If the prototype args are elided, we should only have ABI-specific args,
419   // which never have param info.
420   if (PassProtoArgs && FPT->hasExtParameterInfos()) {
421     // ABI-specific suffix arguments are treated the same as variadic arguments.
422     addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
423                                 ArgTypes.size());
424   }
425   return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
426                                  /*chainCall=*/false, ArgTypes, Info,
427                                  ParamInfos, Required);
428 }
429 
430 /// Arrange the argument and result information for the declaration or
431 /// definition of the given function.
432 const CGFunctionInfo &
433 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
434   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
435     if (MD->isInstance())
436       return arrangeCXXMethodDeclaration(MD);
437 
438   CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
439 
440   assert(isa<FunctionType>(FTy));
441   setCUDAKernelCallingConvention(FTy, CGM, FD);
442 
443   // When declaring a function without a prototype, always use a
444   // non-variadic type.
445   if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
446     return arrangeLLVMFunctionInfo(
447         noProto->getReturnType(), /*instanceMethod=*/false,
448         /*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All);
449   }
450 
451   return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
452 }
453 
454 /// Arrange the argument and result information for the declaration or
455 /// definition of an Objective-C method.
456 const CGFunctionInfo &
457 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
458   // It happens that this is the same as a call with no optional
459   // arguments, except also using the formal 'self' type.
460   return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
461 }
462 
463 /// Arrange the argument and result information for the function type
464 /// through which to perform a send to the given Objective-C method,
465 /// using the given receiver type.  The receiver type is not always
466 /// the 'self' type of the method or even an Objective-C pointer type.
467 /// This is *not* the right method for actually performing such a
468 /// message send, due to the possibility of optional arguments.
469 const CGFunctionInfo &
470 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
471                                               QualType receiverType) {
472   SmallVector<CanQualType, 16> argTys;
473   SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
474   argTys.push_back(Context.getCanonicalParamType(receiverType));
475   argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
476   // FIXME: Kill copy?
477   for (const auto *I : MD->parameters()) {
478     argTys.push_back(Context.getCanonicalParamType(I->getType()));
479     auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
480         I->hasAttr<NoEscapeAttr>());
481     extParamInfos.push_back(extParamInfo);
482   }
483 
484   FunctionType::ExtInfo einfo;
485   bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
486   einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
487 
488   if (getContext().getLangOpts().ObjCAutoRefCount &&
489       MD->hasAttr<NSReturnsRetainedAttr>())
490     einfo = einfo.withProducesResult(true);
491 
492   RequiredArgs required =
493     (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
494 
495   return arrangeLLVMFunctionInfo(
496       GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
497       /*chainCall=*/false, argTys, einfo, extParamInfos, required);
498 }
499 
500 const CGFunctionInfo &
501 CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
502                                                  const CallArgList &args) {
503   auto argTypes = getArgTypesForCall(Context, args);
504   FunctionType::ExtInfo einfo;
505 
506   return arrangeLLVMFunctionInfo(
507       GetReturnType(returnType), /*instanceMethod=*/false,
508       /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
509 }
510 
511 const CGFunctionInfo &
512 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
513   // FIXME: Do we need to handle ObjCMethodDecl?
514   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
515 
516   if (isa<CXXConstructorDecl>(GD.getDecl()) ||
517       isa<CXXDestructorDecl>(GD.getDecl()))
518     return arrangeCXXStructorDeclaration(GD);
519 
520   return arrangeFunctionDeclaration(FD);
521 }
522 
523 /// Arrange a thunk that takes 'this' as the first parameter followed by
524 /// varargs.  Return a void pointer, regardless of the actual return type.
525 /// The body of the thunk will end in a musttail call to a function of the
526 /// correct type, and the caller will bitcast the function to the correct
527 /// prototype.
528 const CGFunctionInfo &
529 CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
530   assert(MD->isVirtual() && "only methods have thunks");
531   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
532   CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
533   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
534                                  /*chainCall=*/false, ArgTys,
535                                  FTP->getExtInfo(), {}, RequiredArgs(1));
536 }
537 
538 const CGFunctionInfo &
539 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
540                                    CXXCtorType CT) {
541   assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
542 
543   CanQual<FunctionProtoType> FTP = GetFormalType(CD);
544   SmallVector<CanQualType, 2> ArgTys;
545   const CXXRecordDecl *RD = CD->getParent();
546   ArgTys.push_back(DeriveThisType(RD, CD));
547   if (CT == Ctor_CopyingClosure)
548     ArgTys.push_back(*FTP->param_type_begin());
549   if (RD->getNumVBases() > 0)
550     ArgTys.push_back(Context.IntTy);
551   CallingConv CC = Context.getDefaultCallingConvention(
552       /*IsVariadic=*/false, /*IsCXXMethod=*/true);
553   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
554                                  /*chainCall=*/false, ArgTys,
555                                  FunctionType::ExtInfo(CC), {},
556                                  RequiredArgs::All);
557 }
558 
559 /// Arrange a call as unto a free function, except possibly with an
560 /// additional number of formal parameters considered required.
561 static const CGFunctionInfo &
562 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
563                             CodeGenModule &CGM,
564                             const CallArgList &args,
565                             const FunctionType *fnType,
566                             unsigned numExtraRequiredArgs,
567                             bool chainCall) {
568   assert(args.size() >= numExtraRequiredArgs);
569 
570   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
571 
572   // In most cases, there are no optional arguments.
573   RequiredArgs required = RequiredArgs::All;
574 
575   // If we have a variadic prototype, the required arguments are the
576   // extra prefix plus the arguments in the prototype.
577   if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
578     if (proto->isVariadic())
579       required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);
580 
581     if (proto->hasExtParameterInfos())
582       addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
583                                   args.size());
584 
585   // If we don't have a prototype at all, but we're supposed to
586   // explicitly use the variadic convention for unprototyped calls,
587   // treat all of the arguments as required but preserve the nominal
588   // possibility of variadics.
589   } else if (CGM.getTargetCodeGenInfo()
590                 .isNoProtoCallVariadic(args,
591                                        cast<FunctionNoProtoType>(fnType))) {
592     required = RequiredArgs(args.size());
593   }
594 
595   // FIXME: Kill copy.
596   SmallVector<CanQualType, 16> argTypes;
597   for (const auto &arg : args)
598     argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
599   return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
600                                      /*instanceMethod=*/false, chainCall,
601                                      argTypes, fnType->getExtInfo(), paramInfos,
602                                      required);
603 }
604 
605 /// Figure out the rules for calling a function with the given formal
606 /// type using the given arguments.  The arguments are necessary
607 /// because the function might be unprototyped, in which case it's
608 /// target-dependent in crazy ways.
609 const CGFunctionInfo &
610 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
611                                       const FunctionType *fnType,
612                                       bool chainCall) {
613   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
614                                      chainCall ? 1 : 0, chainCall);
615 }
616 
617 /// A block function is essentially a free function with an
618 /// extra implicit argument.
619 const CGFunctionInfo &
620 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
621                                        const FunctionType *fnType) {
622   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
623                                      /*chainCall=*/false);
624 }
625 
626 const CGFunctionInfo &
627 CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
628                                               const FunctionArgList &params) {
629   auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
630   auto argTypes = getArgTypesForDeclaration(Context, params);
631 
632   return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
633                                  /*instanceMethod*/ false, /*chainCall*/ false,
634                                  argTypes, proto->getExtInfo(), paramInfos,
635                                  RequiredArgs::forPrototypePlus(proto, 1));
636 }
637 
638 const CGFunctionInfo &
639 CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
640                                          const CallArgList &args) {
641   // FIXME: Kill copy.
642   SmallVector<CanQualType, 16> argTypes;
643   for (const auto &Arg : args)
644     argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
645   return arrangeLLVMFunctionInfo(
646       GetReturnType(resultType), /*instanceMethod=*/false,
647       /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
648       /*paramInfos=*/ {}, RequiredArgs::All);
649 }
650 
651 const CGFunctionInfo &
652 CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
653                                                 const FunctionArgList &args) {
654   auto argTypes = getArgTypesForDeclaration(Context, args);
655 
656   return arrangeLLVMFunctionInfo(
657       GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
658       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
659 }
660 
661 const CGFunctionInfo &
662 CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
663                                               ArrayRef<CanQualType> argTypes) {
664   return arrangeLLVMFunctionInfo(
665       resultType, /*instanceMethod=*/false, /*chainCall=*/false,
666       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
667 }
668 
669 /// Arrange a call to a C++ method, passing the given arguments.
670 ///
671 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
672 /// does not count `this`.
673 const CGFunctionInfo &
674 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
675                                    const FunctionProtoType *proto,
676                                    RequiredArgs required,
677                                    unsigned numPrefixArgs) {
678   assert(numPrefixArgs + 1 <= args.size() &&
679          "Emitting a call with less args than the required prefix?");
680   // Add one to account for `this`. It's a bit awkward here, but we don't count
681   // `this` in similar places elsewhere.
682   auto paramInfos =
683     getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
684 
685   // FIXME: Kill copy.
686   auto argTypes = getArgTypesForCall(Context, args);
687 
688   FunctionType::ExtInfo info = proto->getExtInfo();
689   return arrangeLLVMFunctionInfo(
690       GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
691       /*chainCall=*/false, argTypes, info, paramInfos, required);
692 }
693 
694 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
695   return arrangeLLVMFunctionInfo(
696       getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
697       None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
698 }
699 
700 const CGFunctionInfo &
701 CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
702                           const CallArgList &args) {
703   assert(signature.arg_size() <= args.size());
704   if (signature.arg_size() == args.size())
705     return signature;
706 
707   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
708   auto sigParamInfos = signature.getExtParameterInfos();
709   if (!sigParamInfos.empty()) {
710     paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
711     paramInfos.resize(args.size());
712   }
713 
714   auto argTypes = getArgTypesForCall(Context, args);
715 
716   assert(signature.getRequiredArgs().allowsOptionalArgs());
717   return arrangeLLVMFunctionInfo(signature.getReturnType(),
718                                  signature.isInstanceMethod(),
719                                  signature.isChainCall(),
720                                  argTypes,
721                                  signature.getExtInfo(),
722                                  paramInfos,
723                                  signature.getRequiredArgs());
724 }
725 
726 namespace clang {
727 namespace CodeGen {
728 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
729 }
730 }
731 
732 /// Arrange the argument and result information for an abstract value
733 /// of a given function type.  This is the method which all of the
734 /// above functions ultimately defer to.
735 const CGFunctionInfo &
736 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
737                                       bool instanceMethod,
738                                       bool chainCall,
739                                       ArrayRef<CanQualType> argTypes,
740                                       FunctionType::ExtInfo info,
741                      ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
742                                       RequiredArgs required) {
743   assert(llvm::all_of(argTypes,
744                       [](CanQualType T) { return T.isCanonicalAsParam(); }));
745 
746   // Lookup or create unique function info.
747   llvm::FoldingSetNodeID ID;
748   CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
749                           required, resultType, argTypes);
750 
751   void *insertPos = nullptr;
752   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
753   if (FI)
754     return *FI;
755 
756   unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
757 
758   // Construct the function info.  We co-allocate the ArgInfos.
759   FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
760                               paramInfos, resultType, argTypes, required);
761   FunctionInfos.InsertNode(FI, insertPos);
762 
763   bool inserted = FunctionsBeingProcessed.insert(FI).second;
764   (void)inserted;
765   assert(inserted && "Recursively being processed?");
766 
767   // Compute ABI information.
768   if (CC == llvm::CallingConv::SPIR_KERNEL) {
769     // Force target independent argument handling for the host visible
770     // kernel functions.
771     computeSPIRKernelABIInfo(CGM, *FI);
772   } else if (info.getCC() == CC_Swift) {
773     swiftcall::computeABIInfo(CGM, *FI);
774   } else {
775     getABIInfo().computeInfo(*FI);
776   }
777 
778   // Loop over all of the computed argument and return value info.  If any of
779   // them are direct or extend without a specified coerce type, specify the
780   // default now.
781   ABIArgInfo &retInfo = FI->getReturnInfo();
782   if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
783     retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
784 
785   for (auto &I : FI->arguments())
786     if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
787       I.info.setCoerceToType(ConvertType(I.type));
788 
789   bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
790   assert(erased && "Not in set?");
791 
792   return *FI;
793 }
794 
795 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
796                                        bool instanceMethod,
797                                        bool chainCall,
798                                        const FunctionType::ExtInfo &info,
799                                        ArrayRef<ExtParameterInfo> paramInfos,
800                                        CanQualType resultType,
801                                        ArrayRef<CanQualType> argTypes,
802                                        RequiredArgs required) {
803   assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
804   assert(!required.allowsOptionalArgs() ||
805          required.getNumRequiredArgs() <= argTypes.size());
806 
807   void *buffer =
808     operator new(totalSizeToAlloc<ArgInfo,             ExtParameterInfo>(
809                                   argTypes.size() + 1, paramInfos.size()));
810 
811   CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
812   FI->CallingConvention = llvmCC;
813   FI->EffectiveCallingConvention = llvmCC;
814   FI->ASTCallingConvention = info.getCC();
815   FI->InstanceMethod = instanceMethod;
816   FI->ChainCall = chainCall;
817   FI->NoReturn = info.getNoReturn();
818   FI->ReturnsRetained = info.getProducesResult();
819   FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
820   FI->NoCfCheck = info.getNoCfCheck();
821   FI->Required = required;
822   FI->HasRegParm = info.getHasRegParm();
823   FI->RegParm = info.getRegParm();
824   FI->ArgStruct = nullptr;
825   FI->ArgStructAlign = 0;
826   FI->NumArgs = argTypes.size();
827   FI->HasExtParameterInfos = !paramInfos.empty();
828   FI->getArgsBuffer()[0].type = resultType;
829   for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
830     FI->getArgsBuffer()[i + 1].type = argTypes[i];
831   for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
832     FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
833   return FI;
834 }
835 
836 /***/
837 
838 namespace {
839 // ABIArgInfo::Expand implementation.
840 
841 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
842 struct TypeExpansion {
843   enum TypeExpansionKind {
844     // Elements of constant arrays are expanded recursively.
845     TEK_ConstantArray,
846     // Record fields are expanded recursively (but if record is a union, only
847     // the field with the largest size is expanded).
848     TEK_Record,
849     // For complex types, real and imaginary parts are expanded recursively.
850     TEK_Complex,
851     // All other types are not expandable.
852     TEK_None
853   };
854 
855   const TypeExpansionKind Kind;
856 
857   TypeExpansion(TypeExpansionKind K) : Kind(K) {}
858   virtual ~TypeExpansion() {}
859 };
860 
861 struct ConstantArrayExpansion : TypeExpansion {
862   QualType EltTy;
863   uint64_t NumElts;
864 
865   ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
866       : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
867   static bool classof(const TypeExpansion *TE) {
868     return TE->Kind == TEK_ConstantArray;
869   }
870 };
871 
872 struct RecordExpansion : TypeExpansion {
873   SmallVector<const CXXBaseSpecifier *, 1> Bases;
874 
875   SmallVector<const FieldDecl *, 1> Fields;
876 
877   RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
878                   SmallVector<const FieldDecl *, 1> &&Fields)
879       : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
880         Fields(std::move(Fields)) {}
881   static bool classof(const TypeExpansion *TE) {
882     return TE->Kind == TEK_Record;
883   }
884 };
885 
886 struct ComplexExpansion : TypeExpansion {
887   QualType EltTy;
888 
889   ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
890   static bool classof(const TypeExpansion *TE) {
891     return TE->Kind == TEK_Complex;
892   }
893 };
894 
895 struct NoExpansion : TypeExpansion {
896   NoExpansion() : TypeExpansion(TEK_None) {}
897   static bool classof(const TypeExpansion *TE) {
898     return TE->Kind == TEK_None;
899   }
900 };
901 }  // namespace
902 
903 static std::unique_ptr<TypeExpansion>
904 getTypeExpansion(QualType Ty, const ASTContext &Context) {
905   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
906     return std::make_unique<ConstantArrayExpansion>(
907         AT->getElementType(), AT->getSize().getZExtValue());
908   }
909   if (const RecordType *RT = Ty->getAs<RecordType>()) {
910     SmallVector<const CXXBaseSpecifier *, 1> Bases;
911     SmallVector<const FieldDecl *, 1> Fields;
912     const RecordDecl *RD = RT->getDecl();
913     assert(!RD->hasFlexibleArrayMember() &&
914            "Cannot expand structure with flexible array.");
915     if (RD->isUnion()) {
916       // Unions can be here only in degenerative cases - all the fields are same
917       // after flattening. Thus we have to use the "largest" field.
918       const FieldDecl *LargestFD = nullptr;
919       CharUnits UnionSize = CharUnits::Zero();
920 
921       for (const auto *FD : RD->fields()) {
922         if (FD->isZeroLengthBitField(Context))
923           continue;
924         assert(!FD->isBitField() &&
925                "Cannot expand structure with bit-field members.");
926         CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
927         if (UnionSize < FieldSize) {
928           UnionSize = FieldSize;
929           LargestFD = FD;
930         }
931       }
932       if (LargestFD)
933         Fields.push_back(LargestFD);
934     } else {
935       if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
936         assert(!CXXRD->isDynamicClass() &&
937                "cannot expand vtable pointers in dynamic classes");
938         for (const CXXBaseSpecifier &BS : CXXRD->bases())
939           Bases.push_back(&BS);
940       }
941 
942       for (const auto *FD : RD->fields()) {
943         if (FD->isZeroLengthBitField(Context))
944           continue;
945         assert(!FD->isBitField() &&
946                "Cannot expand structure with bit-field members.");
947         Fields.push_back(FD);
948       }
949     }
950     return std::make_unique<RecordExpansion>(std::move(Bases),
951                                               std::move(Fields));
952   }
953   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
954     return std::make_unique<ComplexExpansion>(CT->getElementType());
955   }
956   return std::make_unique<NoExpansion>();
957 }
958 
959 static int getExpansionSize(QualType Ty, const ASTContext &Context) {
960   auto Exp = getTypeExpansion(Ty, Context);
961   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
962     return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
963   }
964   if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
965     int Res = 0;
966     for (auto BS : RExp->Bases)
967       Res += getExpansionSize(BS->getType(), Context);
968     for (auto FD : RExp->Fields)
969       Res += getExpansionSize(FD->getType(), Context);
970     return Res;
971   }
972   if (isa<ComplexExpansion>(Exp.get()))
973     return 2;
974   assert(isa<NoExpansion>(Exp.get()));
975   return 1;
976 }
977 
978 void
979 CodeGenTypes::getExpandedTypes(QualType Ty,
980                                SmallVectorImpl<llvm::Type *>::iterator &TI) {
981   auto Exp = getTypeExpansion(Ty, Context);
982   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
983     for (int i = 0, n = CAExp->NumElts; i < n; i++) {
984       getExpandedTypes(CAExp->EltTy, TI);
985     }
986   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
987     for (auto BS : RExp->Bases)
988       getExpandedTypes(BS->getType(), TI);
989     for (auto FD : RExp->Fields)
990       getExpandedTypes(FD->getType(), TI);
991   } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
992     llvm::Type *EltTy = ConvertType(CExp->EltTy);
993     *TI++ = EltTy;
994     *TI++ = EltTy;
995   } else {
996     assert(isa<NoExpansion>(Exp.get()));
997     *TI++ = ConvertType(Ty);
998   }
999 }
1000 
1001 static void forConstantArrayExpansion(CodeGenFunction &CGF,
1002                                       ConstantArrayExpansion *CAE,
1003                                       Address BaseAddr,
1004                                       llvm::function_ref<void(Address)> Fn) {
1005   CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1006   CharUnits EltAlign =
1007     BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1008 
1009   for (int i = 0, n = CAE->NumElts; i < n; i++) {
1010     llvm::Value *EltAddr =
1011       CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
1012     Fn(Address(EltAddr, EltAlign));
1013   }
1014 }
1015 
1016 void CodeGenFunction::ExpandTypeFromArgs(
1017     QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
1018   assert(LV.isSimple() &&
1019          "Unexpected non-simple lvalue during struct expansion.");
1020 
1021   auto Exp = getTypeExpansion(Ty, getContext());
1022   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1023     forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
1024                               [&](Address EltAddr) {
1025       LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1026       ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1027     });
1028   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1029     Address This = LV.getAddress();
1030     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1031       // Perform a single step derived-to-base conversion.
1032       Address Base =
1033           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1034                                 /*NullCheckValue=*/false, SourceLocation());
1035       LValue SubLV = MakeAddrLValue(Base, BS->getType());
1036 
1037       // Recurse onto bases.
1038       ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1039     }
1040     for (auto FD : RExp->Fields) {
1041       // FIXME: What are the right qualifiers here?
1042       LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1043       ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1044     }
1045   } else if (isa<ComplexExpansion>(Exp.get())) {
1046     auto realValue = *AI++;
1047     auto imagValue = *AI++;
1048     EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1049   } else {
1050     assert(isa<NoExpansion>(Exp.get()));
1051     EmitStoreThroughLValue(RValue::get(*AI++), LV);
1052   }
1053 }
1054 
1055 void CodeGenFunction::ExpandTypeToArgs(
1056     QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1057     SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1058   auto Exp = getTypeExpansion(Ty, getContext());
1059   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1060     Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1061                                    : Arg.getKnownRValue().getAggregateAddress();
1062     forConstantArrayExpansion(
1063         *this, CAExp, Addr, [&](Address EltAddr) {
1064           CallArg EltArg = CallArg(
1065               convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1066               CAExp->EltTy);
1067           ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1068                            IRCallArgPos);
1069         });
1070   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1071     Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1072                                    : Arg.getKnownRValue().getAggregateAddress();
1073     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1074       // Perform a single step derived-to-base conversion.
1075       Address Base =
1076           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1077                                 /*NullCheckValue=*/false, SourceLocation());
1078       CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1079 
1080       // Recurse onto bases.
1081       ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1082                        IRCallArgPos);
1083     }
1084 
1085     LValue LV = MakeAddrLValue(This, Ty);
1086     for (auto FD : RExp->Fields) {
1087       CallArg FldArg =
1088           CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1089       ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1090                        IRCallArgPos);
1091     }
1092   } else if (isa<ComplexExpansion>(Exp.get())) {
1093     ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1094     IRCallArgs[IRCallArgPos++] = CV.first;
1095     IRCallArgs[IRCallArgPos++] = CV.second;
1096   } else {
1097     assert(isa<NoExpansion>(Exp.get()));
1098     auto RV = Arg.getKnownRValue();
1099     assert(RV.isScalar() &&
1100            "Unexpected non-scalar rvalue during struct expansion.");
1101 
1102     // Insert a bitcast as needed.
1103     llvm::Value *V = RV.getScalarVal();
1104     if (IRCallArgPos < IRFuncTy->getNumParams() &&
1105         V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1106       V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1107 
1108     IRCallArgs[IRCallArgPos++] = V;
1109   }
1110 }
1111 
1112 /// Create a temporary allocation for the purposes of coercion.
1113 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1114                                            CharUnits MinAlign) {
1115   // Don't use an alignment that's worse than what LLVM would prefer.
1116   auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1117   CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1118 
1119   return CGF.CreateTempAlloca(Ty, Align);
1120 }
1121 
1122 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1123 /// accessing some number of bytes out of it, try to gep into the struct to get
1124 /// at its inner goodness.  Dive as deep as possible without entering an element
1125 /// with an in-memory size smaller than DstSize.
1126 static Address
1127 EnterStructPointerForCoercedAccess(Address SrcPtr,
1128                                    llvm::StructType *SrcSTy,
1129                                    uint64_t DstSize, CodeGenFunction &CGF) {
1130   // We can't dive into a zero-element struct.
1131   if (SrcSTy->getNumElements() == 0) return SrcPtr;
1132 
1133   llvm::Type *FirstElt = SrcSTy->getElementType(0);
1134 
1135   // If the first elt is at least as large as what we're looking for, or if the
1136   // first element is the same size as the whole struct, we can enter it. The
1137   // comparison must be made on the store size and not the alloca size. Using
1138   // the alloca size may overstate the size of the load.
1139   uint64_t FirstEltSize =
1140     CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1141   if (FirstEltSize < DstSize &&
1142       FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1143     return SrcPtr;
1144 
1145   // GEP into the first element.
1146   SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");
1147 
1148   // If the first element is a struct, recurse.
1149   llvm::Type *SrcTy = SrcPtr.getElementType();
1150   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1151     return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1152 
1153   return SrcPtr;
1154 }
1155 
1156 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1157 /// are either integers or pointers.  This does a truncation of the value if it
1158 /// is too large or a zero extension if it is too small.
1159 ///
1160 /// This behaves as if the value were coerced through memory, so on big-endian
1161 /// targets the high bits are preserved in a truncation, while little-endian
1162 /// targets preserve the low bits.
1163 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1164                                              llvm::Type *Ty,
1165                                              CodeGenFunction &CGF) {
1166   if (Val->getType() == Ty)
1167     return Val;
1168 
1169   if (isa<llvm::PointerType>(Val->getType())) {
1170     // If this is Pointer->Pointer avoid conversion to and from int.
1171     if (isa<llvm::PointerType>(Ty))
1172       return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1173 
1174     // Convert the pointer to an integer so we can play with its width.
1175     Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1176   }
1177 
1178   llvm::Type *DestIntTy = Ty;
1179   if (isa<llvm::PointerType>(DestIntTy))
1180     DestIntTy = CGF.IntPtrTy;
1181 
1182   if (Val->getType() != DestIntTy) {
1183     const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1184     if (DL.isBigEndian()) {
1185       // Preserve the high bits on big-endian targets.
1186       // That is what memory coercion does.
1187       uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1188       uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1189 
1190       if (SrcSize > DstSize) {
1191         Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1192         Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1193       } else {
1194         Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1195         Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1196       }
1197     } else {
1198       // Little-endian targets preserve the low bits. No shifts required.
1199       Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1200     }
1201   }
1202 
1203   if (isa<llvm::PointerType>(Ty))
1204     Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1205   return Val;
1206 }
1207 
1208 
1209 
1210 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1211 /// a pointer to an object of type \arg Ty, known to be aligned to
1212 /// \arg SrcAlign bytes.
1213 ///
1214 /// This safely handles the case when the src type is smaller than the
1215 /// destination type; in this situation the values of bits which not
1216 /// present in the src are undefined.
1217 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1218                                       CodeGenFunction &CGF) {
1219   llvm::Type *SrcTy = Src.getElementType();
1220 
1221   // If SrcTy and Ty are the same, just do a load.
1222   if (SrcTy == Ty)
1223     return CGF.Builder.CreateLoad(Src);
1224 
1225   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1226 
1227   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1228     Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
1229     SrcTy = Src.getType()->getElementType();
1230   }
1231 
1232   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1233 
1234   // If the source and destination are integer or pointer types, just do an
1235   // extension or truncation to the desired type.
1236   if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1237       (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1238     llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1239     return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1240   }
1241 
1242   // If load is legal, just bitcast the src pointer.
1243   if (SrcSize >= DstSize) {
1244     // Generally SrcSize is never greater than DstSize, since this means we are
1245     // losing bits. However, this can happen in cases where the structure has
1246     // additional padding, for example due to a user specified alignment.
1247     //
1248     // FIXME: Assert that we aren't truncating non-padding bits when have access
1249     // to that information.
1250     Src = CGF.Builder.CreateBitCast(Src,
1251                                     Ty->getPointerTo(Src.getAddressSpace()));
1252     return CGF.Builder.CreateLoad(Src);
1253   }
1254 
1255   // Otherwise do coercion through memory. This is stupid, but simple.
1256   Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
1257   Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
1258   Address SrcCasted = CGF.Builder.CreateElementBitCast(Src,CGF.Int8Ty);
1259   CGF.Builder.CreateMemCpy(Casted, SrcCasted,
1260       llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
1261       false);
1262   return CGF.Builder.CreateLoad(Tmp);
1263 }
1264 
1265 // Function to store a first-class aggregate into memory.  We prefer to
1266 // store the elements rather than the aggregate to be more friendly to
1267 // fast-isel.
1268 // FIXME: Do we need to recurse here?
1269 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
1270                           Address Dest, bool DestIsVolatile) {
1271   // Prefer scalar stores to first-class aggregate stores.
1272   if (llvm::StructType *STy =
1273         dyn_cast<llvm::StructType>(Val->getType())) {
1274     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1275       Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i);
1276       llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
1277       CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1278     }
1279   } else {
1280     CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
1281   }
1282 }
1283 
1284 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1285 /// where the source and destination may have different types.  The
1286 /// destination is known to be aligned to \arg DstAlign bytes.
1287 ///
1288 /// This safely handles the case when the src type is larger than the
1289 /// destination type; the upper bits of the src will be lost.
1290 static void CreateCoercedStore(llvm::Value *Src,
1291                                Address Dst,
1292                                bool DstIsVolatile,
1293                                CodeGenFunction &CGF) {
1294   llvm::Type *SrcTy = Src->getType();
1295   llvm::Type *DstTy = Dst.getType()->getElementType();
1296   if (SrcTy == DstTy) {
1297     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1298     return;
1299   }
1300 
1301   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1302 
1303   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1304     Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
1305     DstTy = Dst.getType()->getElementType();
1306   }
1307 
1308   llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
1309   llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
1310   if (SrcPtrTy && DstPtrTy &&
1311       SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
1312     Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
1313     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1314     return;
1315   }
1316 
1317   // If the source and destination are integer or pointer types, just do an
1318   // extension or truncation to the desired type.
1319   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1320       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1321     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1322     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1323     return;
1324   }
1325 
1326   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1327 
1328   // If store is legal, just bitcast the src pointer.
1329   if (SrcSize <= DstSize) {
1330     Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1331     BuildAggStore(CGF, Src, Dst, DstIsVolatile);
1332   } else {
1333     // Otherwise do coercion through memory. This is stupid, but
1334     // simple.
1335 
1336     // Generally SrcSize is never greater than DstSize, since this means we are
1337     // losing bits. However, this can happen in cases where the structure has
1338     // additional padding, for example due to a user specified alignment.
1339     //
1340     // FIXME: Assert that we aren't truncating non-padding bits when have access
1341     // to that information.
1342     Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1343     CGF.Builder.CreateStore(Src, Tmp);
1344     Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
1345     Address DstCasted = CGF.Builder.CreateElementBitCast(Dst,CGF.Int8Ty);
1346     CGF.Builder.CreateMemCpy(DstCasted, Casted,
1347         llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
1348         false);
1349   }
1350 }
1351 
1352 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1353                                    const ABIArgInfo &info) {
1354   if (unsigned offset = info.getDirectOffset()) {
1355     addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1356     addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1357                                              CharUnits::fromQuantity(offset));
1358     addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1359   }
1360   return addr;
1361 }
1362 
1363 namespace {
1364 
1365 /// Encapsulates information about the way function arguments from
1366 /// CGFunctionInfo should be passed to actual LLVM IR function.
1367 class ClangToLLVMArgMapping {
1368   static const unsigned InvalidIndex = ~0U;
1369   unsigned InallocaArgNo;
1370   unsigned SRetArgNo;
1371   unsigned TotalIRArgs;
1372 
1373   /// Arguments of LLVM IR function corresponding to single Clang argument.
1374   struct IRArgs {
1375     unsigned PaddingArgIndex;
1376     // Argument is expanded to IR arguments at positions
1377     // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1378     unsigned FirstArgIndex;
1379     unsigned NumberOfArgs;
1380 
1381     IRArgs()
1382         : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1383           NumberOfArgs(0) {}
1384   };
1385 
1386   SmallVector<IRArgs, 8> ArgInfo;
1387 
1388 public:
1389   ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1390                         bool OnlyRequiredArgs = false)
1391       : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1392         ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1393     construct(Context, FI, OnlyRequiredArgs);
1394   }
1395 
1396   bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1397   unsigned getInallocaArgNo() const {
1398     assert(hasInallocaArg());
1399     return InallocaArgNo;
1400   }
1401 
1402   bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1403   unsigned getSRetArgNo() const {
1404     assert(hasSRetArg());
1405     return SRetArgNo;
1406   }
1407 
1408   unsigned totalIRArgs() const { return TotalIRArgs; }
1409 
1410   bool hasPaddingArg(unsigned ArgNo) const {
1411     assert(ArgNo < ArgInfo.size());
1412     return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1413   }
1414   unsigned getPaddingArgNo(unsigned ArgNo) const {
1415     assert(hasPaddingArg(ArgNo));
1416     return ArgInfo[ArgNo].PaddingArgIndex;
1417   }
1418 
1419   /// Returns index of first IR argument corresponding to ArgNo, and their
1420   /// quantity.
1421   std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1422     assert(ArgNo < ArgInfo.size());
1423     return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1424                           ArgInfo[ArgNo].NumberOfArgs);
1425   }
1426 
1427 private:
1428   void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1429                  bool OnlyRequiredArgs);
1430 };
1431 
1432 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1433                                       const CGFunctionInfo &FI,
1434                                       bool OnlyRequiredArgs) {
1435   unsigned IRArgNo = 0;
1436   bool SwapThisWithSRet = false;
1437   const ABIArgInfo &RetAI = FI.getReturnInfo();
1438 
1439   if (RetAI.getKind() == ABIArgInfo::Indirect) {
1440     SwapThisWithSRet = RetAI.isSRetAfterThis();
1441     SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1442   }
1443 
1444   unsigned ArgNo = 0;
1445   unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1446   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1447        ++I, ++ArgNo) {
1448     assert(I != FI.arg_end());
1449     QualType ArgType = I->type;
1450     const ABIArgInfo &AI = I->info;
1451     // Collect data about IR arguments corresponding to Clang argument ArgNo.
1452     auto &IRArgs = ArgInfo[ArgNo];
1453 
1454     if (AI.getPaddingType())
1455       IRArgs.PaddingArgIndex = IRArgNo++;
1456 
1457     switch (AI.getKind()) {
1458     case ABIArgInfo::Extend:
1459     case ABIArgInfo::Direct: {
1460       // FIXME: handle sseregparm someday...
1461       llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1462       if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1463         IRArgs.NumberOfArgs = STy->getNumElements();
1464       } else {
1465         IRArgs.NumberOfArgs = 1;
1466       }
1467       break;
1468     }
1469     case ABIArgInfo::Indirect:
1470       IRArgs.NumberOfArgs = 1;
1471       break;
1472     case ABIArgInfo::Ignore:
1473     case ABIArgInfo::InAlloca:
1474       // ignore and inalloca doesn't have matching LLVM parameters.
1475       IRArgs.NumberOfArgs = 0;
1476       break;
1477     case ABIArgInfo::CoerceAndExpand:
1478       IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1479       break;
1480     case ABIArgInfo::Expand:
1481       IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1482       break;
1483     }
1484 
1485     if (IRArgs.NumberOfArgs > 0) {
1486       IRArgs.FirstArgIndex = IRArgNo;
1487       IRArgNo += IRArgs.NumberOfArgs;
1488     }
1489 
1490     // Skip over the sret parameter when it comes second.  We already handled it
1491     // above.
1492     if (IRArgNo == 1 && SwapThisWithSRet)
1493       IRArgNo++;
1494   }
1495   assert(ArgNo == ArgInfo.size());
1496 
1497   if (FI.usesInAlloca())
1498     InallocaArgNo = IRArgNo++;
1499 
1500   TotalIRArgs = IRArgNo;
1501 }
1502 }  // namespace
1503 
1504 /***/
1505 
1506 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1507   const auto &RI = FI.getReturnInfo();
1508   return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1509 }
1510 
1511 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1512   return ReturnTypeUsesSRet(FI) &&
1513          getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1514 }
1515 
1516 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1517   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1518     switch (BT->getKind()) {
1519     default:
1520       return false;
1521     case BuiltinType::Float:
1522       return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1523     case BuiltinType::Double:
1524       return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1525     case BuiltinType::LongDouble:
1526       return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1527     }
1528   }
1529 
1530   return false;
1531 }
1532 
1533 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1534   if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1535     if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1536       if (BT->getKind() == BuiltinType::LongDouble)
1537         return getTarget().useObjCFP2RetForComplexLongDouble();
1538     }
1539   }
1540 
1541   return false;
1542 }
1543 
1544 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1545   const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1546   return GetFunctionType(FI);
1547 }
1548 
1549 llvm::FunctionType *
1550 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1551 
1552   bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1553   (void)Inserted;
1554   assert(Inserted && "Recursively being processed?");
1555 
1556   llvm::Type *resultType = nullptr;
1557   const ABIArgInfo &retAI = FI.getReturnInfo();
1558   switch (retAI.getKind()) {
1559   case ABIArgInfo::Expand:
1560     llvm_unreachable("Invalid ABI kind for return argument");
1561 
1562   case ABIArgInfo::Extend:
1563   case ABIArgInfo::Direct:
1564     resultType = retAI.getCoerceToType();
1565     break;
1566 
1567   case ABIArgInfo::InAlloca:
1568     if (retAI.getInAllocaSRet()) {
1569       // sret things on win32 aren't void, they return the sret pointer.
1570       QualType ret = FI.getReturnType();
1571       llvm::Type *ty = ConvertType(ret);
1572       unsigned addressSpace = Context.getTargetAddressSpace(ret);
1573       resultType = llvm::PointerType::get(ty, addressSpace);
1574     } else {
1575       resultType = llvm::Type::getVoidTy(getLLVMContext());
1576     }
1577     break;
1578 
1579   case ABIArgInfo::Indirect:
1580   case ABIArgInfo::Ignore:
1581     resultType = llvm::Type::getVoidTy(getLLVMContext());
1582     break;
1583 
1584   case ABIArgInfo::CoerceAndExpand:
1585     resultType = retAI.getUnpaddedCoerceAndExpandType();
1586     break;
1587   }
1588 
1589   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1590   SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1591 
1592   // Add type for sret argument.
1593   if (IRFunctionArgs.hasSRetArg()) {
1594     QualType Ret = FI.getReturnType();
1595     llvm::Type *Ty = ConvertType(Ret);
1596     unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1597     ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1598         llvm::PointerType::get(Ty, AddressSpace);
1599   }
1600 
1601   // Add type for inalloca argument.
1602   if (IRFunctionArgs.hasInallocaArg()) {
1603     auto ArgStruct = FI.getArgStruct();
1604     assert(ArgStruct);
1605     ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1606   }
1607 
1608   // Add in all of the required arguments.
1609   unsigned ArgNo = 0;
1610   CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1611                                      ie = it + FI.getNumRequiredArgs();
1612   for (; it != ie; ++it, ++ArgNo) {
1613     const ABIArgInfo &ArgInfo = it->info;
1614 
1615     // Insert a padding type to ensure proper alignment.
1616     if (IRFunctionArgs.hasPaddingArg(ArgNo))
1617       ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1618           ArgInfo.getPaddingType();
1619 
1620     unsigned FirstIRArg, NumIRArgs;
1621     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1622 
1623     switch (ArgInfo.getKind()) {
1624     case ABIArgInfo::Ignore:
1625     case ABIArgInfo::InAlloca:
1626       assert(NumIRArgs == 0);
1627       break;
1628 
1629     case ABIArgInfo::Indirect: {
1630       assert(NumIRArgs == 1);
1631       // indirect arguments are always on the stack, which is alloca addr space.
1632       llvm::Type *LTy = ConvertTypeForMem(it->type);
1633       ArgTypes[FirstIRArg] = LTy->getPointerTo(
1634           CGM.getDataLayout().getAllocaAddrSpace());
1635       break;
1636     }
1637 
1638     case ABIArgInfo::Extend:
1639     case ABIArgInfo::Direct: {
1640       // Fast-isel and the optimizer generally like scalar values better than
1641       // FCAs, so we flatten them if this is safe to do for this argument.
1642       llvm::Type *argType = ArgInfo.getCoerceToType();
1643       llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1644       if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1645         assert(NumIRArgs == st->getNumElements());
1646         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1647           ArgTypes[FirstIRArg + i] = st->getElementType(i);
1648       } else {
1649         assert(NumIRArgs == 1);
1650         ArgTypes[FirstIRArg] = argType;
1651       }
1652       break;
1653     }
1654 
1655     case ABIArgInfo::CoerceAndExpand: {
1656       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1657       for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1658         *ArgTypesIter++ = EltTy;
1659       }
1660       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1661       break;
1662     }
1663 
1664     case ABIArgInfo::Expand:
1665       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1666       getExpandedTypes(it->type, ArgTypesIter);
1667       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1668       break;
1669     }
1670   }
1671 
1672   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1673   assert(Erased && "Not in set?");
1674 
1675   return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1676 }
1677 
1678 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1679   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1680   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1681 
1682   if (!isFuncTypeConvertible(FPT))
1683     return llvm::StructType::get(getLLVMContext());
1684 
1685   return GetFunctionType(GD);
1686 }
1687 
1688 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1689                                                llvm::AttrBuilder &FuncAttrs,
1690                                                const FunctionProtoType *FPT) {
1691   if (!FPT)
1692     return;
1693 
1694   if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1695       FPT->isNothrow())
1696     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1697 }
1698 
1699 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1700                                                bool AttrOnCallSite,
1701                                                llvm::AttrBuilder &FuncAttrs) {
1702   // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1703   if (!HasOptnone) {
1704     if (CodeGenOpts.OptimizeSize)
1705       FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1706     if (CodeGenOpts.OptimizeSize == 2)
1707       FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1708   }
1709 
1710   if (CodeGenOpts.DisableRedZone)
1711     FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1712   if (CodeGenOpts.IndirectTlsSegRefs)
1713     FuncAttrs.addAttribute("indirect-tls-seg-refs");
1714   if (CodeGenOpts.NoImplicitFloat)
1715     FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1716 
1717   if (AttrOnCallSite) {
1718     // Attributes that should go on the call site only.
1719     if (!CodeGenOpts.SimplifyLibCalls ||
1720         CodeGenOpts.isNoBuiltinFunc(Name.data()))
1721       FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1722     if (!CodeGenOpts.TrapFuncName.empty())
1723       FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1724   } else {
1725     StringRef FpKind;
1726     switch (CodeGenOpts.getFramePointer()) {
1727     case CodeGenOptions::FramePointerKind::None:
1728       FpKind = "none";
1729       break;
1730     case CodeGenOptions::FramePointerKind::NonLeaf:
1731       FpKind = "non-leaf";
1732       break;
1733     case CodeGenOptions::FramePointerKind::All:
1734       FpKind = "all";
1735       break;
1736     }
1737     FuncAttrs.addAttribute("frame-pointer", FpKind);
1738 
1739     FuncAttrs.addAttribute("less-precise-fpmad",
1740                            llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1741 
1742     if (CodeGenOpts.NullPointerIsValid)
1743       FuncAttrs.addAttribute("null-pointer-is-valid", "true");
1744     if (!CodeGenOpts.FPDenormalMode.empty())
1745       FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1746 
1747     FuncAttrs.addAttribute("no-trapping-math",
1748                            llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1749 
1750     // Strict (compliant) code is the default, so only add this attribute to
1751     // indicate that we are trying to workaround a problem case.
1752     if (!CodeGenOpts.StrictFloatCastOverflow)
1753       FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1754 
1755     // TODO: Are these all needed?
1756     // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1757     FuncAttrs.addAttribute("no-infs-fp-math",
1758                            llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1759     FuncAttrs.addAttribute("no-nans-fp-math",
1760                            llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1761     FuncAttrs.addAttribute("unsafe-fp-math",
1762                            llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1763     FuncAttrs.addAttribute("use-soft-float",
1764                            llvm::toStringRef(CodeGenOpts.SoftFloat));
1765     FuncAttrs.addAttribute("stack-protector-buffer-size",
1766                            llvm::utostr(CodeGenOpts.SSPBufferSize));
1767     FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1768                            llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1769     FuncAttrs.addAttribute(
1770         "correctly-rounded-divide-sqrt-fp-math",
1771         llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1772 
1773     if (getLangOpts().OpenCL)
1774       FuncAttrs.addAttribute("denorms-are-zero",
1775                              llvm::toStringRef(CodeGenOpts.FlushDenorm));
1776 
1777     // TODO: Reciprocal estimate codegen options should apply to instructions?
1778     const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1779     if (!Recips.empty())
1780       FuncAttrs.addAttribute("reciprocal-estimates",
1781                              llvm::join(Recips, ","));
1782 
1783     if (!CodeGenOpts.PreferVectorWidth.empty() &&
1784         CodeGenOpts.PreferVectorWidth != "none")
1785       FuncAttrs.addAttribute("prefer-vector-width",
1786                              CodeGenOpts.PreferVectorWidth);
1787 
1788     if (CodeGenOpts.StackRealignment)
1789       FuncAttrs.addAttribute("stackrealign");
1790     if (CodeGenOpts.Backchain)
1791       FuncAttrs.addAttribute("backchain");
1792 
1793     if (CodeGenOpts.SpeculativeLoadHardening)
1794       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1795   }
1796 
1797   if (getLangOpts().assumeFunctionsAreConvergent()) {
1798     // Conservatively, mark all functions and calls in CUDA and OpenCL as
1799     // convergent (meaning, they may call an intrinsically convergent op, such
1800     // as __syncthreads() / barrier(), and so can't have certain optimizations
1801     // applied around them).  LLVM will remove this attribute where it safely
1802     // can.
1803     FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1804   }
1805 
1806   if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1807     // Exceptions aren't supported in CUDA device code.
1808     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1809 
1810     // Respect -fcuda-flush-denormals-to-zero.
1811     if (CodeGenOpts.FlushDenorm)
1812       FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1813   }
1814 
1815   for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1816     StringRef Var, Value;
1817     std::tie(Var, Value) = Attr.split('=');
1818     FuncAttrs.addAttribute(Var, Value);
1819   }
1820 }
1821 
1822 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1823   llvm::AttrBuilder FuncAttrs;
1824   ConstructDefaultFnAttrList(F.getName(), F.hasOptNone(),
1825                              /* AttrOnCallSite = */ false, FuncAttrs);
1826   F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1827 }
1828 
1829 void CodeGenModule::ConstructAttributeList(
1830     StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1831     llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1832   llvm::AttrBuilder FuncAttrs;
1833   llvm::AttrBuilder RetAttrs;
1834 
1835   CallingConv = FI.getEffectiveCallingConvention();
1836   if (FI.isNoReturn())
1837     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1838 
1839   // If we have information about the function prototype, we can learn
1840   // attributes from there.
1841   AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1842                                      CalleeInfo.getCalleeFunctionProtoType());
1843 
1844   const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
1845 
1846   bool HasOptnone = false;
1847   // FIXME: handle sseregparm someday...
1848   if (TargetDecl) {
1849     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1850       FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1851     if (TargetDecl->hasAttr<NoThrowAttr>())
1852       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1853     if (TargetDecl->hasAttr<NoReturnAttr>())
1854       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1855     if (TargetDecl->hasAttr<ColdAttr>())
1856       FuncAttrs.addAttribute(llvm::Attribute::Cold);
1857     if (TargetDecl->hasAttr<NoDuplicateAttr>())
1858       FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1859     if (TargetDecl->hasAttr<ConvergentAttr>())
1860       FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1861 
1862     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1863       AddAttributesFromFunctionProtoType(
1864           getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1865       const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1866       const bool IsVirtualCall = MD && MD->isVirtual();
1867       // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
1868       // virtual function. These attributes are not inherited by overloads.
1869       if (!(AttrOnCallSite && IsVirtualCall)) {
1870         if (Fn->isNoReturn())
1871           FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1872 
1873         if (const auto *NBA = TargetDecl->getAttr<NoBuiltinAttr>()) {
1874           bool HasWildcard = llvm::is_contained(NBA->builtinNames(), "*");
1875           if (HasWildcard)
1876             FuncAttrs.addAttribute("no-builtins");
1877           else
1878             for (StringRef BuiltinName : NBA->builtinNames()) {
1879               SmallString<32> AttributeName;
1880               AttributeName += "no-builtin-";
1881               AttributeName += BuiltinName;
1882               FuncAttrs.addAttribute(AttributeName);
1883             }
1884         }
1885       }
1886     }
1887 
1888     // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1889     if (TargetDecl->hasAttr<ConstAttr>()) {
1890       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1891       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1892     } else if (TargetDecl->hasAttr<PureAttr>()) {
1893       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1894       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1895     } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1896       FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1897       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1898     }
1899     if (TargetDecl->hasAttr<RestrictAttr>())
1900       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1901     if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1902         !CodeGenOpts.NullPointerIsValid)
1903       RetAttrs.addAttribute(llvm::Attribute::NonNull);
1904     if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1905       FuncAttrs.addAttribute("no_caller_saved_registers");
1906     if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1907       FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1908 
1909     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1910     if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1911       Optional<unsigned> NumElemsParam;
1912       if (AllocSize->getNumElemsParam().isValid())
1913         NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1914       FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1915                                  NumElemsParam);
1916     }
1917   }
1918 
1919   ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1920 
1921   // This must run after constructing the default function attribute list
1922   // to ensure that the speculative load hardening attribute is removed
1923   // in the case where the -mspeculative-load-hardening flag was passed.
1924   if (TargetDecl) {
1925     if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
1926       FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
1927     if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
1928       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1929   }
1930 
1931   if (CodeGenOpts.EnableSegmentedStacks &&
1932       !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1933     FuncAttrs.addAttribute("split-stack");
1934 
1935   // Add NonLazyBind attribute to function declarations when -fno-plt
1936   // is used.
1937   if (TargetDecl && CodeGenOpts.NoPLT) {
1938     if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1939       if (!Fn->isDefined() && !AttrOnCallSite) {
1940         FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1941       }
1942     }
1943   }
1944 
1945   if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1946     if (getLangOpts().OpenCLVersion <= 120) {
1947       // OpenCL v1.2 Work groups are always uniform
1948       FuncAttrs.addAttribute("uniform-work-group-size", "true");
1949     } else {
1950       // OpenCL v2.0 Work groups may be whether uniform or not.
1951       // '-cl-uniform-work-group-size' compile option gets a hint
1952       // to the compiler that the global work-size be a multiple of
1953       // the work-group size specified to clEnqueueNDRangeKernel
1954       // (i.e. work groups are uniform).
1955       FuncAttrs.addAttribute("uniform-work-group-size",
1956                              llvm::toStringRef(CodeGenOpts.UniformWGSize));
1957     }
1958   }
1959 
1960   if (!AttrOnCallSite) {
1961     bool DisableTailCalls = false;
1962 
1963     if (CodeGenOpts.DisableTailCalls)
1964       DisableTailCalls = true;
1965     else if (TargetDecl) {
1966       if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1967           TargetDecl->hasAttr<AnyX86InterruptAttr>())
1968         DisableTailCalls = true;
1969       else if (CodeGenOpts.NoEscapingBlockTailCalls) {
1970         if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
1971           if (!BD->doesNotEscape())
1972             DisableTailCalls = true;
1973       }
1974     }
1975 
1976     FuncAttrs.addAttribute("disable-tail-calls",
1977                            llvm::toStringRef(DisableTailCalls));
1978     GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
1979   }
1980 
1981   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1982 
1983   QualType RetTy = FI.getReturnType();
1984   const ABIArgInfo &RetAI = FI.getReturnInfo();
1985   switch (RetAI.getKind()) {
1986   case ABIArgInfo::Extend:
1987     if (RetAI.isSignExt())
1988       RetAttrs.addAttribute(llvm::Attribute::SExt);
1989     else
1990       RetAttrs.addAttribute(llvm::Attribute::ZExt);
1991     LLVM_FALLTHROUGH;
1992   case ABIArgInfo::Direct:
1993     if (RetAI.getInReg())
1994       RetAttrs.addAttribute(llvm::Attribute::InReg);
1995     break;
1996   case ABIArgInfo::Ignore:
1997     break;
1998 
1999   case ABIArgInfo::InAlloca:
2000   case ABIArgInfo::Indirect: {
2001     // inalloca and sret disable readnone and readonly
2002     FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2003       .removeAttribute(llvm::Attribute::ReadNone);
2004     break;
2005   }
2006 
2007   case ABIArgInfo::CoerceAndExpand:
2008     break;
2009 
2010   case ABIArgInfo::Expand:
2011     llvm_unreachable("Invalid ABI kind for return argument");
2012   }
2013 
2014   if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2015     QualType PTy = RefTy->getPointeeType();
2016     if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2017       RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2018                                         .getQuantity());
2019     else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2020              !CodeGenOpts.NullPointerIsValid)
2021       RetAttrs.addAttribute(llvm::Attribute::NonNull);
2022   }
2023 
2024   bool hasUsedSRet = false;
2025   SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2026 
2027   // Attach attributes to sret.
2028   if (IRFunctionArgs.hasSRetArg()) {
2029     llvm::AttrBuilder SRETAttrs;
2030     SRETAttrs.addAttribute(llvm::Attribute::StructRet);
2031     hasUsedSRet = true;
2032     if (RetAI.getInReg())
2033       SRETAttrs.addAttribute(llvm::Attribute::InReg);
2034     ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2035         llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2036   }
2037 
2038   // Attach attributes to inalloca argument.
2039   if (IRFunctionArgs.hasInallocaArg()) {
2040     llvm::AttrBuilder Attrs;
2041     Attrs.addAttribute(llvm::Attribute::InAlloca);
2042     ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2043         llvm::AttributeSet::get(getLLVMContext(), Attrs);
2044   }
2045 
2046   unsigned ArgNo = 0;
2047   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2048                                           E = FI.arg_end();
2049        I != E; ++I, ++ArgNo) {
2050     QualType ParamType = I->type;
2051     const ABIArgInfo &AI = I->info;
2052     llvm::AttrBuilder Attrs;
2053 
2054     // Add attribute for padding argument, if necessary.
2055     if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2056       if (AI.getPaddingInReg()) {
2057         ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2058             llvm::AttributeSet::get(
2059                 getLLVMContext(),
2060                 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2061       }
2062     }
2063 
2064     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2065     // have the corresponding parameter variable.  It doesn't make
2066     // sense to do it here because parameters are so messed up.
2067     switch (AI.getKind()) {
2068     case ABIArgInfo::Extend:
2069       if (AI.isSignExt())
2070         Attrs.addAttribute(llvm::Attribute::SExt);
2071       else
2072         Attrs.addAttribute(llvm::Attribute::ZExt);
2073       LLVM_FALLTHROUGH;
2074     case ABIArgInfo::Direct:
2075       if (ArgNo == 0 && FI.isChainCall())
2076         Attrs.addAttribute(llvm::Attribute::Nest);
2077       else if (AI.getInReg())
2078         Attrs.addAttribute(llvm::Attribute::InReg);
2079       break;
2080 
2081     case ABIArgInfo::Indirect: {
2082       if (AI.getInReg())
2083         Attrs.addAttribute(llvm::Attribute::InReg);
2084 
2085       if (AI.getIndirectByVal())
2086         Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
2087 
2088       CharUnits Align = AI.getIndirectAlign();
2089 
2090       // In a byval argument, it is important that the required
2091       // alignment of the type is honored, as LLVM might be creating a
2092       // *new* stack object, and needs to know what alignment to give
2093       // it. (Sometimes it can deduce a sensible alignment on its own,
2094       // but not if clang decides it must emit a packed struct, or the
2095       // user specifies increased alignment requirements.)
2096       //
2097       // This is different from indirect *not* byval, where the object
2098       // exists already, and the align attribute is purely
2099       // informative.
2100       assert(!Align.isZero());
2101 
2102       // For now, only add this when we have a byval argument.
2103       // TODO: be less lazy about updating test cases.
2104       if (AI.getIndirectByVal())
2105         Attrs.addAlignmentAttr(Align.getQuantity());
2106 
2107       // byval disables readnone and readonly.
2108       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2109         .removeAttribute(llvm::Attribute::ReadNone);
2110       break;
2111     }
2112     case ABIArgInfo::Ignore:
2113     case ABIArgInfo::Expand:
2114     case ABIArgInfo::CoerceAndExpand:
2115       break;
2116 
2117     case ABIArgInfo::InAlloca:
2118       // inalloca disables readnone and readonly.
2119       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2120           .removeAttribute(llvm::Attribute::ReadNone);
2121       continue;
2122     }
2123 
2124     if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2125       QualType PTy = RefTy->getPointeeType();
2126       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2127         Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2128                                        .getQuantity());
2129       else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2130                !CodeGenOpts.NullPointerIsValid)
2131         Attrs.addAttribute(llvm::Attribute::NonNull);
2132     }
2133 
2134     switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2135     case ParameterABI::Ordinary:
2136       break;
2137 
2138     case ParameterABI::SwiftIndirectResult: {
2139       // Add 'sret' if we haven't already used it for something, but
2140       // only if the result is void.
2141       if (!hasUsedSRet && RetTy->isVoidType()) {
2142         Attrs.addAttribute(llvm::Attribute::StructRet);
2143         hasUsedSRet = true;
2144       }
2145 
2146       // Add 'noalias' in either case.
2147       Attrs.addAttribute(llvm::Attribute::NoAlias);
2148 
2149       // Add 'dereferenceable' and 'alignment'.
2150       auto PTy = ParamType->getPointeeType();
2151       if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2152         auto info = getContext().getTypeInfoInChars(PTy);
2153         Attrs.addDereferenceableAttr(info.first.getQuantity());
2154         Attrs.addAttribute(llvm::Attribute::getWithAlignment(
2155             getLLVMContext(), info.second.getAsAlign()));
2156       }
2157       break;
2158     }
2159 
2160     case ParameterABI::SwiftErrorResult:
2161       Attrs.addAttribute(llvm::Attribute::SwiftError);
2162       break;
2163 
2164     case ParameterABI::SwiftContext:
2165       Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2166       break;
2167     }
2168 
2169     if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2170       Attrs.addAttribute(llvm::Attribute::NoCapture);
2171 
2172     if (Attrs.hasAttributes()) {
2173       unsigned FirstIRArg, NumIRArgs;
2174       std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2175       for (unsigned i = 0; i < NumIRArgs; i++)
2176         ArgAttrs[FirstIRArg + i] =
2177             llvm::AttributeSet::get(getLLVMContext(), Attrs);
2178     }
2179   }
2180   assert(ArgNo == FI.arg_size());
2181 
2182   AttrList = llvm::AttributeList::get(
2183       getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2184       llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2185 }
2186 
2187 /// An argument came in as a promoted argument; demote it back to its
2188 /// declared type.
2189 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2190                                          const VarDecl *var,
2191                                          llvm::Value *value) {
2192   llvm::Type *varType = CGF.ConvertType(var->getType());
2193 
2194   // This can happen with promotions that actually don't change the
2195   // underlying type, like the enum promotions.
2196   if (value->getType() == varType) return value;
2197 
2198   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2199          && "unexpected promotion type");
2200 
2201   if (isa<llvm::IntegerType>(varType))
2202     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2203 
2204   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2205 }
2206 
2207 /// Returns the attribute (either parameter attribute, or function
2208 /// attribute), which declares argument ArgNo to be non-null.
2209 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2210                                          QualType ArgType, unsigned ArgNo) {
2211   // FIXME: __attribute__((nonnull)) can also be applied to:
2212   //   - references to pointers, where the pointee is known to be
2213   //     nonnull (apparently a Clang extension)
2214   //   - transparent unions containing pointers
2215   // In the former case, LLVM IR cannot represent the constraint. In
2216   // the latter case, we have no guarantee that the transparent union
2217   // is in fact passed as a pointer.
2218   if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2219     return nullptr;
2220   // First, check attribute on parameter itself.
2221   if (PVD) {
2222     if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2223       return ParmNNAttr;
2224   }
2225   // Check function attributes.
2226   if (!FD)
2227     return nullptr;
2228   for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2229     if (NNAttr->isNonNull(ArgNo))
2230       return NNAttr;
2231   }
2232   return nullptr;
2233 }
2234 
2235 namespace {
2236   struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2237     Address Temp;
2238     Address Arg;
2239     CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2240     void Emit(CodeGenFunction &CGF, Flags flags) override {
2241       llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2242       CGF.Builder.CreateStore(errorValue, Arg);
2243     }
2244   };
2245 }
2246 
2247 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2248                                          llvm::Function *Fn,
2249                                          const FunctionArgList &Args) {
2250   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2251     // Naked functions don't have prologues.
2252     return;
2253 
2254   // If this is an implicit-return-zero function, go ahead and
2255   // initialize the return value.  TODO: it might be nice to have
2256   // a more general mechanism for this that didn't require synthesized
2257   // return statements.
2258   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2259     if (FD->hasImplicitReturnZero()) {
2260       QualType RetTy = FD->getReturnType().getUnqualifiedType();
2261       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2262       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2263       Builder.CreateStore(Zero, ReturnValue);
2264     }
2265   }
2266 
2267   // FIXME: We no longer need the types from FunctionArgList; lift up and
2268   // simplify.
2269 
2270   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2271   // Flattened function arguments.
2272   SmallVector<llvm::Value *, 16> FnArgs;
2273   FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2274   for (auto &Arg : Fn->args()) {
2275     FnArgs.push_back(&Arg);
2276   }
2277   assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2278 
2279   // If we're using inalloca, all the memory arguments are GEPs off of the last
2280   // parameter, which is a pointer to the complete memory area.
2281   Address ArgStruct = Address::invalid();
2282   if (IRFunctionArgs.hasInallocaArg()) {
2283     ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2284                         FI.getArgStructAlignment());
2285 
2286     assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2287   }
2288 
2289   // Name the struct return parameter.
2290   if (IRFunctionArgs.hasSRetArg()) {
2291     auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2292     AI->setName("agg.result");
2293     AI->addAttr(llvm::Attribute::NoAlias);
2294   }
2295 
2296   // Track if we received the parameter as a pointer (indirect, byval, or
2297   // inalloca).  If already have a pointer, EmitParmDecl doesn't need to copy it
2298   // into a local alloca for us.
2299   SmallVector<ParamValue, 16> ArgVals;
2300   ArgVals.reserve(Args.size());
2301 
2302   // Create a pointer value for every parameter declaration.  This usually
2303   // entails copying one or more LLVM IR arguments into an alloca.  Don't push
2304   // any cleanups or do anything that might unwind.  We do that separately, so
2305   // we can push the cleanups in the correct order for the ABI.
2306   assert(FI.arg_size() == Args.size() &&
2307          "Mismatch between function signature & arguments.");
2308   unsigned ArgNo = 0;
2309   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2310   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2311        i != e; ++i, ++info_it, ++ArgNo) {
2312     const VarDecl *Arg = *i;
2313     const ABIArgInfo &ArgI = info_it->info;
2314 
2315     bool isPromoted =
2316       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2317     // We are converting from ABIArgInfo type to VarDecl type directly, unless
2318     // the parameter is promoted. In this case we convert to
2319     // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2320     QualType Ty = isPromoted ? info_it->type : Arg->getType();
2321     assert(hasScalarEvaluationKind(Ty) ==
2322            hasScalarEvaluationKind(Arg->getType()));
2323 
2324     unsigned FirstIRArg, NumIRArgs;
2325     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2326 
2327     switch (ArgI.getKind()) {
2328     case ABIArgInfo::InAlloca: {
2329       assert(NumIRArgs == 0);
2330       auto FieldIndex = ArgI.getInAllocaFieldIndex();
2331       Address V =
2332           Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2333       ArgVals.push_back(ParamValue::forIndirect(V));
2334       break;
2335     }
2336 
2337     case ABIArgInfo::Indirect: {
2338       assert(NumIRArgs == 1);
2339       Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2340 
2341       if (!hasScalarEvaluationKind(Ty)) {
2342         // Aggregates and complex variables are accessed by reference.  All we
2343         // need to do is realign the value, if requested.
2344         Address V = ParamAddr;
2345         if (ArgI.getIndirectRealign()) {
2346           Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2347 
2348           // Copy from the incoming argument pointer to the temporary with the
2349           // appropriate alignment.
2350           //
2351           // FIXME: We should have a common utility for generating an aggregate
2352           // copy.
2353           CharUnits Size = getContext().getTypeSizeInChars(Ty);
2354           auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2355           Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2356           Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2357           Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2358           V = AlignedTemp;
2359         }
2360         ArgVals.push_back(ParamValue::forIndirect(V));
2361       } else {
2362         // Load scalar value from indirect argument.
2363         llvm::Value *V =
2364             EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2365 
2366         if (isPromoted)
2367           V = emitArgumentDemotion(*this, Arg, V);
2368         ArgVals.push_back(ParamValue::forDirect(V));
2369       }
2370       break;
2371     }
2372 
2373     case ABIArgInfo::Extend:
2374     case ABIArgInfo::Direct: {
2375 
2376       // If we have the trivial case, handle it with no muss and fuss.
2377       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2378           ArgI.getCoerceToType() == ConvertType(Ty) &&
2379           ArgI.getDirectOffset() == 0) {
2380         assert(NumIRArgs == 1);
2381         llvm::Value *V = FnArgs[FirstIRArg];
2382         auto AI = cast<llvm::Argument>(V);
2383 
2384         if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2385           if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2386                              PVD->getFunctionScopeIndex()) &&
2387               !CGM.getCodeGenOpts().NullPointerIsValid)
2388             AI->addAttr(llvm::Attribute::NonNull);
2389 
2390           QualType OTy = PVD->getOriginalType();
2391           if (const auto *ArrTy =
2392               getContext().getAsConstantArrayType(OTy)) {
2393             // A C99 array parameter declaration with the static keyword also
2394             // indicates dereferenceability, and if the size is constant we can
2395             // use the dereferenceable attribute (which requires the size in
2396             // bytes).
2397             if (ArrTy->getSizeModifier() == ArrayType::Static) {
2398               QualType ETy = ArrTy->getElementType();
2399               uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2400               if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2401                   ArrSize) {
2402                 llvm::AttrBuilder Attrs;
2403                 Attrs.addDereferenceableAttr(
2404                   getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2405                 AI->addAttrs(Attrs);
2406               } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
2407                          !CGM.getCodeGenOpts().NullPointerIsValid) {
2408                 AI->addAttr(llvm::Attribute::NonNull);
2409               }
2410             }
2411           } else if (const auto *ArrTy =
2412                      getContext().getAsVariableArrayType(OTy)) {
2413             // For C99 VLAs with the static keyword, we don't know the size so
2414             // we can't use the dereferenceable attribute, but in addrspace(0)
2415             // we know that it must be nonnull.
2416             if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2417                 !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
2418                 !CGM.getCodeGenOpts().NullPointerIsValid)
2419               AI->addAttr(llvm::Attribute::NonNull);
2420           }
2421 
2422           const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2423           if (!AVAttr)
2424             if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2425               AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2426           if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2427             // If alignment-assumption sanitizer is enabled, we do *not* add
2428             // alignment attribute here, but emit normal alignment assumption,
2429             // so the UBSAN check could function.
2430             llvm::Value *AlignmentValue =
2431               EmitScalarExpr(AVAttr->getAlignment());
2432             llvm::ConstantInt *AlignmentCI =
2433               cast<llvm::ConstantInt>(AlignmentValue);
2434             unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2435                                           +llvm::Value::MaximumAlignment);
2436             AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2437           }
2438         }
2439 
2440         if (Arg->getType().isRestrictQualified())
2441           AI->addAttr(llvm::Attribute::NoAlias);
2442 
2443         // LLVM expects swifterror parameters to be used in very restricted
2444         // ways.  Copy the value into a less-restricted temporary.
2445         if (FI.getExtParameterInfo(ArgNo).getABI()
2446               == ParameterABI::SwiftErrorResult) {
2447           QualType pointeeTy = Ty->getPointeeType();
2448           assert(pointeeTy->isPointerType());
2449           Address temp =
2450             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2451           Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2452           llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2453           Builder.CreateStore(incomingErrorValue, temp);
2454           V = temp.getPointer();
2455 
2456           // Push a cleanup to copy the value back at the end of the function.
2457           // The convention does not guarantee that the value will be written
2458           // back if the function exits with an unwind exception.
2459           EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2460         }
2461 
2462         // Ensure the argument is the correct type.
2463         if (V->getType() != ArgI.getCoerceToType())
2464           V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2465 
2466         if (isPromoted)
2467           V = emitArgumentDemotion(*this, Arg, V);
2468 
2469         // Because of merging of function types from multiple decls it is
2470         // possible for the type of an argument to not match the corresponding
2471         // type in the function type. Since we are codegening the callee
2472         // in here, add a cast to the argument type.
2473         llvm::Type *LTy = ConvertType(Arg->getType());
2474         if (V->getType() != LTy)
2475           V = Builder.CreateBitCast(V, LTy);
2476 
2477         ArgVals.push_back(ParamValue::forDirect(V));
2478         break;
2479       }
2480 
2481       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2482                                      Arg->getName());
2483 
2484       // Pointer to store into.
2485       Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2486 
2487       // Fast-isel and the optimizer generally like scalar values better than
2488       // FCAs, so we flatten them if this is safe to do for this argument.
2489       llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2490       if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2491           STy->getNumElements() > 1) {
2492         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2493         llvm::Type *DstTy = Ptr.getElementType();
2494         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2495 
2496         Address AddrToStoreInto = Address::invalid();
2497         if (SrcSize <= DstSize) {
2498           AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2499         } else {
2500           AddrToStoreInto =
2501             CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2502         }
2503 
2504         assert(STy->getNumElements() == NumIRArgs);
2505         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2506           auto AI = FnArgs[FirstIRArg + i];
2507           AI->setName(Arg->getName() + ".coerce" + Twine(i));
2508           Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
2509           Builder.CreateStore(AI, EltPtr);
2510         }
2511 
2512         if (SrcSize > DstSize) {
2513           Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2514         }
2515 
2516       } else {
2517         // Simple case, just do a coerced store of the argument into the alloca.
2518         assert(NumIRArgs == 1);
2519         auto AI = FnArgs[FirstIRArg];
2520         AI->setName(Arg->getName() + ".coerce");
2521         CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
2522       }
2523 
2524       // Match to what EmitParmDecl is expecting for this type.
2525       if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2526         llvm::Value *V =
2527             EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2528         if (isPromoted)
2529           V = emitArgumentDemotion(*this, Arg, V);
2530         ArgVals.push_back(ParamValue::forDirect(V));
2531       } else {
2532         ArgVals.push_back(ParamValue::forIndirect(Alloca));
2533       }
2534       break;
2535     }
2536 
2537     case ABIArgInfo::CoerceAndExpand: {
2538       // Reconstruct into a temporary.
2539       Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2540       ArgVals.push_back(ParamValue::forIndirect(alloca));
2541 
2542       auto coercionType = ArgI.getCoerceAndExpandType();
2543       alloca = Builder.CreateElementBitCast(alloca, coercionType);
2544 
2545       unsigned argIndex = FirstIRArg;
2546       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2547         llvm::Type *eltType = coercionType->getElementType(i);
2548         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2549           continue;
2550 
2551         auto eltAddr = Builder.CreateStructGEP(alloca, i);
2552         auto elt = FnArgs[argIndex++];
2553         Builder.CreateStore(elt, eltAddr);
2554       }
2555       assert(argIndex == FirstIRArg + NumIRArgs);
2556       break;
2557     }
2558 
2559     case ABIArgInfo::Expand: {
2560       // If this structure was expanded into multiple arguments then
2561       // we need to create a temporary and reconstruct it from the
2562       // arguments.
2563       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2564       LValue LV = MakeAddrLValue(Alloca, Ty);
2565       ArgVals.push_back(ParamValue::forIndirect(Alloca));
2566 
2567       auto FnArgIter = FnArgs.begin() + FirstIRArg;
2568       ExpandTypeFromArgs(Ty, LV, FnArgIter);
2569       assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2570       for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2571         auto AI = FnArgs[FirstIRArg + i];
2572         AI->setName(Arg->getName() + "." + Twine(i));
2573       }
2574       break;
2575     }
2576 
2577     case ABIArgInfo::Ignore:
2578       assert(NumIRArgs == 0);
2579       // Initialize the local variable appropriately.
2580       if (!hasScalarEvaluationKind(Ty)) {
2581         ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2582       } else {
2583         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2584         ArgVals.push_back(ParamValue::forDirect(U));
2585       }
2586       break;
2587     }
2588   }
2589 
2590   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2591     for (int I = Args.size() - 1; I >= 0; --I)
2592       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2593   } else {
2594     for (unsigned I = 0, E = Args.size(); I != E; ++I)
2595       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2596   }
2597 }
2598 
2599 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2600   while (insn->use_empty()) {
2601     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2602     if (!bitcast) return;
2603 
2604     // This is "safe" because we would have used a ConstantExpr otherwise.
2605     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2606     bitcast->eraseFromParent();
2607   }
2608 }
2609 
2610 /// Try to emit a fused autorelease of a return result.
2611 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2612                                                     llvm::Value *result) {
2613   // We must be immediately followed the cast.
2614   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2615   if (BB->empty()) return nullptr;
2616   if (&BB->back() != result) return nullptr;
2617 
2618   llvm::Type *resultType = result->getType();
2619 
2620   // result is in a BasicBlock and is therefore an Instruction.
2621   llvm::Instruction *generator = cast<llvm::Instruction>(result);
2622 
2623   SmallVector<llvm::Instruction *, 4> InstsToKill;
2624 
2625   // Look for:
2626   //  %generator = bitcast %type1* %generator2 to %type2*
2627   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2628     // We would have emitted this as a constant if the operand weren't
2629     // an Instruction.
2630     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2631 
2632     // Require the generator to be immediately followed by the cast.
2633     if (generator->getNextNode() != bitcast)
2634       return nullptr;
2635 
2636     InstsToKill.push_back(bitcast);
2637   }
2638 
2639   // Look for:
2640   //   %generator = call i8* @objc_retain(i8* %originalResult)
2641   // or
2642   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2643   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2644   if (!call) return nullptr;
2645 
2646   bool doRetainAutorelease;
2647 
2648   if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2649     doRetainAutorelease = true;
2650   } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2651                                           .objc_retainAutoreleasedReturnValue) {
2652     doRetainAutorelease = false;
2653 
2654     // If we emitted an assembly marker for this call (and the
2655     // ARCEntrypoints field should have been set if so), go looking
2656     // for that call.  If we can't find it, we can't do this
2657     // optimization.  But it should always be the immediately previous
2658     // instruction, unless we needed bitcasts around the call.
2659     if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2660       llvm::Instruction *prev = call->getPrevNode();
2661       assert(prev);
2662       if (isa<llvm::BitCastInst>(prev)) {
2663         prev = prev->getPrevNode();
2664         assert(prev);
2665       }
2666       assert(isa<llvm::CallInst>(prev));
2667       assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2668                CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2669       InstsToKill.push_back(prev);
2670     }
2671   } else {
2672     return nullptr;
2673   }
2674 
2675   result = call->getArgOperand(0);
2676   InstsToKill.push_back(call);
2677 
2678   // Keep killing bitcasts, for sanity.  Note that we no longer care
2679   // about precise ordering as long as there's exactly one use.
2680   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2681     if (!bitcast->hasOneUse()) break;
2682     InstsToKill.push_back(bitcast);
2683     result = bitcast->getOperand(0);
2684   }
2685 
2686   // Delete all the unnecessary instructions, from latest to earliest.
2687   for (auto *I : InstsToKill)
2688     I->eraseFromParent();
2689 
2690   // Do the fused retain/autorelease if we were asked to.
2691   if (doRetainAutorelease)
2692     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2693 
2694   // Cast back to the result type.
2695   return CGF.Builder.CreateBitCast(result, resultType);
2696 }
2697 
2698 /// If this is a +1 of the value of an immutable 'self', remove it.
2699 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2700                                           llvm::Value *result) {
2701   // This is only applicable to a method with an immutable 'self'.
2702   const ObjCMethodDecl *method =
2703     dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2704   if (!method) return nullptr;
2705   const VarDecl *self = method->getSelfDecl();
2706   if (!self->getType().isConstQualified()) return nullptr;
2707 
2708   // Look for a retain call.
2709   llvm::CallInst *retainCall =
2710     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2711   if (!retainCall ||
2712       retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2713     return nullptr;
2714 
2715   // Look for an ordinary load of 'self'.
2716   llvm::Value *retainedValue = retainCall->getArgOperand(0);
2717   llvm::LoadInst *load =
2718     dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2719   if (!load || load->isAtomic() || load->isVolatile() ||
2720       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2721     return nullptr;
2722 
2723   // Okay!  Burn it all down.  This relies for correctness on the
2724   // assumption that the retain is emitted as part of the return and
2725   // that thereafter everything is used "linearly".
2726   llvm::Type *resultType = result->getType();
2727   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2728   assert(retainCall->use_empty());
2729   retainCall->eraseFromParent();
2730   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2731 
2732   return CGF.Builder.CreateBitCast(load, resultType);
2733 }
2734 
2735 /// Emit an ARC autorelease of the result of a function.
2736 ///
2737 /// \return the value to actually return from the function
2738 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2739                                             llvm::Value *result) {
2740   // If we're returning 'self', kill the initial retain.  This is a
2741   // heuristic attempt to "encourage correctness" in the really unfortunate
2742   // case where we have a return of self during a dealloc and we desperately
2743   // need to avoid the possible autorelease.
2744   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2745     return self;
2746 
2747   // At -O0, try to emit a fused retain/autorelease.
2748   if (CGF.shouldUseFusedARCCalls())
2749     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2750       return fused;
2751 
2752   return CGF.EmitARCAutoreleaseReturnValue(result);
2753 }
2754 
2755 /// Heuristically search for a dominating store to the return-value slot.
2756 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2757   // Check if a User is a store which pointerOperand is the ReturnValue.
2758   // We are looking for stores to the ReturnValue, not for stores of the
2759   // ReturnValue to some other location.
2760   auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2761     auto *SI = dyn_cast<llvm::StoreInst>(U);
2762     if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2763       return nullptr;
2764     // These aren't actually possible for non-coerced returns, and we
2765     // only care about non-coerced returns on this code path.
2766     assert(!SI->isAtomic() && !SI->isVolatile());
2767     return SI;
2768   };
2769   // If there are multiple uses of the return-value slot, just check
2770   // for something immediately preceding the IP.  Sometimes this can
2771   // happen with how we generate implicit-returns; it can also happen
2772   // with noreturn cleanups.
2773   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2774     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2775     if (IP->empty()) return nullptr;
2776     llvm::Instruction *I = &IP->back();
2777 
2778     // Skip lifetime markers
2779     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2780                                             IE = IP->rend();
2781          II != IE; ++II) {
2782       if (llvm::IntrinsicInst *Intrinsic =
2783               dyn_cast<llvm::IntrinsicInst>(&*II)) {
2784         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2785           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2786           ++II;
2787           if (II == IE)
2788             break;
2789           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2790             continue;
2791         }
2792       }
2793       I = &*II;
2794       break;
2795     }
2796 
2797     return GetStoreIfValid(I);
2798   }
2799 
2800   llvm::StoreInst *store =
2801       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2802   if (!store) return nullptr;
2803 
2804   // Now do a first-and-dirty dominance check: just walk up the
2805   // single-predecessors chain from the current insertion point.
2806   llvm::BasicBlock *StoreBB = store->getParent();
2807   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2808   while (IP != StoreBB) {
2809     if (!(IP = IP->getSinglePredecessor()))
2810       return nullptr;
2811   }
2812 
2813   // Okay, the store's basic block dominates the insertion point; we
2814   // can do our thing.
2815   return store;
2816 }
2817 
2818 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2819                                          bool EmitRetDbgLoc,
2820                                          SourceLocation EndLoc) {
2821   if (FI.isNoReturn()) {
2822     // Noreturn functions don't return.
2823     EmitUnreachable(EndLoc);
2824     return;
2825   }
2826 
2827   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2828     // Naked functions don't have epilogues.
2829     Builder.CreateUnreachable();
2830     return;
2831   }
2832 
2833   // Functions with no result always return void.
2834   if (!ReturnValue.isValid()) {
2835     Builder.CreateRetVoid();
2836     return;
2837   }
2838 
2839   llvm::DebugLoc RetDbgLoc;
2840   llvm::Value *RV = nullptr;
2841   QualType RetTy = FI.getReturnType();
2842   const ABIArgInfo &RetAI = FI.getReturnInfo();
2843 
2844   switch (RetAI.getKind()) {
2845   case ABIArgInfo::InAlloca:
2846     // Aggregrates get evaluated directly into the destination.  Sometimes we
2847     // need to return the sret value in a register, though.
2848     assert(hasAggregateEvaluationKind(RetTy));
2849     if (RetAI.getInAllocaSRet()) {
2850       llvm::Function::arg_iterator EI = CurFn->arg_end();
2851       --EI;
2852       llvm::Value *ArgStruct = &*EI;
2853       llvm::Value *SRet = Builder.CreateStructGEP(
2854           nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2855       RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2856     }
2857     break;
2858 
2859   case ABIArgInfo::Indirect: {
2860     auto AI = CurFn->arg_begin();
2861     if (RetAI.isSRetAfterThis())
2862       ++AI;
2863     switch (getEvaluationKind(RetTy)) {
2864     case TEK_Complex: {
2865       ComplexPairTy RT =
2866         EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2867       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2868                          /*isInit*/ true);
2869       break;
2870     }
2871     case TEK_Aggregate:
2872       // Do nothing; aggregrates get evaluated directly into the destination.
2873       break;
2874     case TEK_Scalar:
2875       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2876                         MakeNaturalAlignAddrLValue(&*AI, RetTy),
2877                         /*isInit*/ true);
2878       break;
2879     }
2880     break;
2881   }
2882 
2883   case ABIArgInfo::Extend:
2884   case ABIArgInfo::Direct:
2885     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2886         RetAI.getDirectOffset() == 0) {
2887       // The internal return value temp always will have pointer-to-return-type
2888       // type, just do a load.
2889 
2890       // If there is a dominating store to ReturnValue, we can elide
2891       // the load, zap the store, and usually zap the alloca.
2892       if (llvm::StoreInst *SI =
2893               findDominatingStoreToReturnValue(*this)) {
2894         // Reuse the debug location from the store unless there is
2895         // cleanup code to be emitted between the store and return
2896         // instruction.
2897         if (EmitRetDbgLoc && !AutoreleaseResult)
2898           RetDbgLoc = SI->getDebugLoc();
2899         // Get the stored value and nuke the now-dead store.
2900         RV = SI->getValueOperand();
2901         SI->eraseFromParent();
2902 
2903       // Otherwise, we have to do a simple load.
2904       } else {
2905         RV = Builder.CreateLoad(ReturnValue);
2906       }
2907     } else {
2908       // If the value is offset in memory, apply the offset now.
2909       Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2910 
2911       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2912     }
2913 
2914     // In ARC, end functions that return a retainable type with a call
2915     // to objc_autoreleaseReturnValue.
2916     if (AutoreleaseResult) {
2917 #ifndef NDEBUG
2918       // Type::isObjCRetainabletype has to be called on a QualType that hasn't
2919       // been stripped of the typedefs, so we cannot use RetTy here. Get the
2920       // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2921       // CurCodeDecl or BlockInfo.
2922       QualType RT;
2923 
2924       if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2925         RT = FD->getReturnType();
2926       else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2927         RT = MD->getReturnType();
2928       else if (isa<BlockDecl>(CurCodeDecl))
2929         RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2930       else
2931         llvm_unreachable("Unexpected function/method type");
2932 
2933       assert(getLangOpts().ObjCAutoRefCount &&
2934              !FI.isReturnsRetained() &&
2935              RT->isObjCRetainableType());
2936 #endif
2937       RV = emitAutoreleaseOfResult(*this, RV);
2938     }
2939 
2940     break;
2941 
2942   case ABIArgInfo::Ignore:
2943     break;
2944 
2945   case ABIArgInfo::CoerceAndExpand: {
2946     auto coercionType = RetAI.getCoerceAndExpandType();
2947 
2948     // Load all of the coerced elements out into results.
2949     llvm::SmallVector<llvm::Value*, 4> results;
2950     Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2951     for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2952       auto coercedEltType = coercionType->getElementType(i);
2953       if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2954         continue;
2955 
2956       auto eltAddr = Builder.CreateStructGEP(addr, i);
2957       auto elt = Builder.CreateLoad(eltAddr);
2958       results.push_back(elt);
2959     }
2960 
2961     // If we have one result, it's the single direct result type.
2962     if (results.size() == 1) {
2963       RV = results[0];
2964 
2965     // Otherwise, we need to make a first-class aggregate.
2966     } else {
2967       // Construct a return type that lacks padding elements.
2968       llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2969 
2970       RV = llvm::UndefValue::get(returnType);
2971       for (unsigned i = 0, e = results.size(); i != e; ++i) {
2972         RV = Builder.CreateInsertValue(RV, results[i], i);
2973       }
2974     }
2975     break;
2976   }
2977 
2978   case ABIArgInfo::Expand:
2979     llvm_unreachable("Invalid ABI kind for return argument");
2980   }
2981 
2982   llvm::Instruction *Ret;
2983   if (RV) {
2984     EmitReturnValueCheck(RV);
2985     Ret = Builder.CreateRet(RV);
2986   } else {
2987     Ret = Builder.CreateRetVoid();
2988   }
2989 
2990   if (RetDbgLoc)
2991     Ret->setDebugLoc(std::move(RetDbgLoc));
2992 }
2993 
2994 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
2995   // A current decl may not be available when emitting vtable thunks.
2996   if (!CurCodeDecl)
2997     return;
2998 
2999   ReturnsNonNullAttr *RetNNAttr = nullptr;
3000   if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
3001     RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3002 
3003   if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3004     return;
3005 
3006   // Prefer the returns_nonnull attribute if it's present.
3007   SourceLocation AttrLoc;
3008   SanitizerMask CheckKind;
3009   SanitizerHandler Handler;
3010   if (RetNNAttr) {
3011     assert(!requiresReturnValueNullabilityCheck() &&
3012            "Cannot check nullability and the nonnull attribute");
3013     AttrLoc = RetNNAttr->getLocation();
3014     CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3015     Handler = SanitizerHandler::NonnullReturn;
3016   } else {
3017     if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3018       if (auto *TSI = DD->getTypeSourceInfo())
3019         if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
3020           AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3021     CheckKind = SanitizerKind::NullabilityReturn;
3022     Handler = SanitizerHandler::NullabilityReturn;
3023   }
3024 
3025   SanitizerScope SanScope(this);
3026 
3027   // Make sure the "return" source location is valid. If we're checking a
3028   // nullability annotation, make sure the preconditions for the check are met.
3029   llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3030   llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3031   llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3032   llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3033   if (requiresReturnValueNullabilityCheck())
3034     CanNullCheck =
3035         Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3036   Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3037   EmitBlock(Check);
3038 
3039   // Now do the null check.
3040   llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3041   llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3042   llvm::Value *DynamicData[] = {SLocPtr};
3043   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3044 
3045   EmitBlock(NoCheck);
3046 
3047 #ifndef NDEBUG
3048   // The return location should not be used after the check has been emitted.
3049   ReturnLocation = Address::invalid();
3050 #endif
3051 }
3052 
3053 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3054   const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3055   return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3056 }
3057 
3058 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3059                                           QualType Ty) {
3060   // FIXME: Generate IR in one pass, rather than going back and fixing up these
3061   // placeholders.
3062   llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3063   llvm::Type *IRPtrTy = IRTy->getPointerTo();
3064   llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3065 
3066   // FIXME: When we generate this IR in one pass, we shouldn't need
3067   // this win32-specific alignment hack.
3068   CharUnits Align = CharUnits::fromQuantity(4);
3069   Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3070 
3071   return AggValueSlot::forAddr(Address(Placeholder, Align),
3072                                Ty.getQualifiers(),
3073                                AggValueSlot::IsNotDestructed,
3074                                AggValueSlot::DoesNotNeedGCBarriers,
3075                                AggValueSlot::IsNotAliased,
3076                                AggValueSlot::DoesNotOverlap);
3077 }
3078 
3079 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3080                                           const VarDecl *param,
3081                                           SourceLocation loc) {
3082   // StartFunction converted the ABI-lowered parameter(s) into a
3083   // local alloca.  We need to turn that into an r-value suitable
3084   // for EmitCall.
3085   Address local = GetAddrOfLocalVar(param);
3086 
3087   QualType type = param->getType();
3088 
3089   if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3090     CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3091   }
3092 
3093   // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3094   // but the argument needs to be the original pointer.
3095   if (type->isReferenceType()) {
3096     args.add(RValue::get(Builder.CreateLoad(local)), type);
3097 
3098   // In ARC, move out of consumed arguments so that the release cleanup
3099   // entered by StartFunction doesn't cause an over-release.  This isn't
3100   // optimal -O0 code generation, but it should get cleaned up when
3101   // optimization is enabled.  This also assumes that delegate calls are
3102   // performed exactly once for a set of arguments, but that should be safe.
3103   } else if (getLangOpts().ObjCAutoRefCount &&
3104              param->hasAttr<NSConsumedAttr>() &&
3105              type->isObjCRetainableType()) {
3106     llvm::Value *ptr = Builder.CreateLoad(local);
3107     auto null =
3108       llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3109     Builder.CreateStore(null, local);
3110     args.add(RValue::get(ptr), type);
3111 
3112   // For the most part, we just need to load the alloca, except that
3113   // aggregate r-values are actually pointers to temporaries.
3114   } else {
3115     args.add(convertTempToRValue(local, type, loc), type);
3116   }
3117 
3118   // Deactivate the cleanup for the callee-destructed param that was pushed.
3119   if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3120       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3121       param->needsDestruction(getContext())) {
3122     EHScopeStack::stable_iterator cleanup =
3123         CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3124     assert(cleanup.isValid() &&
3125            "cleanup for callee-destructed param not recorded");
3126     // This unreachable is a temporary marker which will be removed later.
3127     llvm::Instruction *isActive = Builder.CreateUnreachable();
3128     args.addArgCleanupDeactivation(cleanup, isActive);
3129   }
3130 }
3131 
3132 static bool isProvablyNull(llvm::Value *addr) {
3133   return isa<llvm::ConstantPointerNull>(addr);
3134 }
3135 
3136 /// Emit the actual writing-back of a writeback.
3137 static void emitWriteback(CodeGenFunction &CGF,
3138                           const CallArgList::Writeback &writeback) {
3139   const LValue &srcLV = writeback.Source;
3140   Address srcAddr = srcLV.getAddress();
3141   assert(!isProvablyNull(srcAddr.getPointer()) &&
3142          "shouldn't have writeback for provably null argument");
3143 
3144   llvm::BasicBlock *contBB = nullptr;
3145 
3146   // If the argument wasn't provably non-null, we need to null check
3147   // before doing the store.
3148   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3149                                               CGF.CGM.getDataLayout());
3150   if (!provablyNonNull) {
3151     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3152     contBB = CGF.createBasicBlock("icr.done");
3153 
3154     llvm::Value *isNull =
3155       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3156     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3157     CGF.EmitBlock(writebackBB);
3158   }
3159 
3160   // Load the value to writeback.
3161   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3162 
3163   // Cast it back, in case we're writing an id to a Foo* or something.
3164   value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3165                                     "icr.writeback-cast");
3166 
3167   // Perform the writeback.
3168 
3169   // If we have a "to use" value, it's something we need to emit a use
3170   // of.  This has to be carefully threaded in: if it's done after the
3171   // release it's potentially undefined behavior (and the optimizer
3172   // will ignore it), and if it happens before the retain then the
3173   // optimizer could move the release there.
3174   if (writeback.ToUse) {
3175     assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3176 
3177     // Retain the new value.  No need to block-copy here:  the block's
3178     // being passed up the stack.
3179     value = CGF.EmitARCRetainNonBlock(value);
3180 
3181     // Emit the intrinsic use here.
3182     CGF.EmitARCIntrinsicUse(writeback.ToUse);
3183 
3184     // Load the old value (primitively).
3185     llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3186 
3187     // Put the new value in place (primitively).
3188     CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3189 
3190     // Release the old value.
3191     CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3192 
3193   // Otherwise, we can just do a normal lvalue store.
3194   } else {
3195     CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3196   }
3197 
3198   // Jump to the continuation block.
3199   if (!provablyNonNull)
3200     CGF.EmitBlock(contBB);
3201 }
3202 
3203 static void emitWritebacks(CodeGenFunction &CGF,
3204                            const CallArgList &args) {
3205   for (const auto &I : args.writebacks())
3206     emitWriteback(CGF, I);
3207 }
3208 
3209 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3210                                             const CallArgList &CallArgs) {
3211   ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3212     CallArgs.getCleanupsToDeactivate();
3213   // Iterate in reverse to increase the likelihood of popping the cleanup.
3214   for (const auto &I : llvm::reverse(Cleanups)) {
3215     CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3216     I.IsActiveIP->eraseFromParent();
3217   }
3218 }
3219 
3220 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3221   if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3222     if (uop->getOpcode() == UO_AddrOf)
3223       return uop->getSubExpr();
3224   return nullptr;
3225 }
3226 
3227 /// Emit an argument that's being passed call-by-writeback.  That is,
3228 /// we are passing the address of an __autoreleased temporary; it
3229 /// might be copy-initialized with the current value of the given
3230 /// address, but it will definitely be copied out of after the call.
3231 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3232                              const ObjCIndirectCopyRestoreExpr *CRE) {
3233   LValue srcLV;
3234 
3235   // Make an optimistic effort to emit the address as an l-value.
3236   // This can fail if the argument expression is more complicated.
3237   if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3238     srcLV = CGF.EmitLValue(lvExpr);
3239 
3240   // Otherwise, just emit it as a scalar.
3241   } else {
3242     Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3243 
3244     QualType srcAddrType =
3245       CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3246     srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3247   }
3248   Address srcAddr = srcLV.getAddress();
3249 
3250   // The dest and src types don't necessarily match in LLVM terms
3251   // because of the crazy ObjC compatibility rules.
3252 
3253   llvm::PointerType *destType =
3254     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3255 
3256   // If the address is a constant null, just pass the appropriate null.
3257   if (isProvablyNull(srcAddr.getPointer())) {
3258     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3259              CRE->getType());
3260     return;
3261   }
3262 
3263   // Create the temporary.
3264   Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3265                                       CGF.getPointerAlign(),
3266                                       "icr.temp");
3267   // Loading an l-value can introduce a cleanup if the l-value is __weak,
3268   // and that cleanup will be conditional if we can't prove that the l-value
3269   // isn't null, so we need to register a dominating point so that the cleanups
3270   // system will make valid IR.
3271   CodeGenFunction::ConditionalEvaluation condEval(CGF);
3272 
3273   // Zero-initialize it if we're not doing a copy-initialization.
3274   bool shouldCopy = CRE->shouldCopy();
3275   if (!shouldCopy) {
3276     llvm::Value *null =
3277       llvm::ConstantPointerNull::get(
3278         cast<llvm::PointerType>(destType->getElementType()));
3279     CGF.Builder.CreateStore(null, temp);
3280   }
3281 
3282   llvm::BasicBlock *contBB = nullptr;
3283   llvm::BasicBlock *originBB = nullptr;
3284 
3285   // If the address is *not* known to be non-null, we need to switch.
3286   llvm::Value *finalArgument;
3287 
3288   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3289                                               CGF.CGM.getDataLayout());
3290   if (provablyNonNull) {
3291     finalArgument = temp.getPointer();
3292   } else {
3293     llvm::Value *isNull =
3294       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3295 
3296     finalArgument = CGF.Builder.CreateSelect(isNull,
3297                                    llvm::ConstantPointerNull::get(destType),
3298                                              temp.getPointer(), "icr.argument");
3299 
3300     // If we need to copy, then the load has to be conditional, which
3301     // means we need control flow.
3302     if (shouldCopy) {
3303       originBB = CGF.Builder.GetInsertBlock();
3304       contBB = CGF.createBasicBlock("icr.cont");
3305       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3306       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3307       CGF.EmitBlock(copyBB);
3308       condEval.begin(CGF);
3309     }
3310   }
3311 
3312   llvm::Value *valueToUse = nullptr;
3313 
3314   // Perform a copy if necessary.
3315   if (shouldCopy) {
3316     RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3317     assert(srcRV.isScalar());
3318 
3319     llvm::Value *src = srcRV.getScalarVal();
3320     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3321                                     "icr.cast");
3322 
3323     // Use an ordinary store, not a store-to-lvalue.
3324     CGF.Builder.CreateStore(src, temp);
3325 
3326     // If optimization is enabled, and the value was held in a
3327     // __strong variable, we need to tell the optimizer that this
3328     // value has to stay alive until we're doing the store back.
3329     // This is because the temporary is effectively unretained,
3330     // and so otherwise we can violate the high-level semantics.
3331     if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3332         srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3333       valueToUse = src;
3334     }
3335   }
3336 
3337   // Finish the control flow if we needed it.
3338   if (shouldCopy && !provablyNonNull) {
3339     llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3340     CGF.EmitBlock(contBB);
3341 
3342     // Make a phi for the value to intrinsically use.
3343     if (valueToUse) {
3344       llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3345                                                       "icr.to-use");
3346       phiToUse->addIncoming(valueToUse, copyBB);
3347       phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3348                             originBB);
3349       valueToUse = phiToUse;
3350     }
3351 
3352     condEval.end(CGF);
3353   }
3354 
3355   args.addWriteback(srcLV, temp, valueToUse);
3356   args.add(RValue::get(finalArgument), CRE->getType());
3357 }
3358 
3359 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3360   assert(!StackBase);
3361 
3362   // Save the stack.
3363   llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3364   StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3365 }
3366 
3367 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3368   if (StackBase) {
3369     // Restore the stack after the call.
3370     llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3371     CGF.Builder.CreateCall(F, StackBase);
3372   }
3373 }
3374 
3375 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3376                                           SourceLocation ArgLoc,
3377                                           AbstractCallee AC,
3378                                           unsigned ParmNum) {
3379   if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3380                          SanOpts.has(SanitizerKind::NullabilityArg)))
3381     return;
3382 
3383   // The param decl may be missing in a variadic function.
3384   auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3385   unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3386 
3387   // Prefer the nonnull attribute if it's present.
3388   const NonNullAttr *NNAttr = nullptr;
3389   if (SanOpts.has(SanitizerKind::NonnullAttribute))
3390     NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3391 
3392   bool CanCheckNullability = false;
3393   if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3394     auto Nullability = PVD->getType()->getNullability(getContext());
3395     CanCheckNullability = Nullability &&
3396                           *Nullability == NullabilityKind::NonNull &&
3397                           PVD->getTypeSourceInfo();
3398   }
3399 
3400   if (!NNAttr && !CanCheckNullability)
3401     return;
3402 
3403   SourceLocation AttrLoc;
3404   SanitizerMask CheckKind;
3405   SanitizerHandler Handler;
3406   if (NNAttr) {
3407     AttrLoc = NNAttr->getLocation();
3408     CheckKind = SanitizerKind::NonnullAttribute;
3409     Handler = SanitizerHandler::NonnullArg;
3410   } else {
3411     AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3412     CheckKind = SanitizerKind::NullabilityArg;
3413     Handler = SanitizerHandler::NullabilityArg;
3414   }
3415 
3416   SanitizerScope SanScope(this);
3417   assert(RV.isScalar());
3418   llvm::Value *V = RV.getScalarVal();
3419   llvm::Value *Cond =
3420       Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3421   llvm::Constant *StaticData[] = {
3422       EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3423       llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3424   };
3425   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3426 }
3427 
3428 void CodeGenFunction::EmitCallArgs(
3429     CallArgList &Args, ArrayRef<QualType> ArgTypes,
3430     llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3431     AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3432   assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3433 
3434   // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3435   // because arguments are destroyed left to right in the callee. As a special
3436   // case, there are certain language constructs that require left-to-right
3437   // evaluation, and in those cases we consider the evaluation order requirement
3438   // to trump the "destruction order is reverse construction order" guarantee.
3439   bool LeftToRight =
3440       CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3441           ? Order == EvaluationOrder::ForceLeftToRight
3442           : Order != EvaluationOrder::ForceRightToLeft;
3443 
3444   auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3445                                          RValue EmittedArg) {
3446     if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3447       return;
3448     auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3449     if (PS == nullptr)
3450       return;
3451 
3452     const auto &Context = getContext();
3453     auto SizeTy = Context.getSizeType();
3454     auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3455     assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3456     llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3457                                                      EmittedArg.getScalarVal(),
3458                                                      PS->isDynamic());
3459     Args.add(RValue::get(V), SizeTy);
3460     // If we're emitting args in reverse, be sure to do so with
3461     // pass_object_size, as well.
3462     if (!LeftToRight)
3463       std::swap(Args.back(), *(&Args.back() - 1));
3464   };
3465 
3466   // Insert a stack save if we're going to need any inalloca args.
3467   bool HasInAllocaArgs = false;
3468   if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3469     for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3470          I != E && !HasInAllocaArgs; ++I)
3471       HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3472     if (HasInAllocaArgs) {
3473       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3474       Args.allocateArgumentMemory(*this);
3475     }
3476   }
3477 
3478   // Evaluate each argument in the appropriate order.
3479   size_t CallArgsStart = Args.size();
3480   for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3481     unsigned Idx = LeftToRight ? I : E - I - 1;
3482     CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3483     unsigned InitialArgSize = Args.size();
3484     // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3485     // the argument and parameter match or the objc method is parameterized.
3486     assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3487             getContext().hasSameUnqualifiedType((*Arg)->getType(),
3488                                                 ArgTypes[Idx]) ||
3489             (isa<ObjCMethodDecl>(AC.getDecl()) &&
3490              isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3491            "Argument and parameter types don't match");
3492     EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3493     // In particular, we depend on it being the last arg in Args, and the
3494     // objectsize bits depend on there only being one arg if !LeftToRight.
3495     assert(InitialArgSize + 1 == Args.size() &&
3496            "The code below depends on only adding one arg per EmitCallArg");
3497     (void)InitialArgSize;
3498     // Since pointer argument are never emitted as LValue, it is safe to emit
3499     // non-null argument check for r-value only.
3500     if (!Args.back().hasLValue()) {
3501       RValue RVArg = Args.back().getKnownRValue();
3502       EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3503                           ParamsToSkip + Idx);
3504       // @llvm.objectsize should never have side-effects and shouldn't need
3505       // destruction/cleanups, so we can safely "emit" it after its arg,
3506       // regardless of right-to-leftness
3507       MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3508     }
3509   }
3510 
3511   if (!LeftToRight) {
3512     // Un-reverse the arguments we just evaluated so they match up with the LLVM
3513     // IR function.
3514     std::reverse(Args.begin() + CallArgsStart, Args.end());
3515   }
3516 }
3517 
3518 namespace {
3519 
3520 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3521   DestroyUnpassedArg(Address Addr, QualType Ty)
3522       : Addr(Addr), Ty(Ty) {}
3523 
3524   Address Addr;
3525   QualType Ty;
3526 
3527   void Emit(CodeGenFunction &CGF, Flags flags) override {
3528     QualType::DestructionKind DtorKind = Ty.isDestructedType();
3529     if (DtorKind == QualType::DK_cxx_destructor) {
3530       const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3531       assert(!Dtor->isTrivial());
3532       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3533                                 /*Delegating=*/false, Addr, Ty);
3534     } else {
3535       CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3536     }
3537   }
3538 };
3539 
3540 struct DisableDebugLocationUpdates {
3541   CodeGenFunction &CGF;
3542   bool disabledDebugInfo;
3543   DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3544     if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3545       CGF.disableDebugInfo();
3546   }
3547   ~DisableDebugLocationUpdates() {
3548     if (disabledDebugInfo)
3549       CGF.enableDebugInfo();
3550   }
3551 };
3552 
3553 } // end anonymous namespace
3554 
3555 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3556   if (!HasLV)
3557     return RV;
3558   LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3559   CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3560                         LV.isVolatile());
3561   IsUsed = true;
3562   return RValue::getAggregate(Copy.getAddress());
3563 }
3564 
3565 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3566   LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3567   if (!HasLV && RV.isScalar())
3568     CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
3569   else if (!HasLV && RV.isComplex())
3570     CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3571   else {
3572     auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
3573     LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3574     // We assume that call args are never copied into subobjects.
3575     CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3576                           HasLV ? LV.isVolatileQualified()
3577                                 : RV.isVolatileQualified());
3578   }
3579   IsUsed = true;
3580 }
3581 
3582 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3583                                   QualType type) {
3584   DisableDebugLocationUpdates Dis(*this, E);
3585   if (const ObjCIndirectCopyRestoreExpr *CRE
3586         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3587     assert(getLangOpts().ObjCAutoRefCount);
3588     return emitWritebackArg(*this, args, CRE);
3589   }
3590 
3591   assert(type->isReferenceType() == E->isGLValue() &&
3592          "reference binding to unmaterialized r-value!");
3593 
3594   if (E->isGLValue()) {
3595     assert(E->getObjectKind() == OK_Ordinary);
3596     return args.add(EmitReferenceBindingToExpr(E), type);
3597   }
3598 
3599   bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3600 
3601   // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3602   // However, we still have to push an EH-only cleanup in case we unwind before
3603   // we make it to the call.
3604   if (HasAggregateEvalKind &&
3605       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
3606     // If we're using inalloca, use the argument memory.  Otherwise, use a
3607     // temporary.
3608     AggValueSlot Slot;
3609     if (args.isUsingInAlloca())
3610       Slot = createPlaceholderSlot(*this, type);
3611     else
3612       Slot = CreateAggTemp(type, "agg.tmp");
3613 
3614     bool DestroyedInCallee = true, NeedsEHCleanup = true;
3615     if (const auto *RD = type->getAsCXXRecordDecl())
3616       DestroyedInCallee = RD->hasNonTrivialDestructor();
3617     else
3618       NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3619 
3620     if (DestroyedInCallee)
3621       Slot.setExternallyDestructed();
3622 
3623     EmitAggExpr(E, Slot);
3624     RValue RV = Slot.asRValue();
3625     args.add(RV, type);
3626 
3627     if (DestroyedInCallee && NeedsEHCleanup) {
3628       // Create a no-op GEP between the placeholder and the cleanup so we can
3629       // RAUW it successfully.  It also serves as a marker of the first
3630       // instruction where the cleanup is active.
3631       pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3632                                               type);
3633       // This unreachable is a temporary marker which will be removed later.
3634       llvm::Instruction *IsActive = Builder.CreateUnreachable();
3635       args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3636     }
3637     return;
3638   }
3639 
3640   if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3641       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3642     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3643     assert(L.isSimple());
3644     args.addUncopiedAggregate(L, type);
3645     return;
3646   }
3647 
3648   args.add(EmitAnyExprToTemp(E), type);
3649 }
3650 
3651 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3652   // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3653   // implicitly widens null pointer constants that are arguments to varargs
3654   // functions to pointer-sized ints.
3655   if (!getTarget().getTriple().isOSWindows())
3656     return Arg->getType();
3657 
3658   if (Arg->getType()->isIntegerType() &&
3659       getContext().getTypeSize(Arg->getType()) <
3660           getContext().getTargetInfo().getPointerWidth(0) &&
3661       Arg->isNullPointerConstant(getContext(),
3662                                  Expr::NPC_ValueDependentIsNotNull)) {
3663     return getContext().getIntPtrType();
3664   }
3665 
3666   return Arg->getType();
3667 }
3668 
3669 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3670 // optimizer it can aggressively ignore unwind edges.
3671 void
3672 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3673   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3674       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3675     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3676                       CGM.getNoObjCARCExceptionsMetadata());
3677 }
3678 
3679 /// Emits a call to the given no-arguments nounwind runtime function.
3680 llvm::CallInst *
3681 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3682                                          const llvm::Twine &name) {
3683   return EmitNounwindRuntimeCall(callee, None, name);
3684 }
3685 
3686 /// Emits a call to the given nounwind runtime function.
3687 llvm::CallInst *
3688 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3689                                          ArrayRef<llvm::Value *> args,
3690                                          const llvm::Twine &name) {
3691   llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3692   call->setDoesNotThrow();
3693   return call;
3694 }
3695 
3696 /// Emits a simple call (never an invoke) to the given no-arguments
3697 /// runtime function.
3698 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
3699                                                  const llvm::Twine &name) {
3700   return EmitRuntimeCall(callee, None, name);
3701 }
3702 
3703 // Calls which may throw must have operand bundles indicating which funclet
3704 // they are nested within.
3705 SmallVector<llvm::OperandBundleDef, 1>
3706 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3707   SmallVector<llvm::OperandBundleDef, 1> BundleList;
3708   // There is no need for a funclet operand bundle if we aren't inside a
3709   // funclet.
3710   if (!CurrentFuncletPad)
3711     return BundleList;
3712 
3713   // Skip intrinsics which cannot throw.
3714   auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3715   if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3716     return BundleList;
3717 
3718   BundleList.emplace_back("funclet", CurrentFuncletPad);
3719   return BundleList;
3720 }
3721 
3722 /// Emits a simple call (never an invoke) to the given runtime function.
3723 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
3724                                                  ArrayRef<llvm::Value *> args,
3725                                                  const llvm::Twine &name) {
3726   llvm::CallInst *call = Builder.CreateCall(
3727       callee, args, getBundlesForFunclet(callee.getCallee()), name);
3728   call->setCallingConv(getRuntimeCC());
3729   return call;
3730 }
3731 
3732 /// Emits a call or invoke to the given noreturn runtime function.
3733 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
3734     llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
3735   SmallVector<llvm::OperandBundleDef, 1> BundleList =
3736       getBundlesForFunclet(callee.getCallee());
3737 
3738   if (getInvokeDest()) {
3739     llvm::InvokeInst *invoke =
3740       Builder.CreateInvoke(callee,
3741                            getUnreachableBlock(),
3742                            getInvokeDest(),
3743                            args,
3744                            BundleList);
3745     invoke->setDoesNotReturn();
3746     invoke->setCallingConv(getRuntimeCC());
3747   } else {
3748     llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3749     call->setDoesNotReturn();
3750     call->setCallingConv(getRuntimeCC());
3751     Builder.CreateUnreachable();
3752   }
3753 }
3754 
3755 /// Emits a call or invoke instruction to the given nullary runtime function.
3756 llvm::CallBase *
3757 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3758                                          const Twine &name) {
3759   return EmitRuntimeCallOrInvoke(callee, None, name);
3760 }
3761 
3762 /// Emits a call or invoke instruction to the given runtime function.
3763 llvm::CallBase *
3764 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3765                                          ArrayRef<llvm::Value *> args,
3766                                          const Twine &name) {
3767   llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
3768   call->setCallingConv(getRuntimeCC());
3769   return call;
3770 }
3771 
3772 /// Emits a call or invoke instruction to the given function, depending
3773 /// on the current state of the EH stack.
3774 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
3775                                                   ArrayRef<llvm::Value *> Args,
3776                                                   const Twine &Name) {
3777   llvm::BasicBlock *InvokeDest = getInvokeDest();
3778   SmallVector<llvm::OperandBundleDef, 1> BundleList =
3779       getBundlesForFunclet(Callee.getCallee());
3780 
3781   llvm::CallBase *Inst;
3782   if (!InvokeDest)
3783     Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3784   else {
3785     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3786     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3787                                 Name);
3788     EmitBlock(ContBB);
3789   }
3790 
3791   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3792   // optimizer it can aggressively ignore unwind edges.
3793   if (CGM.getLangOpts().ObjCAutoRefCount)
3794     AddObjCARCExceptionMetadata(Inst);
3795 
3796   return Inst;
3797 }
3798 
3799 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3800                                                   llvm::Value *New) {
3801   DeferredReplacements.push_back(std::make_pair(Old, New));
3802 }
3803 
3804 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3805                                  const CGCallee &Callee,
3806                                  ReturnValueSlot ReturnValue,
3807                                  const CallArgList &CallArgs,
3808                                  llvm::CallBase **callOrInvoke,
3809                                  SourceLocation Loc) {
3810   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3811 
3812   assert(Callee.isOrdinary() || Callee.isVirtual());
3813 
3814   // Handle struct-return functions by passing a pointer to the
3815   // location that we would like to return into.
3816   QualType RetTy = CallInfo.getReturnType();
3817   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3818 
3819   llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
3820 
3821   const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
3822   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
3823     // We can only guarantee that a function is called from the correct
3824     // context/function based on the appropriate target attributes,
3825     // so only check in the case where we have both always_inline and target
3826     // since otherwise we could be making a conditional call after a check for
3827     // the proper cpu features (and it won't cause code generation issues due to
3828     // function based code generation).
3829     if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
3830         TargetDecl->hasAttr<TargetAttr>())
3831       checkTargetFeatures(Loc, FD);
3832 
3833 #ifndef NDEBUG
3834   if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
3835     // For an inalloca varargs function, we don't expect CallInfo to match the
3836     // function pointer's type, because the inalloca struct a will have extra
3837     // fields in it for the varargs parameters.  Code later in this function
3838     // bitcasts the function pointer to the type derived from CallInfo.
3839     //
3840     // In other cases, we assert that the types match up (until pointers stop
3841     // having pointee types).
3842     llvm::Type *TypeFromVal;
3843     if (Callee.isVirtual())
3844       TypeFromVal = Callee.getVirtualFunctionType();
3845     else
3846       TypeFromVal =
3847           Callee.getFunctionPointer()->getType()->getPointerElementType();
3848     assert(IRFuncTy == TypeFromVal);
3849   }
3850 #endif
3851 
3852   // 1. Set up the arguments.
3853 
3854   // If we're using inalloca, insert the allocation after the stack save.
3855   // FIXME: Do this earlier rather than hacking it in here!
3856   Address ArgMemory = Address::invalid();
3857   if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3858     const llvm::DataLayout &DL = CGM.getDataLayout();
3859     llvm::Instruction *IP = CallArgs.getStackBase();
3860     llvm::AllocaInst *AI;
3861     if (IP) {
3862       IP = IP->getNextNode();
3863       AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3864                                 "argmem", IP);
3865     } else {
3866       AI = CreateTempAlloca(ArgStruct, "argmem");
3867     }
3868     auto Align = CallInfo.getArgStructAlignment();
3869     AI->setAlignment(Align.getAsAlign());
3870     AI->setUsedWithInAlloca(true);
3871     assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3872     ArgMemory = Address(AI, Align);
3873   }
3874 
3875   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3876   SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3877 
3878   // If the call returns a temporary with struct return, create a temporary
3879   // alloca to hold the result, unless one is given to us.
3880   Address SRetPtr = Address::invalid();
3881   Address SRetAlloca = Address::invalid();
3882   llvm::Value *UnusedReturnSizePtr = nullptr;
3883   if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3884     if (!ReturnValue.isNull()) {
3885       SRetPtr = ReturnValue.getValue();
3886     } else {
3887       SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
3888       if (HaveInsertPoint() && ReturnValue.isUnused()) {
3889         uint64_t size =
3890             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3891         UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
3892       }
3893     }
3894     if (IRFunctionArgs.hasSRetArg()) {
3895       IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3896     } else if (RetAI.isInAlloca()) {
3897       Address Addr =
3898           Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
3899       Builder.CreateStore(SRetPtr.getPointer(), Addr);
3900     }
3901   }
3902 
3903   Address swiftErrorTemp = Address::invalid();
3904   Address swiftErrorArg = Address::invalid();
3905 
3906   // When passing arguments using temporary allocas, we need to add the
3907   // appropriate lifetime markers. This vector keeps track of all the lifetime
3908   // markers that need to be ended right after the call.
3909   SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;
3910 
3911   // Translate all of the arguments as necessary to match the IR lowering.
3912   assert(CallInfo.arg_size() == CallArgs.size() &&
3913          "Mismatch between function signature & arguments.");
3914   unsigned ArgNo = 0;
3915   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3916   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3917        I != E; ++I, ++info_it, ++ArgNo) {
3918     const ABIArgInfo &ArgInfo = info_it->info;
3919 
3920     // Insert a padding argument to ensure proper alignment.
3921     if (IRFunctionArgs.hasPaddingArg(ArgNo))
3922       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3923           llvm::UndefValue::get(ArgInfo.getPaddingType());
3924 
3925     unsigned FirstIRArg, NumIRArgs;
3926     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3927 
3928     switch (ArgInfo.getKind()) {
3929     case ABIArgInfo::InAlloca: {
3930       assert(NumIRArgs == 0);
3931       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3932       if (I->isAggregate()) {
3933         // Replace the placeholder with the appropriate argument slot GEP.
3934         Address Addr = I->hasLValue()
3935                            ? I->getKnownLValue().getAddress()
3936                            : I->getKnownRValue().getAggregateAddress();
3937         llvm::Instruction *Placeholder =
3938             cast<llvm::Instruction>(Addr.getPointer());
3939         CGBuilderTy::InsertPoint IP = Builder.saveIP();
3940         Builder.SetInsertPoint(Placeholder);
3941         Addr =
3942             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
3943         Builder.restoreIP(IP);
3944         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3945       } else {
3946         // Store the RValue into the argument struct.
3947         Address Addr =
3948             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
3949         unsigned AS = Addr.getType()->getPointerAddressSpace();
3950         llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3951         // There are some cases where a trivial bitcast is not avoidable.  The
3952         // definition of a type later in a translation unit may change it's type
3953         // from {}* to (%struct.foo*)*.
3954         if (Addr.getType() != MemType)
3955           Addr = Builder.CreateBitCast(Addr, MemType);
3956         I->copyInto(*this, Addr);
3957       }
3958       break;
3959     }
3960 
3961     case ABIArgInfo::Indirect: {
3962       assert(NumIRArgs == 1);
3963       if (!I->isAggregate()) {
3964         // Make a temporary alloca to pass the argument.
3965         Address Addr = CreateMemTempWithoutCast(
3966             I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
3967         IRCallArgs[FirstIRArg] = Addr.getPointer();
3968 
3969         I->copyInto(*this, Addr);
3970       } else {
3971         // We want to avoid creating an unnecessary temporary+copy here;
3972         // however, we need one in three cases:
3973         // 1. If the argument is not byval, and we are required to copy the
3974         //    source.  (This case doesn't occur on any common architecture.)
3975         // 2. If the argument is byval, RV is not sufficiently aligned, and
3976         //    we cannot force it to be sufficiently aligned.
3977         // 3. If the argument is byval, but RV is not located in default
3978         //    or alloca address space.
3979         Address Addr = I->hasLValue()
3980                            ? I->getKnownLValue().getAddress()
3981                            : I->getKnownRValue().getAggregateAddress();
3982         llvm::Value *V = Addr.getPointer();
3983         CharUnits Align = ArgInfo.getIndirectAlign();
3984         const llvm::DataLayout *TD = &CGM.getDataLayout();
3985 
3986         assert((FirstIRArg >= IRFuncTy->getNumParams() ||
3987                 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
3988                     TD->getAllocaAddrSpace()) &&
3989                "indirect argument must be in alloca address space");
3990 
3991         bool NeedCopy = false;
3992 
3993         if (Addr.getAlignment() < Align &&
3994             llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
3995                 Align.getQuantity()) {
3996           NeedCopy = true;
3997         } else if (I->hasLValue()) {
3998           auto LV = I->getKnownLValue();
3999           auto AS = LV.getAddressSpace();
4000 
4001           if ((!ArgInfo.getIndirectByVal() &&
4002                (LV.getAlignment() >=
4003                 getContext().getTypeAlignInChars(I->Ty)))) {
4004             NeedCopy = true;
4005           }
4006           if (!getLangOpts().OpenCL) {
4007             if ((ArgInfo.getIndirectByVal() &&
4008                 (AS != LangAS::Default &&
4009                  AS != CGM.getASTAllocaAddressSpace()))) {
4010               NeedCopy = true;
4011             }
4012           }
4013           // For OpenCL even if RV is located in default or alloca address space
4014           // we don't want to perform address space cast for it.
4015           else if ((ArgInfo.getIndirectByVal() &&
4016                     Addr.getType()->getAddressSpace() != IRFuncTy->
4017                       getParamType(FirstIRArg)->getPointerAddressSpace())) {
4018             NeedCopy = true;
4019           }
4020         }
4021 
4022         if (NeedCopy) {
4023           // Create an aligned temporary, and copy to it.
4024           Address AI = CreateMemTempWithoutCast(
4025               I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
4026           IRCallArgs[FirstIRArg] = AI.getPointer();
4027 
4028           // Emit lifetime markers for the temporary alloca.
4029           uint64_t ByvalTempElementSize =
4030               CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
4031           llvm::Value *LifetimeSize =
4032               EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
4033 
4034           // Add cleanup code to emit the end lifetime marker after the call.
4035           if (LifetimeSize) // In case we disabled lifetime markers.
4036             CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
4037 
4038           // Generate the copy.
4039           I->copyInto(*this, AI);
4040         } else {
4041           // Skip the extra memcpy call.
4042           auto *T = V->getType()->getPointerElementType()->getPointerTo(
4043               CGM.getDataLayout().getAllocaAddrSpace());
4044           IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
4045               *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
4046               true);
4047         }
4048       }
4049       break;
4050     }
4051 
4052     case ABIArgInfo::Ignore:
4053       assert(NumIRArgs == 0);
4054       break;
4055 
4056     case ABIArgInfo::Extend:
4057     case ABIArgInfo::Direct: {
4058       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4059           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4060           ArgInfo.getDirectOffset() == 0) {
4061         assert(NumIRArgs == 1);
4062         llvm::Value *V;
4063         if (!I->isAggregate())
4064           V = I->getKnownRValue().getScalarVal();
4065         else
4066           V = Builder.CreateLoad(
4067               I->hasLValue() ? I->getKnownLValue().getAddress()
4068                              : I->getKnownRValue().getAggregateAddress());
4069 
4070         // Implement swifterror by copying into a new swifterror argument.
4071         // We'll write back in the normal path out of the call.
4072         if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4073               == ParameterABI::SwiftErrorResult) {
4074           assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4075 
4076           QualType pointeeTy = I->Ty->getPointeeType();
4077           swiftErrorArg =
4078             Address(V, getContext().getTypeAlignInChars(pointeeTy));
4079 
4080           swiftErrorTemp =
4081             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4082           V = swiftErrorTemp.getPointer();
4083           cast<llvm::AllocaInst>(V)->setSwiftError(true);
4084 
4085           llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4086           Builder.CreateStore(errorValue, swiftErrorTemp);
4087         }
4088 
4089         // We might have to widen integers, but we should never truncate.
4090         if (ArgInfo.getCoerceToType() != V->getType() &&
4091             V->getType()->isIntegerTy())
4092           V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4093 
4094         // If the argument doesn't match, perform a bitcast to coerce it.  This
4095         // can happen due to trivial type mismatches.
4096         if (FirstIRArg < IRFuncTy->getNumParams() &&
4097             V->getType() != IRFuncTy->getParamType(FirstIRArg))
4098           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4099 
4100         IRCallArgs[FirstIRArg] = V;
4101         break;
4102       }
4103 
4104       // FIXME: Avoid the conversion through memory if possible.
4105       Address Src = Address::invalid();
4106       if (!I->isAggregate()) {
4107         Src = CreateMemTemp(I->Ty, "coerce");
4108         I->copyInto(*this, Src);
4109       } else {
4110         Src = I->hasLValue() ? I->getKnownLValue().getAddress()
4111                              : I->getKnownRValue().getAggregateAddress();
4112       }
4113 
4114       // If the value is offset in memory, apply the offset now.
4115       Src = emitAddressAtOffset(*this, Src, ArgInfo);
4116 
4117       // Fast-isel and the optimizer generally like scalar values better than
4118       // FCAs, so we flatten them if this is safe to do for this argument.
4119       llvm::StructType *STy =
4120             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4121       if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4122         llvm::Type *SrcTy = Src.getType()->getElementType();
4123         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4124         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4125 
4126         // If the source type is smaller than the destination type of the
4127         // coerce-to logic, copy the source value into a temp alloca the size
4128         // of the destination type to allow loading all of it. The bits past
4129         // the source value are left undef.
4130         if (SrcSize < DstSize) {
4131           Address TempAlloca
4132             = CreateTempAlloca(STy, Src.getAlignment(),
4133                                Src.getName() + ".coerce");
4134           Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4135           Src = TempAlloca;
4136         } else {
4137           Src = Builder.CreateBitCast(Src,
4138                                       STy->getPointerTo(Src.getAddressSpace()));
4139         }
4140 
4141         assert(NumIRArgs == STy->getNumElements());
4142         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4143           Address EltPtr = Builder.CreateStructGEP(Src, i);
4144           llvm::Value *LI = Builder.CreateLoad(EltPtr);
4145           IRCallArgs[FirstIRArg + i] = LI;
4146         }
4147       } else {
4148         // In the simple case, just pass the coerced loaded value.
4149         assert(NumIRArgs == 1);
4150         IRCallArgs[FirstIRArg] =
4151           CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4152       }
4153 
4154       break;
4155     }
4156 
4157     case ABIArgInfo::CoerceAndExpand: {
4158       auto coercionType = ArgInfo.getCoerceAndExpandType();
4159       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4160 
4161       llvm::Value *tempSize = nullptr;
4162       Address addr = Address::invalid();
4163       Address AllocaAddr = Address::invalid();
4164       if (I->isAggregate()) {
4165         addr = I->hasLValue() ? I->getKnownLValue().getAddress()
4166                               : I->getKnownRValue().getAggregateAddress();
4167 
4168       } else {
4169         RValue RV = I->getKnownRValue();
4170         assert(RV.isScalar()); // complex should always just be direct
4171 
4172         llvm::Type *scalarType = RV.getScalarVal()->getType();
4173         auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4174         auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4175 
4176         // Materialize to a temporary.
4177         addr = CreateTempAlloca(
4178             RV.getScalarVal()->getType(),
4179             CharUnits::fromQuantity(std::max(
4180                 (unsigned)layout->getAlignment().value(), scalarAlign)),
4181             "tmp",
4182             /*ArraySize=*/nullptr, &AllocaAddr);
4183         tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4184 
4185         Builder.CreateStore(RV.getScalarVal(), addr);
4186       }
4187 
4188       addr = Builder.CreateElementBitCast(addr, coercionType);
4189 
4190       unsigned IRArgPos = FirstIRArg;
4191       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4192         llvm::Type *eltType = coercionType->getElementType(i);
4193         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4194         Address eltAddr = Builder.CreateStructGEP(addr, i);
4195         llvm::Value *elt = Builder.CreateLoad(eltAddr);
4196         IRCallArgs[IRArgPos++] = elt;
4197       }
4198       assert(IRArgPos == FirstIRArg + NumIRArgs);
4199 
4200       if (tempSize) {
4201         EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4202       }
4203 
4204       break;
4205     }
4206 
4207     case ABIArgInfo::Expand:
4208       unsigned IRArgPos = FirstIRArg;
4209       ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4210       assert(IRArgPos == FirstIRArg + NumIRArgs);
4211       break;
4212     }
4213   }
4214 
4215   const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4216   llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4217 
4218   // If we're using inalloca, set up that argument.
4219   if (ArgMemory.isValid()) {
4220     llvm::Value *Arg = ArgMemory.getPointer();
4221     if (CallInfo.isVariadic()) {
4222       // When passing non-POD arguments by value to variadic functions, we will
4223       // end up with a variadic prototype and an inalloca call site.  In such
4224       // cases, we can't do any parameter mismatch checks.  Give up and bitcast
4225       // the callee.
4226       unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4227       CalleePtr =
4228           Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
4229     } else {
4230       llvm::Type *LastParamTy =
4231           IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4232       if (Arg->getType() != LastParamTy) {
4233 #ifndef NDEBUG
4234         // Assert that these structs have equivalent element types.
4235         llvm::StructType *FullTy = CallInfo.getArgStruct();
4236         llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4237             cast<llvm::PointerType>(LastParamTy)->getElementType());
4238         assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4239         for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4240                                                 DE = DeclaredTy->element_end(),
4241                                                 FI = FullTy->element_begin();
4242              DI != DE; ++DI, ++FI)
4243           assert(*DI == *FI);
4244 #endif
4245         Arg = Builder.CreateBitCast(Arg, LastParamTy);
4246       }
4247     }
4248     assert(IRFunctionArgs.hasInallocaArg());
4249     IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4250   }
4251 
4252   // 2. Prepare the function pointer.
4253 
4254   // If the callee is a bitcast of a non-variadic function to have a
4255   // variadic function pointer type, check to see if we can remove the
4256   // bitcast.  This comes up with unprototyped functions.
4257   //
4258   // This makes the IR nicer, but more importantly it ensures that we
4259   // can inline the function at -O0 if it is marked always_inline.
4260   auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
4261                                    llvm::Value *Ptr) -> llvm::Function * {
4262     if (!CalleeFT->isVarArg())
4263       return nullptr;
4264 
4265     // Get underlying value if it's a bitcast
4266     if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
4267       if (CE->getOpcode() == llvm::Instruction::BitCast)
4268         Ptr = CE->getOperand(0);
4269     }
4270 
4271     llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
4272     if (!OrigFn)
4273       return nullptr;
4274 
4275     llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4276 
4277     // If the original type is variadic, or if any of the component types
4278     // disagree, we cannot remove the cast.
4279     if (OrigFT->isVarArg() ||
4280         OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4281         OrigFT->getReturnType() != CalleeFT->getReturnType())
4282       return nullptr;
4283 
4284     for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4285       if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4286         return nullptr;
4287 
4288     return OrigFn;
4289   };
4290 
4291   if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
4292     CalleePtr = OrigFn;
4293     IRFuncTy = OrigFn->getFunctionType();
4294   }
4295 
4296   // 3. Perform the actual call.
4297 
4298   // Deactivate any cleanups that we're supposed to do immediately before
4299   // the call.
4300   if (!CallArgs.getCleanupsToDeactivate().empty())
4301     deactivateArgCleanupsBeforeCall(*this, CallArgs);
4302 
4303   // Assert that the arguments we computed match up.  The IR verifier
4304   // will catch this, but this is a common enough source of problems
4305   // during IRGen changes that it's way better for debugging to catch
4306   // it ourselves here.
4307 #ifndef NDEBUG
4308   assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4309   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4310     // Inalloca argument can have different type.
4311     if (IRFunctionArgs.hasInallocaArg() &&
4312         i == IRFunctionArgs.getInallocaArgNo())
4313       continue;
4314     if (i < IRFuncTy->getNumParams())
4315       assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4316   }
4317 #endif
4318 
4319   // Update the largest vector width if any arguments have vector types.
4320   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4321     if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
4322       LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
4323                                    VT->getPrimitiveSizeInBits().getFixedSize());
4324   }
4325 
4326   // Compute the calling convention and attributes.
4327   unsigned CallingConv;
4328   llvm::AttributeList Attrs;
4329   CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4330                              Callee.getAbstractInfo(), Attrs, CallingConv,
4331                              /*AttrOnCallSite=*/true);
4332 
4333   // Apply some call-site-specific attributes.
4334   // TODO: work this into building the attribute set.
4335 
4336   // Apply always_inline to all calls within flatten functions.
4337   // FIXME: should this really take priority over __try, below?
4338   if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4339       !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
4340     Attrs =
4341         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4342                            llvm::Attribute::AlwaysInline);
4343   }
4344 
4345   // Disable inlining inside SEH __try blocks.
4346   if (isSEHTryScope()) {
4347     Attrs =
4348         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4349                            llvm::Attribute::NoInline);
4350   }
4351 
4352   // Decide whether to use a call or an invoke.
4353   bool CannotThrow;
4354   if (currentFunctionUsesSEHTry()) {
4355     // SEH cares about asynchronous exceptions, so everything can "throw."
4356     CannotThrow = false;
4357   } else if (isCleanupPadScope() &&
4358              EHPersonality::get(*this).isMSVCXXPersonality()) {
4359     // The MSVC++ personality will implicitly terminate the program if an
4360     // exception is thrown during a cleanup outside of a try/catch.
4361     // We don't need to model anything in IR to get this behavior.
4362     CannotThrow = true;
4363   } else {
4364     // Otherwise, nounwind call sites will never throw.
4365     CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4366                                      llvm::Attribute::NoUnwind);
4367   }
4368 
4369   // If we made a temporary, be sure to clean up after ourselves. Note that we
4370   // can't depend on being inside of an ExprWithCleanups, so we need to manually
4371   // pop this cleanup later on. Being eager about this is OK, since this
4372   // temporary is 'invisible' outside of the callee.
4373   if (UnusedReturnSizePtr)
4374     pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4375                                          UnusedReturnSizePtr);
4376 
4377   llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4378 
4379   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4380       getBundlesForFunclet(CalleePtr);
4381 
4382   // Emit the actual call/invoke instruction.
4383   llvm::CallBase *CI;
4384   if (!InvokeDest) {
4385     CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
4386   } else {
4387     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4388     CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
4389                               BundleList);
4390     EmitBlock(Cont);
4391   }
4392   if (callOrInvoke)
4393     *callOrInvoke = CI;
4394 
4395   // Apply the attributes and calling convention.
4396   CI->setAttributes(Attrs);
4397   CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4398 
4399   // Apply various metadata.
4400 
4401   if (!CI->getType()->isVoidTy())
4402     CI->setName("call");
4403 
4404   // Update largest vector width from the return type.
4405   if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
4406     LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
4407                                   VT->getPrimitiveSizeInBits().getFixedSize());
4408 
4409   // Insert instrumentation or attach profile metadata at indirect call sites.
4410   // For more details, see the comment before the definition of
4411   // IPVK_IndirectCallTarget in InstrProfData.inc.
4412   if (!CI->getCalledFunction())
4413     PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4414                      CI, CalleePtr);
4415 
4416   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4417   // optimizer it can aggressively ignore unwind edges.
4418   if (CGM.getLangOpts().ObjCAutoRefCount)
4419     AddObjCARCExceptionMetadata(CI);
4420 
4421   // Suppress tail calls if requested.
4422   if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4423     if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4424       Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4425   }
4426 
4427   // Add metadata for calls to MSAllocator functions
4428   if (getDebugInfo() && TargetDecl &&
4429       TargetDecl->hasAttr<MSAllocatorAttr>())
4430     getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy, Loc);
4431 
4432   // 4. Finish the call.
4433 
4434   // If the call doesn't return, finish the basic block and clear the
4435   // insertion point; this allows the rest of IRGen to discard
4436   // unreachable code.
4437   if (CI->doesNotReturn()) {
4438     if (UnusedReturnSizePtr)
4439       PopCleanupBlock();
4440 
4441     // Strip away the noreturn attribute to better diagnose unreachable UB.
4442     if (SanOpts.has(SanitizerKind::Unreachable)) {
4443       // Also remove from function since CallBase::hasFnAttr additionally checks
4444       // attributes of the called function.
4445       if (auto *F = CI->getCalledFunction())
4446         F->removeFnAttr(llvm::Attribute::NoReturn);
4447       CI->removeAttribute(llvm::AttributeList::FunctionIndex,
4448                           llvm::Attribute::NoReturn);
4449 
4450       // Avoid incompatibility with ASan which relies on the `noreturn`
4451       // attribute to insert handler calls.
4452       if (SanOpts.hasOneOf(SanitizerKind::Address |
4453                            SanitizerKind::KernelAddress)) {
4454         SanitizerScope SanScope(this);
4455         llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
4456         Builder.SetInsertPoint(CI);
4457         auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4458         llvm::FunctionCallee Fn =
4459             CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
4460         EmitNounwindRuntimeCall(Fn);
4461       }
4462     }
4463 
4464     EmitUnreachable(Loc);
4465     Builder.ClearInsertionPoint();
4466 
4467     // FIXME: For now, emit a dummy basic block because expr emitters in
4468     // generally are not ready to handle emitting expressions at unreachable
4469     // points.
4470     EnsureInsertPoint();
4471 
4472     // Return a reasonable RValue.
4473     return GetUndefRValue(RetTy);
4474   }
4475 
4476   // Perform the swifterror writeback.
4477   if (swiftErrorTemp.isValid()) {
4478     llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4479     Builder.CreateStore(errorResult, swiftErrorArg);
4480   }
4481 
4482   // Emit any call-associated writebacks immediately.  Arguably this
4483   // should happen after any return-value munging.
4484   if (CallArgs.hasWritebacks())
4485     emitWritebacks(*this, CallArgs);
4486 
4487   // The stack cleanup for inalloca arguments has to run out of the normal
4488   // lexical order, so deactivate it and run it manually here.
4489   CallArgs.freeArgumentMemory(*this);
4490 
4491   // Extract the return value.
4492   RValue Ret = [&] {
4493     switch (RetAI.getKind()) {
4494     case ABIArgInfo::CoerceAndExpand: {
4495       auto coercionType = RetAI.getCoerceAndExpandType();
4496 
4497       Address addr = SRetPtr;
4498       addr = Builder.CreateElementBitCast(addr, coercionType);
4499 
4500       assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4501       bool requiresExtract = isa<llvm::StructType>(CI->getType());
4502 
4503       unsigned unpaddedIndex = 0;
4504       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4505         llvm::Type *eltType = coercionType->getElementType(i);
4506         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4507         Address eltAddr = Builder.CreateStructGEP(addr, i);
4508         llvm::Value *elt = CI;
4509         if (requiresExtract)
4510           elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4511         else
4512           assert(unpaddedIndex == 0);
4513         Builder.CreateStore(elt, eltAddr);
4514       }
4515       // FALLTHROUGH
4516       LLVM_FALLTHROUGH;
4517     }
4518 
4519     case ABIArgInfo::InAlloca:
4520     case ABIArgInfo::Indirect: {
4521       RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4522       if (UnusedReturnSizePtr)
4523         PopCleanupBlock();
4524       return ret;
4525     }
4526 
4527     case ABIArgInfo::Ignore:
4528       // If we are ignoring an argument that had a result, make sure to
4529       // construct the appropriate return value for our caller.
4530       return GetUndefRValue(RetTy);
4531 
4532     case ABIArgInfo::Extend:
4533     case ABIArgInfo::Direct: {
4534       llvm::Type *RetIRTy = ConvertType(RetTy);
4535       if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4536         switch (getEvaluationKind(RetTy)) {
4537         case TEK_Complex: {
4538           llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4539           llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4540           return RValue::getComplex(std::make_pair(Real, Imag));
4541         }
4542         case TEK_Aggregate: {
4543           Address DestPtr = ReturnValue.getValue();
4544           bool DestIsVolatile = ReturnValue.isVolatile();
4545 
4546           if (!DestPtr.isValid()) {
4547             DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4548             DestIsVolatile = false;
4549           }
4550           BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4551           return RValue::getAggregate(DestPtr);
4552         }
4553         case TEK_Scalar: {
4554           // If the argument doesn't match, perform a bitcast to coerce it.  This
4555           // can happen due to trivial type mismatches.
4556           llvm::Value *V = CI;
4557           if (V->getType() != RetIRTy)
4558             V = Builder.CreateBitCast(V, RetIRTy);
4559           return RValue::get(V);
4560         }
4561         }
4562         llvm_unreachable("bad evaluation kind");
4563       }
4564 
4565       Address DestPtr = ReturnValue.getValue();
4566       bool DestIsVolatile = ReturnValue.isVolatile();
4567 
4568       if (!DestPtr.isValid()) {
4569         DestPtr = CreateMemTemp(RetTy, "coerce");
4570         DestIsVolatile = false;
4571       }
4572 
4573       // If the value is offset in memory, apply the offset now.
4574       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4575       CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4576 
4577       return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4578     }
4579 
4580     case ABIArgInfo::Expand:
4581       llvm_unreachable("Invalid ABI kind for return argument");
4582     }
4583 
4584     llvm_unreachable("Unhandled ABIArgInfo::Kind");
4585   } ();
4586 
4587   // Emit the assume_aligned check on the return value.
4588   if (Ret.isScalar() && TargetDecl) {
4589     if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4590       llvm::Value *OffsetValue = nullptr;
4591       if (const auto *Offset = AA->getOffset())
4592         OffsetValue = EmitScalarExpr(Offset);
4593 
4594       llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4595       llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4596       EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4597                               AlignmentCI, OffsetValue);
4598     } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4599       llvm::Value *AlignmentVal = CallArgs[AA->getParamIndex().getLLVMIndex()]
4600                                       .getRValue(*this)
4601                                       .getScalarVal();
4602       EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4603                               AlignmentVal);
4604     }
4605   }
4606 
4607   // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
4608   // we can't use the full cleanup mechanism.
4609   for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
4610     LifetimeEnd.Emit(*this, /*Flags=*/{});
4611 
4612   return Ret;
4613 }
4614 
4615 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4616   if (isVirtual()) {
4617     const CallExpr *CE = getVirtualCallExpr();
4618     return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4619         CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
4620         CE ? CE->getBeginLoc() : SourceLocation());
4621   }
4622 
4623   return *this;
4624 }
4625 
4626 /* VarArg handling */
4627 
4628 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4629   VAListAddr = VE->isMicrosoftABI()
4630                  ? EmitMSVAListRef(VE->getSubExpr())
4631                  : EmitVAListRef(VE->getSubExpr());
4632   QualType Ty = VE->getType();
4633   if (VE->isMicrosoftABI())
4634     return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4635   return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4636 }
4637