1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // These classes wrap the information about a call or function
10 // definition used to handle ABI compliancy.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGCall.h"
15 #include "ABIInfo.h"
16 #include "CGBlocks.h"
17 #include "CGCXXABI.h"
18 #include "CGCleanup.h"
19 #include "CodeGenFunction.h"
20 #include "CodeGenModule.h"
21 #include "TargetInfo.h"
22 #include "clang/AST/Decl.h"
23 #include "clang/AST/DeclCXX.h"
24 #include "clang/AST/DeclObjC.h"
25 #include "clang/Basic/CodeGenOptions.h"
26 #include "clang/Basic/TargetBuiltins.h"
27 #include "clang/Basic/TargetInfo.h"
28 #include "clang/CodeGen/CGFunctionInfo.h"
29 #include "clang/CodeGen/SwiftCallingConv.h"
30 #include "llvm/ADT/StringExtras.h"
31 #include "llvm/Transforms/Utils/Local.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/IR/Attributes.h"
34 #include "llvm/IR/CallingConv.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/InlineAsm.h"
37 #include "llvm/IR/IntrinsicInst.h"
38 #include "llvm/IR/Intrinsics.h"
39 using namespace clang;
40 using namespace CodeGen;
41 
42 /***/
43 
44 unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
45   switch (CC) {
46   default: return llvm::CallingConv::C;
47   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
48   case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
49   case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
50   case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
51   case CC_Win64: return llvm::CallingConv::Win64;
52   case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
53   case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
54   case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
55   case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
56   // TODO: Add support for __pascal to LLVM.
57   case CC_X86Pascal: return llvm::CallingConv::C;
58   // TODO: Add support for __vectorcall to LLVM.
59   case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
60   case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
61   case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
62   case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
63   case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
64   case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
65   case CC_Swift: return llvm::CallingConv::Swift;
66   }
67 }
68 
69 /// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
70 /// qualification. Either or both of RD and MD may be null. A null RD indicates
71 /// that there is no meaningful 'this' type, and a null MD can occur when
72 /// calling a method pointer.
73 CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
74                                          const CXXMethodDecl *MD) {
75   QualType RecTy;
76   if (RD)
77     RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
78   else
79     RecTy = Context.VoidTy;
80 
81   if (MD)
82     RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace());
83   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
84 }
85 
86 /// Returns the canonical formal type of the given C++ method.
87 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
88   return MD->getType()->getCanonicalTypeUnqualified()
89            .getAs<FunctionProtoType>();
90 }
91 
92 /// Returns the "extra-canonicalized" return type, which discards
93 /// qualifiers on the return type.  Codegen doesn't care about them,
94 /// and it makes ABI code a little easier to be able to assume that
95 /// all parameter and return types are top-level unqualified.
96 static CanQualType GetReturnType(QualType RetTy) {
97   return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
98 }
99 
100 /// Arrange the argument and result information for a value of the given
101 /// unprototyped freestanding function type.
102 const CGFunctionInfo &
103 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
104   // When translating an unprototyped function type, always use a
105   // variadic type.
106   return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
107                                  /*instanceMethod=*/false,
108                                  /*chainCall=*/false, None,
109                                  FTNP->getExtInfo(), {}, RequiredArgs(0));
110 }
111 
112 static void addExtParameterInfosForCall(
113          llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
114                                         const FunctionProtoType *proto,
115                                         unsigned prefixArgs,
116                                         unsigned totalArgs) {
117   assert(proto->hasExtParameterInfos());
118   assert(paramInfos.size() <= prefixArgs);
119   assert(proto->getNumParams() + prefixArgs <= totalArgs);
120 
121   paramInfos.reserve(totalArgs);
122 
123   // Add default infos for any prefix args that don't already have infos.
124   paramInfos.resize(prefixArgs);
125 
126   // Add infos for the prototype.
127   for (const auto &ParamInfo : proto->getExtParameterInfos()) {
128     paramInfos.push_back(ParamInfo);
129     // pass_object_size params have no parameter info.
130     if (ParamInfo.hasPassObjectSize())
131       paramInfos.emplace_back();
132   }
133 
134   assert(paramInfos.size() <= totalArgs &&
135          "Did we forget to insert pass_object_size args?");
136   // Add default infos for the variadic and/or suffix arguments.
137   paramInfos.resize(totalArgs);
138 }
139 
140 /// Adds the formal parameters in FPT to the given prefix. If any parameter in
141 /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
142 static void appendParameterTypes(const CodeGenTypes &CGT,
143                                  SmallVectorImpl<CanQualType> &prefix,
144               SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
145                                  CanQual<FunctionProtoType> FPT) {
146   // Fast path: don't touch param info if we don't need to.
147   if (!FPT->hasExtParameterInfos()) {
148     assert(paramInfos.empty() &&
149            "We have paramInfos, but the prototype doesn't?");
150     prefix.append(FPT->param_type_begin(), FPT->param_type_end());
151     return;
152   }
153 
154   unsigned PrefixSize = prefix.size();
155   // In the vast majority of cases, we'll have precisely FPT->getNumParams()
156   // parameters; the only thing that can change this is the presence of
157   // pass_object_size. So, we preallocate for the common case.
158   prefix.reserve(prefix.size() + FPT->getNumParams());
159 
160   auto ExtInfos = FPT->getExtParameterInfos();
161   assert(ExtInfos.size() == FPT->getNumParams());
162   for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
163     prefix.push_back(FPT->getParamType(I));
164     if (ExtInfos[I].hasPassObjectSize())
165       prefix.push_back(CGT.getContext().getSizeType());
166   }
167 
168   addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
169                               prefix.size());
170 }
171 
172 /// Arrange the LLVM function layout for a value of the given function
173 /// type, on top of any implicit parameters already stored.
174 static const CGFunctionInfo &
175 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
176                         SmallVectorImpl<CanQualType> &prefix,
177                         CanQual<FunctionProtoType> FTP) {
178   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
179   RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
180   // FIXME: Kill copy.
181   appendParameterTypes(CGT, prefix, paramInfos, FTP);
182   CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
183 
184   return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
185                                      /*chainCall=*/false, prefix,
186                                      FTP->getExtInfo(), paramInfos,
187                                      Required);
188 }
189 
190 /// Arrange the argument and result information for a value of the
191 /// given freestanding function type.
192 const CGFunctionInfo &
193 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
194   SmallVector<CanQualType, 16> argTypes;
195   return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
196                                    FTP);
197 }
198 
199 static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
200   // Set the appropriate calling convention for the Function.
201   if (D->hasAttr<StdCallAttr>())
202     return CC_X86StdCall;
203 
204   if (D->hasAttr<FastCallAttr>())
205     return CC_X86FastCall;
206 
207   if (D->hasAttr<RegCallAttr>())
208     return CC_X86RegCall;
209 
210   if (D->hasAttr<ThisCallAttr>())
211     return CC_X86ThisCall;
212 
213   if (D->hasAttr<VectorCallAttr>())
214     return CC_X86VectorCall;
215 
216   if (D->hasAttr<PascalAttr>())
217     return CC_X86Pascal;
218 
219   if (PcsAttr *PCS = D->getAttr<PcsAttr>())
220     return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
221 
222   if (D->hasAttr<AArch64VectorPcsAttr>())
223     return CC_AArch64VectorCall;
224 
225   if (D->hasAttr<IntelOclBiccAttr>())
226     return CC_IntelOclBicc;
227 
228   if (D->hasAttr<MSABIAttr>())
229     return IsWindows ? CC_C : CC_Win64;
230 
231   if (D->hasAttr<SysVABIAttr>())
232     return IsWindows ? CC_X86_64SysV : CC_C;
233 
234   if (D->hasAttr<PreserveMostAttr>())
235     return CC_PreserveMost;
236 
237   if (D->hasAttr<PreserveAllAttr>())
238     return CC_PreserveAll;
239 
240   return CC_C;
241 }
242 
243 /// Arrange the argument and result information for a call to an
244 /// unknown C++ non-static member function of the given abstract type.
245 /// (A null RD means we don't have any meaningful "this" argument type,
246 ///  so fall back to a generic pointer type).
247 /// The member function must be an ordinary function, i.e. not a
248 /// constructor or destructor.
249 const CGFunctionInfo &
250 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
251                                    const FunctionProtoType *FTP,
252                                    const CXXMethodDecl *MD) {
253   SmallVector<CanQualType, 16> argTypes;
254 
255   // Add the 'this' pointer.
256   argTypes.push_back(DeriveThisType(RD, MD));
257 
258   return ::arrangeLLVMFunctionInfo(
259       *this, true, argTypes,
260       FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
261 }
262 
263 /// Set calling convention for CUDA/HIP kernel.
264 static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
265                                            const FunctionDecl *FD) {
266   if (FD->hasAttr<CUDAGlobalAttr>()) {
267     const FunctionType *FT = FTy->getAs<FunctionType>();
268     CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
269     FTy = FT->getCanonicalTypeUnqualified();
270   }
271 }
272 
273 /// Arrange the argument and result information for a declaration or
274 /// definition of the given C++ non-static member function.  The
275 /// member function must be an ordinary function, i.e. not a
276 /// constructor or destructor.
277 const CGFunctionInfo &
278 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
279   assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
280   assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
281 
282   CanQualType FT = GetFormalType(MD).getAs<Type>();
283   setCUDAKernelCallingConvention(FT, CGM, MD);
284   auto prototype = FT.getAs<FunctionProtoType>();
285 
286   if (MD->isInstance()) {
287     // The abstract case is perfectly fine.
288     const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
289     return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
290   }
291 
292   return arrangeFreeFunctionType(prototype);
293 }
294 
295 bool CodeGenTypes::inheritingCtorHasParams(
296     const InheritedConstructor &Inherited, CXXCtorType Type) {
297   // Parameters are unnecessary if we're constructing a base class subobject
298   // and the inherited constructor lives in a virtual base.
299   return Type == Ctor_Complete ||
300          !Inherited.getShadowDecl()->constructsVirtualBase() ||
301          !Target.getCXXABI().hasConstructorVariants();
302 }
303 
304 const CGFunctionInfo &
305 CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
306   auto *MD = cast<CXXMethodDecl>(GD.getDecl());
307 
308   SmallVector<CanQualType, 16> argTypes;
309   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
310   argTypes.push_back(DeriveThisType(MD->getParent(), MD));
311 
312   bool PassParams = true;
313 
314   if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
315     // A base class inheriting constructor doesn't get forwarded arguments
316     // needed to construct a virtual base (or base class thereof).
317     if (auto Inherited = CD->getInheritedConstructor())
318       PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
319   }
320 
321   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
322 
323   // Add the formal parameters.
324   if (PassParams)
325     appendParameterTypes(*this, argTypes, paramInfos, FTP);
326 
327   CGCXXABI::AddedStructorArgs AddedArgs =
328       TheCXXABI.buildStructorSignature(GD, argTypes);
329   if (!paramInfos.empty()) {
330     // Note: prefix implies after the first param.
331     if (AddedArgs.Prefix)
332       paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
333                         FunctionProtoType::ExtParameterInfo{});
334     if (AddedArgs.Suffix)
335       paramInfos.append(AddedArgs.Suffix,
336                         FunctionProtoType::ExtParameterInfo{});
337   }
338 
339   RequiredArgs required =
340       (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
341                                       : RequiredArgs::All);
342 
343   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
344   CanQualType resultType = TheCXXABI.HasThisReturn(GD)
345                                ? argTypes.front()
346                                : TheCXXABI.hasMostDerivedReturn(GD)
347                                      ? CGM.getContext().VoidPtrTy
348                                      : Context.VoidTy;
349   return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
350                                  /*chainCall=*/false, argTypes, extInfo,
351                                  paramInfos, required);
352 }
353 
354 static SmallVector<CanQualType, 16>
355 getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
356   SmallVector<CanQualType, 16> argTypes;
357   for (auto &arg : args)
358     argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
359   return argTypes;
360 }
361 
362 static SmallVector<CanQualType, 16>
363 getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
364   SmallVector<CanQualType, 16> argTypes;
365   for (auto &arg : args)
366     argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
367   return argTypes;
368 }
369 
370 static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
371 getExtParameterInfosForCall(const FunctionProtoType *proto,
372                             unsigned prefixArgs, unsigned totalArgs) {
373   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
374   if (proto->hasExtParameterInfos()) {
375     addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
376   }
377   return result;
378 }
379 
380 /// Arrange a call to a C++ method, passing the given arguments.
381 ///
382 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
383 /// parameter.
384 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
385 /// args.
386 /// PassProtoArgs indicates whether `args` has args for the parameters in the
387 /// given CXXConstructorDecl.
388 const CGFunctionInfo &
389 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
390                                         const CXXConstructorDecl *D,
391                                         CXXCtorType CtorKind,
392                                         unsigned ExtraPrefixArgs,
393                                         unsigned ExtraSuffixArgs,
394                                         bool PassProtoArgs) {
395   // FIXME: Kill copy.
396   SmallVector<CanQualType, 16> ArgTypes;
397   for (const auto &Arg : args)
398     ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
399 
400   // +1 for implicit this, which should always be args[0].
401   unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
402 
403   CanQual<FunctionProtoType> FPT = GetFormalType(D);
404   RequiredArgs Required = PassProtoArgs
405                               ? RequiredArgs::forPrototypePlus(
406                                     FPT, TotalPrefixArgs + ExtraSuffixArgs)
407                               : RequiredArgs::All;
408 
409   GlobalDecl GD(D, CtorKind);
410   CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
411                                ? ArgTypes.front()
412                                : TheCXXABI.hasMostDerivedReturn(GD)
413                                      ? CGM.getContext().VoidPtrTy
414                                      : Context.VoidTy;
415 
416   FunctionType::ExtInfo Info = FPT->getExtInfo();
417   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
418   // If the prototype args are elided, we should only have ABI-specific args,
419   // which never have param info.
420   if (PassProtoArgs && FPT->hasExtParameterInfos()) {
421     // ABI-specific suffix arguments are treated the same as variadic arguments.
422     addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
423                                 ArgTypes.size());
424   }
425   return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
426                                  /*chainCall=*/false, ArgTypes, Info,
427                                  ParamInfos, Required);
428 }
429 
430 /// Arrange the argument and result information for the declaration or
431 /// definition of the given function.
432 const CGFunctionInfo &
433 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
434   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
435     if (MD->isInstance())
436       return arrangeCXXMethodDeclaration(MD);
437 
438   CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
439 
440   assert(isa<FunctionType>(FTy));
441   setCUDAKernelCallingConvention(FTy, CGM, FD);
442 
443   // When declaring a function without a prototype, always use a
444   // non-variadic type.
445   if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
446     return arrangeLLVMFunctionInfo(
447         noProto->getReturnType(), /*instanceMethod=*/false,
448         /*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All);
449   }
450 
451   return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
452 }
453 
454 /// Arrange the argument and result information for the declaration or
455 /// definition of an Objective-C method.
456 const CGFunctionInfo &
457 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
458   // It happens that this is the same as a call with no optional
459   // arguments, except also using the formal 'self' type.
460   return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
461 }
462 
463 /// Arrange the argument and result information for the function type
464 /// through which to perform a send to the given Objective-C method,
465 /// using the given receiver type.  The receiver type is not always
466 /// the 'self' type of the method or even an Objective-C pointer type.
467 /// This is *not* the right method for actually performing such a
468 /// message send, due to the possibility of optional arguments.
469 const CGFunctionInfo &
470 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
471                                               QualType receiverType) {
472   SmallVector<CanQualType, 16> argTys;
473   SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
474   argTys.push_back(Context.getCanonicalParamType(receiverType));
475   argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
476   // FIXME: Kill copy?
477   for (const auto *I : MD->parameters()) {
478     argTys.push_back(Context.getCanonicalParamType(I->getType()));
479     auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
480         I->hasAttr<NoEscapeAttr>());
481     extParamInfos.push_back(extParamInfo);
482   }
483 
484   FunctionType::ExtInfo einfo;
485   bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
486   einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
487 
488   if (getContext().getLangOpts().ObjCAutoRefCount &&
489       MD->hasAttr<NSReturnsRetainedAttr>())
490     einfo = einfo.withProducesResult(true);
491 
492   RequiredArgs required =
493     (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
494 
495   return arrangeLLVMFunctionInfo(
496       GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
497       /*chainCall=*/false, argTys, einfo, extParamInfos, required);
498 }
499 
500 const CGFunctionInfo &
501 CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
502                                                  const CallArgList &args) {
503   auto argTypes = getArgTypesForCall(Context, args);
504   FunctionType::ExtInfo einfo;
505 
506   return arrangeLLVMFunctionInfo(
507       GetReturnType(returnType), /*instanceMethod=*/false,
508       /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
509 }
510 
511 const CGFunctionInfo &
512 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
513   // FIXME: Do we need to handle ObjCMethodDecl?
514   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
515 
516   if (isa<CXXConstructorDecl>(GD.getDecl()) ||
517       isa<CXXDestructorDecl>(GD.getDecl()))
518     return arrangeCXXStructorDeclaration(GD);
519 
520   return arrangeFunctionDeclaration(FD);
521 }
522 
523 /// Arrange a thunk that takes 'this' as the first parameter followed by
524 /// varargs.  Return a void pointer, regardless of the actual return type.
525 /// The body of the thunk will end in a musttail call to a function of the
526 /// correct type, and the caller will bitcast the function to the correct
527 /// prototype.
528 const CGFunctionInfo &
529 CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
530   assert(MD->isVirtual() && "only methods have thunks");
531   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
532   CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
533   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
534                                  /*chainCall=*/false, ArgTys,
535                                  FTP->getExtInfo(), {}, RequiredArgs(1));
536 }
537 
538 const CGFunctionInfo &
539 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
540                                    CXXCtorType CT) {
541   assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
542 
543   CanQual<FunctionProtoType> FTP = GetFormalType(CD);
544   SmallVector<CanQualType, 2> ArgTys;
545   const CXXRecordDecl *RD = CD->getParent();
546   ArgTys.push_back(DeriveThisType(RD, CD));
547   if (CT == Ctor_CopyingClosure)
548     ArgTys.push_back(*FTP->param_type_begin());
549   if (RD->getNumVBases() > 0)
550     ArgTys.push_back(Context.IntTy);
551   CallingConv CC = Context.getDefaultCallingConvention(
552       /*IsVariadic=*/false, /*IsCXXMethod=*/true);
553   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
554                                  /*chainCall=*/false, ArgTys,
555                                  FunctionType::ExtInfo(CC), {},
556                                  RequiredArgs::All);
557 }
558 
559 /// Arrange a call as unto a free function, except possibly with an
560 /// additional number of formal parameters considered required.
561 static const CGFunctionInfo &
562 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
563                             CodeGenModule &CGM,
564                             const CallArgList &args,
565                             const FunctionType *fnType,
566                             unsigned numExtraRequiredArgs,
567                             bool chainCall) {
568   assert(args.size() >= numExtraRequiredArgs);
569 
570   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
571 
572   // In most cases, there are no optional arguments.
573   RequiredArgs required = RequiredArgs::All;
574 
575   // If we have a variadic prototype, the required arguments are the
576   // extra prefix plus the arguments in the prototype.
577   if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
578     if (proto->isVariadic())
579       required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);
580 
581     if (proto->hasExtParameterInfos())
582       addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
583                                   args.size());
584 
585   // If we don't have a prototype at all, but we're supposed to
586   // explicitly use the variadic convention for unprototyped calls,
587   // treat all of the arguments as required but preserve the nominal
588   // possibility of variadics.
589   } else if (CGM.getTargetCodeGenInfo()
590                 .isNoProtoCallVariadic(args,
591                                        cast<FunctionNoProtoType>(fnType))) {
592     required = RequiredArgs(args.size());
593   }
594 
595   // FIXME: Kill copy.
596   SmallVector<CanQualType, 16> argTypes;
597   for (const auto &arg : args)
598     argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
599   return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
600                                      /*instanceMethod=*/false, chainCall,
601                                      argTypes, fnType->getExtInfo(), paramInfos,
602                                      required);
603 }
604 
605 /// Figure out the rules for calling a function with the given formal
606 /// type using the given arguments.  The arguments are necessary
607 /// because the function might be unprototyped, in which case it's
608 /// target-dependent in crazy ways.
609 const CGFunctionInfo &
610 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
611                                       const FunctionType *fnType,
612                                       bool chainCall) {
613   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
614                                      chainCall ? 1 : 0, chainCall);
615 }
616 
617 /// A block function is essentially a free function with an
618 /// extra implicit argument.
619 const CGFunctionInfo &
620 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
621                                        const FunctionType *fnType) {
622   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
623                                      /*chainCall=*/false);
624 }
625 
626 const CGFunctionInfo &
627 CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
628                                               const FunctionArgList &params) {
629   auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
630   auto argTypes = getArgTypesForDeclaration(Context, params);
631 
632   return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
633                                  /*instanceMethod*/ false, /*chainCall*/ false,
634                                  argTypes, proto->getExtInfo(), paramInfos,
635                                  RequiredArgs::forPrototypePlus(proto, 1));
636 }
637 
638 const CGFunctionInfo &
639 CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
640                                          const CallArgList &args) {
641   // FIXME: Kill copy.
642   SmallVector<CanQualType, 16> argTypes;
643   for (const auto &Arg : args)
644     argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
645   return arrangeLLVMFunctionInfo(
646       GetReturnType(resultType), /*instanceMethod=*/false,
647       /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
648       /*paramInfos=*/ {}, RequiredArgs::All);
649 }
650 
651 const CGFunctionInfo &
652 CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
653                                                 const FunctionArgList &args) {
654   auto argTypes = getArgTypesForDeclaration(Context, args);
655 
656   return arrangeLLVMFunctionInfo(
657       GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
658       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
659 }
660 
661 const CGFunctionInfo &
662 CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
663                                               ArrayRef<CanQualType> argTypes) {
664   return arrangeLLVMFunctionInfo(
665       resultType, /*instanceMethod=*/false, /*chainCall=*/false,
666       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
667 }
668 
669 /// Arrange a call to a C++ method, passing the given arguments.
670 ///
671 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
672 /// does not count `this`.
673 const CGFunctionInfo &
674 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
675                                    const FunctionProtoType *proto,
676                                    RequiredArgs required,
677                                    unsigned numPrefixArgs) {
678   assert(numPrefixArgs + 1 <= args.size() &&
679          "Emitting a call with less args than the required prefix?");
680   // Add one to account for `this`. It's a bit awkward here, but we don't count
681   // `this` in similar places elsewhere.
682   auto paramInfos =
683     getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
684 
685   // FIXME: Kill copy.
686   auto argTypes = getArgTypesForCall(Context, args);
687 
688   FunctionType::ExtInfo info = proto->getExtInfo();
689   return arrangeLLVMFunctionInfo(
690       GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
691       /*chainCall=*/false, argTypes, info, paramInfos, required);
692 }
693 
694 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
695   return arrangeLLVMFunctionInfo(
696       getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
697       None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
698 }
699 
700 const CGFunctionInfo &
701 CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
702                           const CallArgList &args) {
703   assert(signature.arg_size() <= args.size());
704   if (signature.arg_size() == args.size())
705     return signature;
706 
707   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
708   auto sigParamInfos = signature.getExtParameterInfos();
709   if (!sigParamInfos.empty()) {
710     paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
711     paramInfos.resize(args.size());
712   }
713 
714   auto argTypes = getArgTypesForCall(Context, args);
715 
716   assert(signature.getRequiredArgs().allowsOptionalArgs());
717   return arrangeLLVMFunctionInfo(signature.getReturnType(),
718                                  signature.isInstanceMethod(),
719                                  signature.isChainCall(),
720                                  argTypes,
721                                  signature.getExtInfo(),
722                                  paramInfos,
723                                  signature.getRequiredArgs());
724 }
725 
726 namespace clang {
727 namespace CodeGen {
728 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
729 }
730 }
731 
732 /// Arrange the argument and result information for an abstract value
733 /// of a given function type.  This is the method which all of the
734 /// above functions ultimately defer to.
735 const CGFunctionInfo &
736 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
737                                       bool instanceMethod,
738                                       bool chainCall,
739                                       ArrayRef<CanQualType> argTypes,
740                                       FunctionType::ExtInfo info,
741                      ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
742                                       RequiredArgs required) {
743   assert(llvm::all_of(argTypes,
744                       [](CanQualType T) { return T.isCanonicalAsParam(); }));
745 
746   // Lookup or create unique function info.
747   llvm::FoldingSetNodeID ID;
748   CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
749                           required, resultType, argTypes);
750 
751   void *insertPos = nullptr;
752   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
753   if (FI)
754     return *FI;
755 
756   unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
757 
758   // Construct the function info.  We co-allocate the ArgInfos.
759   FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
760                               paramInfos, resultType, argTypes, required);
761   FunctionInfos.InsertNode(FI, insertPos);
762 
763   bool inserted = FunctionsBeingProcessed.insert(FI).second;
764   (void)inserted;
765   assert(inserted && "Recursively being processed?");
766 
767   // Compute ABI information.
768   if (CC == llvm::CallingConv::SPIR_KERNEL) {
769     // Force target independent argument handling for the host visible
770     // kernel functions.
771     computeSPIRKernelABIInfo(CGM, *FI);
772   } else if (info.getCC() == CC_Swift) {
773     swiftcall::computeABIInfo(CGM, *FI);
774   } else {
775     getABIInfo().computeInfo(*FI);
776   }
777 
778   // Loop over all of the computed argument and return value info.  If any of
779   // them are direct or extend without a specified coerce type, specify the
780   // default now.
781   ABIArgInfo &retInfo = FI->getReturnInfo();
782   if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
783     retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
784 
785   for (auto &I : FI->arguments())
786     if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
787       I.info.setCoerceToType(ConvertType(I.type));
788 
789   bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
790   assert(erased && "Not in set?");
791 
792   return *FI;
793 }
794 
795 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
796                                        bool instanceMethod,
797                                        bool chainCall,
798                                        const FunctionType::ExtInfo &info,
799                                        ArrayRef<ExtParameterInfo> paramInfos,
800                                        CanQualType resultType,
801                                        ArrayRef<CanQualType> argTypes,
802                                        RequiredArgs required) {
803   assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
804   assert(!required.allowsOptionalArgs() ||
805          required.getNumRequiredArgs() <= argTypes.size());
806 
807   void *buffer =
808     operator new(totalSizeToAlloc<ArgInfo,             ExtParameterInfo>(
809                                   argTypes.size() + 1, paramInfos.size()));
810 
811   CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
812   FI->CallingConvention = llvmCC;
813   FI->EffectiveCallingConvention = llvmCC;
814   FI->ASTCallingConvention = info.getCC();
815   FI->InstanceMethod = instanceMethod;
816   FI->ChainCall = chainCall;
817   FI->NoReturn = info.getNoReturn();
818   FI->ReturnsRetained = info.getProducesResult();
819   FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
820   FI->NoCfCheck = info.getNoCfCheck();
821   FI->Required = required;
822   FI->HasRegParm = info.getHasRegParm();
823   FI->RegParm = info.getRegParm();
824   FI->ArgStruct = nullptr;
825   FI->ArgStructAlign = 0;
826   FI->NumArgs = argTypes.size();
827   FI->HasExtParameterInfos = !paramInfos.empty();
828   FI->getArgsBuffer()[0].type = resultType;
829   for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
830     FI->getArgsBuffer()[i + 1].type = argTypes[i];
831   for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
832     FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
833   return FI;
834 }
835 
836 /***/
837 
838 namespace {
839 // ABIArgInfo::Expand implementation.
840 
841 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
842 struct TypeExpansion {
843   enum TypeExpansionKind {
844     // Elements of constant arrays are expanded recursively.
845     TEK_ConstantArray,
846     // Record fields are expanded recursively (but if record is a union, only
847     // the field with the largest size is expanded).
848     TEK_Record,
849     // For complex types, real and imaginary parts are expanded recursively.
850     TEK_Complex,
851     // All other types are not expandable.
852     TEK_None
853   };
854 
855   const TypeExpansionKind Kind;
856 
857   TypeExpansion(TypeExpansionKind K) : Kind(K) {}
858   virtual ~TypeExpansion() {}
859 };
860 
861 struct ConstantArrayExpansion : TypeExpansion {
862   QualType EltTy;
863   uint64_t NumElts;
864 
865   ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
866       : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
867   static bool classof(const TypeExpansion *TE) {
868     return TE->Kind == TEK_ConstantArray;
869   }
870 };
871 
872 struct RecordExpansion : TypeExpansion {
873   SmallVector<const CXXBaseSpecifier *, 1> Bases;
874 
875   SmallVector<const FieldDecl *, 1> Fields;
876 
877   RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
878                   SmallVector<const FieldDecl *, 1> &&Fields)
879       : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
880         Fields(std::move(Fields)) {}
881   static bool classof(const TypeExpansion *TE) {
882     return TE->Kind == TEK_Record;
883   }
884 };
885 
886 struct ComplexExpansion : TypeExpansion {
887   QualType EltTy;
888 
889   ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
890   static bool classof(const TypeExpansion *TE) {
891     return TE->Kind == TEK_Complex;
892   }
893 };
894 
895 struct NoExpansion : TypeExpansion {
896   NoExpansion() : TypeExpansion(TEK_None) {}
897   static bool classof(const TypeExpansion *TE) {
898     return TE->Kind == TEK_None;
899   }
900 };
901 }  // namespace
902 
903 static std::unique_ptr<TypeExpansion>
904 getTypeExpansion(QualType Ty, const ASTContext &Context) {
905   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
906     return std::make_unique<ConstantArrayExpansion>(
907         AT->getElementType(), AT->getSize().getZExtValue());
908   }
909   if (const RecordType *RT = Ty->getAs<RecordType>()) {
910     SmallVector<const CXXBaseSpecifier *, 1> Bases;
911     SmallVector<const FieldDecl *, 1> Fields;
912     const RecordDecl *RD = RT->getDecl();
913     assert(!RD->hasFlexibleArrayMember() &&
914            "Cannot expand structure with flexible array.");
915     if (RD->isUnion()) {
916       // Unions can be here only in degenerative cases - all the fields are same
917       // after flattening. Thus we have to use the "largest" field.
918       const FieldDecl *LargestFD = nullptr;
919       CharUnits UnionSize = CharUnits::Zero();
920 
921       for (const auto *FD : RD->fields()) {
922         if (FD->isZeroLengthBitField(Context))
923           continue;
924         assert(!FD->isBitField() &&
925                "Cannot expand structure with bit-field members.");
926         CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
927         if (UnionSize < FieldSize) {
928           UnionSize = FieldSize;
929           LargestFD = FD;
930         }
931       }
932       if (LargestFD)
933         Fields.push_back(LargestFD);
934     } else {
935       if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
936         assert(!CXXRD->isDynamicClass() &&
937                "cannot expand vtable pointers in dynamic classes");
938         for (const CXXBaseSpecifier &BS : CXXRD->bases())
939           Bases.push_back(&BS);
940       }
941 
942       for (const auto *FD : RD->fields()) {
943         if (FD->isZeroLengthBitField(Context))
944           continue;
945         assert(!FD->isBitField() &&
946                "Cannot expand structure with bit-field members.");
947         Fields.push_back(FD);
948       }
949     }
950     return std::make_unique<RecordExpansion>(std::move(Bases),
951                                               std::move(Fields));
952   }
953   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
954     return std::make_unique<ComplexExpansion>(CT->getElementType());
955   }
956   return std::make_unique<NoExpansion>();
957 }
958 
959 static int getExpansionSize(QualType Ty, const ASTContext &Context) {
960   auto Exp = getTypeExpansion(Ty, Context);
961   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
962     return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
963   }
964   if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
965     int Res = 0;
966     for (auto BS : RExp->Bases)
967       Res += getExpansionSize(BS->getType(), Context);
968     for (auto FD : RExp->Fields)
969       Res += getExpansionSize(FD->getType(), Context);
970     return Res;
971   }
972   if (isa<ComplexExpansion>(Exp.get()))
973     return 2;
974   assert(isa<NoExpansion>(Exp.get()));
975   return 1;
976 }
977 
978 void
979 CodeGenTypes::getExpandedTypes(QualType Ty,
980                                SmallVectorImpl<llvm::Type *>::iterator &TI) {
981   auto Exp = getTypeExpansion(Ty, Context);
982   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
983     for (int i = 0, n = CAExp->NumElts; i < n; i++) {
984       getExpandedTypes(CAExp->EltTy, TI);
985     }
986   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
987     for (auto BS : RExp->Bases)
988       getExpandedTypes(BS->getType(), TI);
989     for (auto FD : RExp->Fields)
990       getExpandedTypes(FD->getType(), TI);
991   } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
992     llvm::Type *EltTy = ConvertType(CExp->EltTy);
993     *TI++ = EltTy;
994     *TI++ = EltTy;
995   } else {
996     assert(isa<NoExpansion>(Exp.get()));
997     *TI++ = ConvertType(Ty);
998   }
999 }
1000 
1001 static void forConstantArrayExpansion(CodeGenFunction &CGF,
1002                                       ConstantArrayExpansion *CAE,
1003                                       Address BaseAddr,
1004                                       llvm::function_ref<void(Address)> Fn) {
1005   CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1006   CharUnits EltAlign =
1007     BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1008 
1009   for (int i = 0, n = CAE->NumElts; i < n; i++) {
1010     llvm::Value *EltAddr =
1011       CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
1012     Fn(Address(EltAddr, EltAlign));
1013   }
1014 }
1015 
1016 void CodeGenFunction::ExpandTypeFromArgs(
1017     QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
1018   assert(LV.isSimple() &&
1019          "Unexpected non-simple lvalue during struct expansion.");
1020 
1021   auto Exp = getTypeExpansion(Ty, getContext());
1022   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1023     forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
1024                               [&](Address EltAddr) {
1025       LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1026       ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1027     });
1028   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1029     Address This = LV.getAddress();
1030     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1031       // Perform a single step derived-to-base conversion.
1032       Address Base =
1033           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1034                                 /*NullCheckValue=*/false, SourceLocation());
1035       LValue SubLV = MakeAddrLValue(Base, BS->getType());
1036 
1037       // Recurse onto bases.
1038       ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1039     }
1040     for (auto FD : RExp->Fields) {
1041       // FIXME: What are the right qualifiers here?
1042       LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1043       ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1044     }
1045   } else if (isa<ComplexExpansion>(Exp.get())) {
1046     auto realValue = *AI++;
1047     auto imagValue = *AI++;
1048     EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1049   } else {
1050     assert(isa<NoExpansion>(Exp.get()));
1051     EmitStoreThroughLValue(RValue::get(*AI++), LV);
1052   }
1053 }
1054 
1055 void CodeGenFunction::ExpandTypeToArgs(
1056     QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1057     SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1058   auto Exp = getTypeExpansion(Ty, getContext());
1059   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1060     Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1061                                    : Arg.getKnownRValue().getAggregateAddress();
1062     forConstantArrayExpansion(
1063         *this, CAExp, Addr, [&](Address EltAddr) {
1064           CallArg EltArg = CallArg(
1065               convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1066               CAExp->EltTy);
1067           ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1068                            IRCallArgPos);
1069         });
1070   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1071     Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1072                                    : Arg.getKnownRValue().getAggregateAddress();
1073     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1074       // Perform a single step derived-to-base conversion.
1075       Address Base =
1076           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1077                                 /*NullCheckValue=*/false, SourceLocation());
1078       CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1079 
1080       // Recurse onto bases.
1081       ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1082                        IRCallArgPos);
1083     }
1084 
1085     LValue LV = MakeAddrLValue(This, Ty);
1086     for (auto FD : RExp->Fields) {
1087       CallArg FldArg =
1088           CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1089       ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1090                        IRCallArgPos);
1091     }
1092   } else if (isa<ComplexExpansion>(Exp.get())) {
1093     ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1094     IRCallArgs[IRCallArgPos++] = CV.first;
1095     IRCallArgs[IRCallArgPos++] = CV.second;
1096   } else {
1097     assert(isa<NoExpansion>(Exp.get()));
1098     auto RV = Arg.getKnownRValue();
1099     assert(RV.isScalar() &&
1100            "Unexpected non-scalar rvalue during struct expansion.");
1101 
1102     // Insert a bitcast as needed.
1103     llvm::Value *V = RV.getScalarVal();
1104     if (IRCallArgPos < IRFuncTy->getNumParams() &&
1105         V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1106       V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1107 
1108     IRCallArgs[IRCallArgPos++] = V;
1109   }
1110 }
1111 
1112 /// Create a temporary allocation for the purposes of coercion.
1113 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1114                                            CharUnits MinAlign) {
1115   // Don't use an alignment that's worse than what LLVM would prefer.
1116   auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1117   CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1118 
1119   return CGF.CreateTempAlloca(Ty, Align);
1120 }
1121 
1122 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1123 /// accessing some number of bytes out of it, try to gep into the struct to get
1124 /// at its inner goodness.  Dive as deep as possible without entering an element
1125 /// with an in-memory size smaller than DstSize.
1126 static Address
1127 EnterStructPointerForCoercedAccess(Address SrcPtr,
1128                                    llvm::StructType *SrcSTy,
1129                                    uint64_t DstSize, CodeGenFunction &CGF) {
1130   // We can't dive into a zero-element struct.
1131   if (SrcSTy->getNumElements() == 0) return SrcPtr;
1132 
1133   llvm::Type *FirstElt = SrcSTy->getElementType(0);
1134 
1135   // If the first elt is at least as large as what we're looking for, or if the
1136   // first element is the same size as the whole struct, we can enter it. The
1137   // comparison must be made on the store size and not the alloca size. Using
1138   // the alloca size may overstate the size of the load.
1139   uint64_t FirstEltSize =
1140     CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1141   if (FirstEltSize < DstSize &&
1142       FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1143     return SrcPtr;
1144 
1145   // GEP into the first element.
1146   SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");
1147 
1148   // If the first element is a struct, recurse.
1149   llvm::Type *SrcTy = SrcPtr.getElementType();
1150   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1151     return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1152 
1153   return SrcPtr;
1154 }
1155 
1156 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1157 /// are either integers or pointers.  This does a truncation of the value if it
1158 /// is too large or a zero extension if it is too small.
1159 ///
1160 /// This behaves as if the value were coerced through memory, so on big-endian
1161 /// targets the high bits are preserved in a truncation, while little-endian
1162 /// targets preserve the low bits.
1163 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1164                                              llvm::Type *Ty,
1165                                              CodeGenFunction &CGF) {
1166   if (Val->getType() == Ty)
1167     return Val;
1168 
1169   if (isa<llvm::PointerType>(Val->getType())) {
1170     // If this is Pointer->Pointer avoid conversion to and from int.
1171     if (isa<llvm::PointerType>(Ty))
1172       return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1173 
1174     // Convert the pointer to an integer so we can play with its width.
1175     Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1176   }
1177 
1178   llvm::Type *DestIntTy = Ty;
1179   if (isa<llvm::PointerType>(DestIntTy))
1180     DestIntTy = CGF.IntPtrTy;
1181 
1182   if (Val->getType() != DestIntTy) {
1183     const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1184     if (DL.isBigEndian()) {
1185       // Preserve the high bits on big-endian targets.
1186       // That is what memory coercion does.
1187       uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1188       uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1189 
1190       if (SrcSize > DstSize) {
1191         Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1192         Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1193       } else {
1194         Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1195         Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1196       }
1197     } else {
1198       // Little-endian targets preserve the low bits. No shifts required.
1199       Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1200     }
1201   }
1202 
1203   if (isa<llvm::PointerType>(Ty))
1204     Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1205   return Val;
1206 }
1207 
1208 
1209 
1210 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1211 /// a pointer to an object of type \arg Ty, known to be aligned to
1212 /// \arg SrcAlign bytes.
1213 ///
1214 /// This safely handles the case when the src type is smaller than the
1215 /// destination type; in this situation the values of bits which not
1216 /// present in the src are undefined.
1217 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1218                                       CodeGenFunction &CGF) {
1219   llvm::Type *SrcTy = Src.getElementType();
1220 
1221   // If SrcTy and Ty are the same, just do a load.
1222   if (SrcTy == Ty)
1223     return CGF.Builder.CreateLoad(Src);
1224 
1225   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1226 
1227   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1228     Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
1229     SrcTy = Src.getType()->getElementType();
1230   }
1231 
1232   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1233 
1234   // If the source and destination are integer or pointer types, just do an
1235   // extension or truncation to the desired type.
1236   if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1237       (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1238     llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1239     return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1240   }
1241 
1242   // If load is legal, just bitcast the src pointer.
1243   if (SrcSize >= DstSize) {
1244     // Generally SrcSize is never greater than DstSize, since this means we are
1245     // losing bits. However, this can happen in cases where the structure has
1246     // additional padding, for example due to a user specified alignment.
1247     //
1248     // FIXME: Assert that we aren't truncating non-padding bits when have access
1249     // to that information.
1250     Src = CGF.Builder.CreateBitCast(Src,
1251                                     Ty->getPointerTo(Src.getAddressSpace()));
1252     return CGF.Builder.CreateLoad(Src);
1253   }
1254 
1255   // Otherwise do coercion through memory. This is stupid, but simple.
1256   Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
1257   Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
1258   Address SrcCasted = CGF.Builder.CreateElementBitCast(Src,CGF.Int8Ty);
1259   CGF.Builder.CreateMemCpy(Casted, SrcCasted,
1260       llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
1261       false);
1262   return CGF.Builder.CreateLoad(Tmp);
1263 }
1264 
1265 // Function to store a first-class aggregate into memory.  We prefer to
1266 // store the elements rather than the aggregate to be more friendly to
1267 // fast-isel.
1268 // FIXME: Do we need to recurse here?
1269 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
1270                           Address Dest, bool DestIsVolatile) {
1271   // Prefer scalar stores to first-class aggregate stores.
1272   if (llvm::StructType *STy =
1273         dyn_cast<llvm::StructType>(Val->getType())) {
1274     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1275       Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i);
1276       llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
1277       CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1278     }
1279   } else {
1280     CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
1281   }
1282 }
1283 
1284 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1285 /// where the source and destination may have different types.  The
1286 /// destination is known to be aligned to \arg DstAlign bytes.
1287 ///
1288 /// This safely handles the case when the src type is larger than the
1289 /// destination type; the upper bits of the src will be lost.
1290 static void CreateCoercedStore(llvm::Value *Src,
1291                                Address Dst,
1292                                bool DstIsVolatile,
1293                                CodeGenFunction &CGF) {
1294   llvm::Type *SrcTy = Src->getType();
1295   llvm::Type *DstTy = Dst.getType()->getElementType();
1296   if (SrcTy == DstTy) {
1297     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1298     return;
1299   }
1300 
1301   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1302 
1303   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1304     Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
1305     DstTy = Dst.getType()->getElementType();
1306   }
1307 
1308   // If the source and destination are integer or pointer types, just do an
1309   // extension or truncation to the desired type.
1310   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1311       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1312     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1313     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1314     return;
1315   }
1316 
1317   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1318 
1319   // If store is legal, just bitcast the src pointer.
1320   if (SrcSize <= DstSize) {
1321     Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1322     BuildAggStore(CGF, Src, Dst, DstIsVolatile);
1323   } else {
1324     // Otherwise do coercion through memory. This is stupid, but
1325     // simple.
1326 
1327     // Generally SrcSize is never greater than DstSize, since this means we are
1328     // losing bits. However, this can happen in cases where the structure has
1329     // additional padding, for example due to a user specified alignment.
1330     //
1331     // FIXME: Assert that we aren't truncating non-padding bits when have access
1332     // to that information.
1333     Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1334     CGF.Builder.CreateStore(Src, Tmp);
1335     Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
1336     Address DstCasted = CGF.Builder.CreateElementBitCast(Dst,CGF.Int8Ty);
1337     CGF.Builder.CreateMemCpy(DstCasted, Casted,
1338         llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
1339         false);
1340   }
1341 }
1342 
1343 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1344                                    const ABIArgInfo &info) {
1345   if (unsigned offset = info.getDirectOffset()) {
1346     addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1347     addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1348                                              CharUnits::fromQuantity(offset));
1349     addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1350   }
1351   return addr;
1352 }
1353 
1354 namespace {
1355 
1356 /// Encapsulates information about the way function arguments from
1357 /// CGFunctionInfo should be passed to actual LLVM IR function.
1358 class ClangToLLVMArgMapping {
1359   static const unsigned InvalidIndex = ~0U;
1360   unsigned InallocaArgNo;
1361   unsigned SRetArgNo;
1362   unsigned TotalIRArgs;
1363 
1364   /// Arguments of LLVM IR function corresponding to single Clang argument.
1365   struct IRArgs {
1366     unsigned PaddingArgIndex;
1367     // Argument is expanded to IR arguments at positions
1368     // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1369     unsigned FirstArgIndex;
1370     unsigned NumberOfArgs;
1371 
1372     IRArgs()
1373         : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1374           NumberOfArgs(0) {}
1375   };
1376 
1377   SmallVector<IRArgs, 8> ArgInfo;
1378 
1379 public:
1380   ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1381                         bool OnlyRequiredArgs = false)
1382       : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1383         ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1384     construct(Context, FI, OnlyRequiredArgs);
1385   }
1386 
1387   bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1388   unsigned getInallocaArgNo() const {
1389     assert(hasInallocaArg());
1390     return InallocaArgNo;
1391   }
1392 
1393   bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1394   unsigned getSRetArgNo() const {
1395     assert(hasSRetArg());
1396     return SRetArgNo;
1397   }
1398 
1399   unsigned totalIRArgs() const { return TotalIRArgs; }
1400 
1401   bool hasPaddingArg(unsigned ArgNo) const {
1402     assert(ArgNo < ArgInfo.size());
1403     return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1404   }
1405   unsigned getPaddingArgNo(unsigned ArgNo) const {
1406     assert(hasPaddingArg(ArgNo));
1407     return ArgInfo[ArgNo].PaddingArgIndex;
1408   }
1409 
1410   /// Returns index of first IR argument corresponding to ArgNo, and their
1411   /// quantity.
1412   std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1413     assert(ArgNo < ArgInfo.size());
1414     return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1415                           ArgInfo[ArgNo].NumberOfArgs);
1416   }
1417 
1418 private:
1419   void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1420                  bool OnlyRequiredArgs);
1421 };
1422 
1423 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1424                                       const CGFunctionInfo &FI,
1425                                       bool OnlyRequiredArgs) {
1426   unsigned IRArgNo = 0;
1427   bool SwapThisWithSRet = false;
1428   const ABIArgInfo &RetAI = FI.getReturnInfo();
1429 
1430   if (RetAI.getKind() == ABIArgInfo::Indirect) {
1431     SwapThisWithSRet = RetAI.isSRetAfterThis();
1432     SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1433   }
1434 
1435   unsigned ArgNo = 0;
1436   unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1437   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1438        ++I, ++ArgNo) {
1439     assert(I != FI.arg_end());
1440     QualType ArgType = I->type;
1441     const ABIArgInfo &AI = I->info;
1442     // Collect data about IR arguments corresponding to Clang argument ArgNo.
1443     auto &IRArgs = ArgInfo[ArgNo];
1444 
1445     if (AI.getPaddingType())
1446       IRArgs.PaddingArgIndex = IRArgNo++;
1447 
1448     switch (AI.getKind()) {
1449     case ABIArgInfo::Extend:
1450     case ABIArgInfo::Direct: {
1451       // FIXME: handle sseregparm someday...
1452       llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1453       if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1454         IRArgs.NumberOfArgs = STy->getNumElements();
1455       } else {
1456         IRArgs.NumberOfArgs = 1;
1457       }
1458       break;
1459     }
1460     case ABIArgInfo::Indirect:
1461       IRArgs.NumberOfArgs = 1;
1462       break;
1463     case ABIArgInfo::Ignore:
1464     case ABIArgInfo::InAlloca:
1465       // ignore and inalloca doesn't have matching LLVM parameters.
1466       IRArgs.NumberOfArgs = 0;
1467       break;
1468     case ABIArgInfo::CoerceAndExpand:
1469       IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1470       break;
1471     case ABIArgInfo::Expand:
1472       IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1473       break;
1474     }
1475 
1476     if (IRArgs.NumberOfArgs > 0) {
1477       IRArgs.FirstArgIndex = IRArgNo;
1478       IRArgNo += IRArgs.NumberOfArgs;
1479     }
1480 
1481     // Skip over the sret parameter when it comes second.  We already handled it
1482     // above.
1483     if (IRArgNo == 1 && SwapThisWithSRet)
1484       IRArgNo++;
1485   }
1486   assert(ArgNo == ArgInfo.size());
1487 
1488   if (FI.usesInAlloca())
1489     InallocaArgNo = IRArgNo++;
1490 
1491   TotalIRArgs = IRArgNo;
1492 }
1493 }  // namespace
1494 
1495 /***/
1496 
1497 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1498   const auto &RI = FI.getReturnInfo();
1499   return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1500 }
1501 
1502 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1503   return ReturnTypeUsesSRet(FI) &&
1504          getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1505 }
1506 
1507 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1508   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1509     switch (BT->getKind()) {
1510     default:
1511       return false;
1512     case BuiltinType::Float:
1513       return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1514     case BuiltinType::Double:
1515       return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1516     case BuiltinType::LongDouble:
1517       return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1518     }
1519   }
1520 
1521   return false;
1522 }
1523 
1524 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1525   if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1526     if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1527       if (BT->getKind() == BuiltinType::LongDouble)
1528         return getTarget().useObjCFP2RetForComplexLongDouble();
1529     }
1530   }
1531 
1532   return false;
1533 }
1534 
1535 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1536   const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1537   return GetFunctionType(FI);
1538 }
1539 
1540 llvm::FunctionType *
1541 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1542 
1543   bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1544   (void)Inserted;
1545   assert(Inserted && "Recursively being processed?");
1546 
1547   llvm::Type *resultType = nullptr;
1548   const ABIArgInfo &retAI = FI.getReturnInfo();
1549   switch (retAI.getKind()) {
1550   case ABIArgInfo::Expand:
1551     llvm_unreachable("Invalid ABI kind for return argument");
1552 
1553   case ABIArgInfo::Extend:
1554   case ABIArgInfo::Direct:
1555     resultType = retAI.getCoerceToType();
1556     break;
1557 
1558   case ABIArgInfo::InAlloca:
1559     if (retAI.getInAllocaSRet()) {
1560       // sret things on win32 aren't void, they return the sret pointer.
1561       QualType ret = FI.getReturnType();
1562       llvm::Type *ty = ConvertType(ret);
1563       unsigned addressSpace = Context.getTargetAddressSpace(ret);
1564       resultType = llvm::PointerType::get(ty, addressSpace);
1565     } else {
1566       resultType = llvm::Type::getVoidTy(getLLVMContext());
1567     }
1568     break;
1569 
1570   case ABIArgInfo::Indirect:
1571   case ABIArgInfo::Ignore:
1572     resultType = llvm::Type::getVoidTy(getLLVMContext());
1573     break;
1574 
1575   case ABIArgInfo::CoerceAndExpand:
1576     resultType = retAI.getUnpaddedCoerceAndExpandType();
1577     break;
1578   }
1579 
1580   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1581   SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1582 
1583   // Add type for sret argument.
1584   if (IRFunctionArgs.hasSRetArg()) {
1585     QualType Ret = FI.getReturnType();
1586     llvm::Type *Ty = ConvertType(Ret);
1587     unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1588     ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1589         llvm::PointerType::get(Ty, AddressSpace);
1590   }
1591 
1592   // Add type for inalloca argument.
1593   if (IRFunctionArgs.hasInallocaArg()) {
1594     auto ArgStruct = FI.getArgStruct();
1595     assert(ArgStruct);
1596     ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1597   }
1598 
1599   // Add in all of the required arguments.
1600   unsigned ArgNo = 0;
1601   CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1602                                      ie = it + FI.getNumRequiredArgs();
1603   for (; it != ie; ++it, ++ArgNo) {
1604     const ABIArgInfo &ArgInfo = it->info;
1605 
1606     // Insert a padding type to ensure proper alignment.
1607     if (IRFunctionArgs.hasPaddingArg(ArgNo))
1608       ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1609           ArgInfo.getPaddingType();
1610 
1611     unsigned FirstIRArg, NumIRArgs;
1612     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1613 
1614     switch (ArgInfo.getKind()) {
1615     case ABIArgInfo::Ignore:
1616     case ABIArgInfo::InAlloca:
1617       assert(NumIRArgs == 0);
1618       break;
1619 
1620     case ABIArgInfo::Indirect: {
1621       assert(NumIRArgs == 1);
1622       // indirect arguments are always on the stack, which is alloca addr space.
1623       llvm::Type *LTy = ConvertTypeForMem(it->type);
1624       ArgTypes[FirstIRArg] = LTy->getPointerTo(
1625           CGM.getDataLayout().getAllocaAddrSpace());
1626       break;
1627     }
1628 
1629     case ABIArgInfo::Extend:
1630     case ABIArgInfo::Direct: {
1631       // Fast-isel and the optimizer generally like scalar values better than
1632       // FCAs, so we flatten them if this is safe to do for this argument.
1633       llvm::Type *argType = ArgInfo.getCoerceToType();
1634       llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1635       if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1636         assert(NumIRArgs == st->getNumElements());
1637         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1638           ArgTypes[FirstIRArg + i] = st->getElementType(i);
1639       } else {
1640         assert(NumIRArgs == 1);
1641         ArgTypes[FirstIRArg] = argType;
1642       }
1643       break;
1644     }
1645 
1646     case ABIArgInfo::CoerceAndExpand: {
1647       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1648       for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1649         *ArgTypesIter++ = EltTy;
1650       }
1651       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1652       break;
1653     }
1654 
1655     case ABIArgInfo::Expand:
1656       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1657       getExpandedTypes(it->type, ArgTypesIter);
1658       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1659       break;
1660     }
1661   }
1662 
1663   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1664   assert(Erased && "Not in set?");
1665 
1666   return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1667 }
1668 
1669 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1670   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1671   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1672 
1673   if (!isFuncTypeConvertible(FPT))
1674     return llvm::StructType::get(getLLVMContext());
1675 
1676   return GetFunctionType(GD);
1677 }
1678 
1679 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1680                                                llvm::AttrBuilder &FuncAttrs,
1681                                                const FunctionProtoType *FPT) {
1682   if (!FPT)
1683     return;
1684 
1685   if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1686       FPT->isNothrow())
1687     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1688 }
1689 
1690 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1691                                                bool AttrOnCallSite,
1692                                                llvm::AttrBuilder &FuncAttrs) {
1693   // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1694   if (!HasOptnone) {
1695     if (CodeGenOpts.OptimizeSize)
1696       FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1697     if (CodeGenOpts.OptimizeSize == 2)
1698       FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1699   }
1700 
1701   if (CodeGenOpts.DisableRedZone)
1702     FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1703   if (CodeGenOpts.IndirectTlsSegRefs)
1704     FuncAttrs.addAttribute("indirect-tls-seg-refs");
1705   if (CodeGenOpts.NoImplicitFloat)
1706     FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1707 
1708   if (AttrOnCallSite) {
1709     // Attributes that should go on the call site only.
1710     if (!CodeGenOpts.SimplifyLibCalls ||
1711         CodeGenOpts.isNoBuiltinFunc(Name.data()))
1712       FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1713     if (!CodeGenOpts.TrapFuncName.empty())
1714       FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1715   } else {
1716     StringRef FpKind;
1717     switch (CodeGenOpts.getFramePointer()) {
1718     case CodeGenOptions::FramePointerKind::None:
1719       FpKind = "none";
1720       break;
1721     case CodeGenOptions::FramePointerKind::NonLeaf:
1722       FpKind = "non-leaf";
1723       break;
1724     case CodeGenOptions::FramePointerKind::All:
1725       FpKind = "all";
1726       break;
1727     }
1728     FuncAttrs.addAttribute("frame-pointer", FpKind);
1729 
1730     FuncAttrs.addAttribute("less-precise-fpmad",
1731                            llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1732 
1733     if (CodeGenOpts.NullPointerIsValid)
1734       FuncAttrs.addAttribute("null-pointer-is-valid", "true");
1735     if (!CodeGenOpts.FPDenormalMode.empty())
1736       FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1737 
1738     FuncAttrs.addAttribute("no-trapping-math",
1739                            llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1740 
1741     // Strict (compliant) code is the default, so only add this attribute to
1742     // indicate that we are trying to workaround a problem case.
1743     if (!CodeGenOpts.StrictFloatCastOverflow)
1744       FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1745 
1746     // TODO: Are these all needed?
1747     // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1748     FuncAttrs.addAttribute("no-infs-fp-math",
1749                            llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1750     FuncAttrs.addAttribute("no-nans-fp-math",
1751                            llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1752     FuncAttrs.addAttribute("unsafe-fp-math",
1753                            llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1754     FuncAttrs.addAttribute("use-soft-float",
1755                            llvm::toStringRef(CodeGenOpts.SoftFloat));
1756     FuncAttrs.addAttribute("stack-protector-buffer-size",
1757                            llvm::utostr(CodeGenOpts.SSPBufferSize));
1758     FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1759                            llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1760     FuncAttrs.addAttribute(
1761         "correctly-rounded-divide-sqrt-fp-math",
1762         llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1763 
1764     if (getLangOpts().OpenCL)
1765       FuncAttrs.addAttribute("denorms-are-zero",
1766                              llvm::toStringRef(CodeGenOpts.FlushDenorm));
1767 
1768     // TODO: Reciprocal estimate codegen options should apply to instructions?
1769     const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1770     if (!Recips.empty())
1771       FuncAttrs.addAttribute("reciprocal-estimates",
1772                              llvm::join(Recips, ","));
1773 
1774     if (!CodeGenOpts.PreferVectorWidth.empty() &&
1775         CodeGenOpts.PreferVectorWidth != "none")
1776       FuncAttrs.addAttribute("prefer-vector-width",
1777                              CodeGenOpts.PreferVectorWidth);
1778 
1779     if (CodeGenOpts.StackRealignment)
1780       FuncAttrs.addAttribute("stackrealign");
1781     if (CodeGenOpts.Backchain)
1782       FuncAttrs.addAttribute("backchain");
1783 
1784     if (CodeGenOpts.SpeculativeLoadHardening)
1785       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1786   }
1787 
1788   if (getLangOpts().assumeFunctionsAreConvergent()) {
1789     // Conservatively, mark all functions and calls in CUDA and OpenCL as
1790     // convergent (meaning, they may call an intrinsically convergent op, such
1791     // as __syncthreads() / barrier(), and so can't have certain optimizations
1792     // applied around them).  LLVM will remove this attribute where it safely
1793     // can.
1794     FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1795   }
1796 
1797   if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1798     // Exceptions aren't supported in CUDA device code.
1799     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1800 
1801     // Respect -fcuda-flush-denormals-to-zero.
1802     if (CodeGenOpts.FlushDenorm)
1803       FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1804   }
1805 
1806   for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1807     StringRef Var, Value;
1808     std::tie(Var, Value) = Attr.split('=');
1809     FuncAttrs.addAttribute(Var, Value);
1810   }
1811 }
1812 
1813 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1814   llvm::AttrBuilder FuncAttrs;
1815   ConstructDefaultFnAttrList(F.getName(), F.hasOptNone(),
1816                              /* AttrOnCallSite = */ false, FuncAttrs);
1817   F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1818 }
1819 
1820 void CodeGenModule::ConstructAttributeList(
1821     StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1822     llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1823   llvm::AttrBuilder FuncAttrs;
1824   llvm::AttrBuilder RetAttrs;
1825 
1826   CallingConv = FI.getEffectiveCallingConvention();
1827   if (FI.isNoReturn())
1828     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1829 
1830   // If we have information about the function prototype, we can learn
1831   // attributes from there.
1832   AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1833                                      CalleeInfo.getCalleeFunctionProtoType());
1834 
1835   const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
1836 
1837   bool HasOptnone = false;
1838   // FIXME: handle sseregparm someday...
1839   if (TargetDecl) {
1840     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1841       FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1842     if (TargetDecl->hasAttr<NoThrowAttr>())
1843       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1844     if (TargetDecl->hasAttr<NoReturnAttr>())
1845       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1846     if (TargetDecl->hasAttr<ColdAttr>())
1847       FuncAttrs.addAttribute(llvm::Attribute::Cold);
1848     if (TargetDecl->hasAttr<NoDuplicateAttr>())
1849       FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1850     if (TargetDecl->hasAttr<ConvergentAttr>())
1851       FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1852 
1853     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1854       AddAttributesFromFunctionProtoType(
1855           getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1856       const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1857       const bool IsVirtualCall = MD && MD->isVirtual();
1858       // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
1859       // virtual function. These attributes are not inherited by overloads.
1860       if (!(AttrOnCallSite && IsVirtualCall)) {
1861         if (Fn->isNoReturn())
1862           FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1863 
1864         if (const auto *NBA = TargetDecl->getAttr<NoBuiltinAttr>()) {
1865           bool HasWildcard = llvm::is_contained(NBA->builtinNames(), "*");
1866           if (HasWildcard)
1867             FuncAttrs.addAttribute("no-builtins");
1868           else
1869             for (StringRef BuiltinName : NBA->builtinNames()) {
1870               SmallString<32> AttributeName;
1871               AttributeName += "no-builtin-";
1872               AttributeName += BuiltinName;
1873               FuncAttrs.addAttribute(AttributeName);
1874             }
1875         }
1876       }
1877     }
1878 
1879     // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1880     if (TargetDecl->hasAttr<ConstAttr>()) {
1881       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1882       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1883     } else if (TargetDecl->hasAttr<PureAttr>()) {
1884       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1885       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1886     } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1887       FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1888       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1889     }
1890     if (TargetDecl->hasAttr<RestrictAttr>())
1891       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1892     if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1893         !CodeGenOpts.NullPointerIsValid)
1894       RetAttrs.addAttribute(llvm::Attribute::NonNull);
1895     if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1896       FuncAttrs.addAttribute("no_caller_saved_registers");
1897     if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1898       FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1899 
1900     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1901     if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1902       Optional<unsigned> NumElemsParam;
1903       if (AllocSize->getNumElemsParam().isValid())
1904         NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1905       FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1906                                  NumElemsParam);
1907     }
1908   }
1909 
1910   ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1911 
1912   // This must run after constructing the default function attribute list
1913   // to ensure that the speculative load hardening attribute is removed
1914   // in the case where the -mspeculative-load-hardening flag was passed.
1915   if (TargetDecl) {
1916     if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
1917       FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
1918     if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
1919       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1920   }
1921 
1922   if (CodeGenOpts.EnableSegmentedStacks &&
1923       !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1924     FuncAttrs.addAttribute("split-stack");
1925 
1926   // Add NonLazyBind attribute to function declarations when -fno-plt
1927   // is used.
1928   if (TargetDecl && CodeGenOpts.NoPLT) {
1929     if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1930       if (!Fn->isDefined() && !AttrOnCallSite) {
1931         FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1932       }
1933     }
1934   }
1935 
1936   if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1937     if (getLangOpts().OpenCLVersion <= 120) {
1938       // OpenCL v1.2 Work groups are always uniform
1939       FuncAttrs.addAttribute("uniform-work-group-size", "true");
1940     } else {
1941       // OpenCL v2.0 Work groups may be whether uniform or not.
1942       // '-cl-uniform-work-group-size' compile option gets a hint
1943       // to the compiler that the global work-size be a multiple of
1944       // the work-group size specified to clEnqueueNDRangeKernel
1945       // (i.e. work groups are uniform).
1946       FuncAttrs.addAttribute("uniform-work-group-size",
1947                              llvm::toStringRef(CodeGenOpts.UniformWGSize));
1948     }
1949   }
1950 
1951   if (!AttrOnCallSite) {
1952     bool DisableTailCalls = false;
1953 
1954     if (CodeGenOpts.DisableTailCalls)
1955       DisableTailCalls = true;
1956     else if (TargetDecl) {
1957       if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1958           TargetDecl->hasAttr<AnyX86InterruptAttr>())
1959         DisableTailCalls = true;
1960       else if (CodeGenOpts.NoEscapingBlockTailCalls) {
1961         if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
1962           if (!BD->doesNotEscape())
1963             DisableTailCalls = true;
1964       }
1965     }
1966 
1967     FuncAttrs.addAttribute("disable-tail-calls",
1968                            llvm::toStringRef(DisableTailCalls));
1969     GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
1970   }
1971 
1972   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1973 
1974   QualType RetTy = FI.getReturnType();
1975   const ABIArgInfo &RetAI = FI.getReturnInfo();
1976   switch (RetAI.getKind()) {
1977   case ABIArgInfo::Extend:
1978     if (RetAI.isSignExt())
1979       RetAttrs.addAttribute(llvm::Attribute::SExt);
1980     else
1981       RetAttrs.addAttribute(llvm::Attribute::ZExt);
1982     LLVM_FALLTHROUGH;
1983   case ABIArgInfo::Direct:
1984     if (RetAI.getInReg())
1985       RetAttrs.addAttribute(llvm::Attribute::InReg);
1986     break;
1987   case ABIArgInfo::Ignore:
1988     break;
1989 
1990   case ABIArgInfo::InAlloca:
1991   case ABIArgInfo::Indirect: {
1992     // inalloca and sret disable readnone and readonly
1993     FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1994       .removeAttribute(llvm::Attribute::ReadNone);
1995     break;
1996   }
1997 
1998   case ABIArgInfo::CoerceAndExpand:
1999     break;
2000 
2001   case ABIArgInfo::Expand:
2002     llvm_unreachable("Invalid ABI kind for return argument");
2003   }
2004 
2005   if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2006     QualType PTy = RefTy->getPointeeType();
2007     if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2008       RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2009                                         .getQuantity());
2010     else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2011              !CodeGenOpts.NullPointerIsValid)
2012       RetAttrs.addAttribute(llvm::Attribute::NonNull);
2013   }
2014 
2015   bool hasUsedSRet = false;
2016   SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2017 
2018   // Attach attributes to sret.
2019   if (IRFunctionArgs.hasSRetArg()) {
2020     llvm::AttrBuilder SRETAttrs;
2021     SRETAttrs.addAttribute(llvm::Attribute::StructRet);
2022     hasUsedSRet = true;
2023     if (RetAI.getInReg())
2024       SRETAttrs.addAttribute(llvm::Attribute::InReg);
2025     ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2026         llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2027   }
2028 
2029   // Attach attributes to inalloca argument.
2030   if (IRFunctionArgs.hasInallocaArg()) {
2031     llvm::AttrBuilder Attrs;
2032     Attrs.addAttribute(llvm::Attribute::InAlloca);
2033     ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2034         llvm::AttributeSet::get(getLLVMContext(), Attrs);
2035   }
2036 
2037   unsigned ArgNo = 0;
2038   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2039                                           E = FI.arg_end();
2040        I != E; ++I, ++ArgNo) {
2041     QualType ParamType = I->type;
2042     const ABIArgInfo &AI = I->info;
2043     llvm::AttrBuilder Attrs;
2044 
2045     // Add attribute for padding argument, if necessary.
2046     if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2047       if (AI.getPaddingInReg()) {
2048         ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2049             llvm::AttributeSet::get(
2050                 getLLVMContext(),
2051                 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2052       }
2053     }
2054 
2055     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2056     // have the corresponding parameter variable.  It doesn't make
2057     // sense to do it here because parameters are so messed up.
2058     switch (AI.getKind()) {
2059     case ABIArgInfo::Extend:
2060       if (AI.isSignExt())
2061         Attrs.addAttribute(llvm::Attribute::SExt);
2062       else
2063         Attrs.addAttribute(llvm::Attribute::ZExt);
2064       LLVM_FALLTHROUGH;
2065     case ABIArgInfo::Direct:
2066       if (ArgNo == 0 && FI.isChainCall())
2067         Attrs.addAttribute(llvm::Attribute::Nest);
2068       else if (AI.getInReg())
2069         Attrs.addAttribute(llvm::Attribute::InReg);
2070       break;
2071 
2072     case ABIArgInfo::Indirect: {
2073       if (AI.getInReg())
2074         Attrs.addAttribute(llvm::Attribute::InReg);
2075 
2076       if (AI.getIndirectByVal())
2077         Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
2078 
2079       CharUnits Align = AI.getIndirectAlign();
2080 
2081       // In a byval argument, it is important that the required
2082       // alignment of the type is honored, as LLVM might be creating a
2083       // *new* stack object, and needs to know what alignment to give
2084       // it. (Sometimes it can deduce a sensible alignment on its own,
2085       // but not if clang decides it must emit a packed struct, or the
2086       // user specifies increased alignment requirements.)
2087       //
2088       // This is different from indirect *not* byval, where the object
2089       // exists already, and the align attribute is purely
2090       // informative.
2091       assert(!Align.isZero());
2092 
2093       // For now, only add this when we have a byval argument.
2094       // TODO: be less lazy about updating test cases.
2095       if (AI.getIndirectByVal())
2096         Attrs.addAlignmentAttr(Align.getQuantity());
2097 
2098       // byval disables readnone and readonly.
2099       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2100         .removeAttribute(llvm::Attribute::ReadNone);
2101       break;
2102     }
2103     case ABIArgInfo::Ignore:
2104     case ABIArgInfo::Expand:
2105     case ABIArgInfo::CoerceAndExpand:
2106       break;
2107 
2108     case ABIArgInfo::InAlloca:
2109       // inalloca disables readnone and readonly.
2110       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2111           .removeAttribute(llvm::Attribute::ReadNone);
2112       continue;
2113     }
2114 
2115     if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2116       QualType PTy = RefTy->getPointeeType();
2117       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2118         Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2119                                        .getQuantity());
2120       else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2121                !CodeGenOpts.NullPointerIsValid)
2122         Attrs.addAttribute(llvm::Attribute::NonNull);
2123     }
2124 
2125     switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2126     case ParameterABI::Ordinary:
2127       break;
2128 
2129     case ParameterABI::SwiftIndirectResult: {
2130       // Add 'sret' if we haven't already used it for something, but
2131       // only if the result is void.
2132       if (!hasUsedSRet && RetTy->isVoidType()) {
2133         Attrs.addAttribute(llvm::Attribute::StructRet);
2134         hasUsedSRet = true;
2135       }
2136 
2137       // Add 'noalias' in either case.
2138       Attrs.addAttribute(llvm::Attribute::NoAlias);
2139 
2140       // Add 'dereferenceable' and 'alignment'.
2141       auto PTy = ParamType->getPointeeType();
2142       if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2143         auto info = getContext().getTypeInfoInChars(PTy);
2144         Attrs.addDereferenceableAttr(info.first.getQuantity());
2145         Attrs.addAttribute(llvm::Attribute::getWithAlignment(
2146             getLLVMContext(), info.second.getAsAlign()));
2147       }
2148       break;
2149     }
2150 
2151     case ParameterABI::SwiftErrorResult:
2152       Attrs.addAttribute(llvm::Attribute::SwiftError);
2153       break;
2154 
2155     case ParameterABI::SwiftContext:
2156       Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2157       break;
2158     }
2159 
2160     if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2161       Attrs.addAttribute(llvm::Attribute::NoCapture);
2162 
2163     if (Attrs.hasAttributes()) {
2164       unsigned FirstIRArg, NumIRArgs;
2165       std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2166       for (unsigned i = 0; i < NumIRArgs; i++)
2167         ArgAttrs[FirstIRArg + i] =
2168             llvm::AttributeSet::get(getLLVMContext(), Attrs);
2169     }
2170   }
2171   assert(ArgNo == FI.arg_size());
2172 
2173   AttrList = llvm::AttributeList::get(
2174       getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2175       llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2176 }
2177 
2178 /// An argument came in as a promoted argument; demote it back to its
2179 /// declared type.
2180 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2181                                          const VarDecl *var,
2182                                          llvm::Value *value) {
2183   llvm::Type *varType = CGF.ConvertType(var->getType());
2184 
2185   // This can happen with promotions that actually don't change the
2186   // underlying type, like the enum promotions.
2187   if (value->getType() == varType) return value;
2188 
2189   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2190          && "unexpected promotion type");
2191 
2192   if (isa<llvm::IntegerType>(varType))
2193     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2194 
2195   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2196 }
2197 
2198 /// Returns the attribute (either parameter attribute, or function
2199 /// attribute), which declares argument ArgNo to be non-null.
2200 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2201                                          QualType ArgType, unsigned ArgNo) {
2202   // FIXME: __attribute__((nonnull)) can also be applied to:
2203   //   - references to pointers, where the pointee is known to be
2204   //     nonnull (apparently a Clang extension)
2205   //   - transparent unions containing pointers
2206   // In the former case, LLVM IR cannot represent the constraint. In
2207   // the latter case, we have no guarantee that the transparent union
2208   // is in fact passed as a pointer.
2209   if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2210     return nullptr;
2211   // First, check attribute on parameter itself.
2212   if (PVD) {
2213     if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2214       return ParmNNAttr;
2215   }
2216   // Check function attributes.
2217   if (!FD)
2218     return nullptr;
2219   for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2220     if (NNAttr->isNonNull(ArgNo))
2221       return NNAttr;
2222   }
2223   return nullptr;
2224 }
2225 
2226 namespace {
2227   struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2228     Address Temp;
2229     Address Arg;
2230     CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2231     void Emit(CodeGenFunction &CGF, Flags flags) override {
2232       llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2233       CGF.Builder.CreateStore(errorValue, Arg);
2234     }
2235   };
2236 }
2237 
2238 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2239                                          llvm::Function *Fn,
2240                                          const FunctionArgList &Args) {
2241   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2242     // Naked functions don't have prologues.
2243     return;
2244 
2245   // If this is an implicit-return-zero function, go ahead and
2246   // initialize the return value.  TODO: it might be nice to have
2247   // a more general mechanism for this that didn't require synthesized
2248   // return statements.
2249   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2250     if (FD->hasImplicitReturnZero()) {
2251       QualType RetTy = FD->getReturnType().getUnqualifiedType();
2252       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2253       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2254       Builder.CreateStore(Zero, ReturnValue);
2255     }
2256   }
2257 
2258   // FIXME: We no longer need the types from FunctionArgList; lift up and
2259   // simplify.
2260 
2261   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2262   // Flattened function arguments.
2263   SmallVector<llvm::Value *, 16> FnArgs;
2264   FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2265   for (auto &Arg : Fn->args()) {
2266     FnArgs.push_back(&Arg);
2267   }
2268   assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2269 
2270   // If we're using inalloca, all the memory arguments are GEPs off of the last
2271   // parameter, which is a pointer to the complete memory area.
2272   Address ArgStruct = Address::invalid();
2273   if (IRFunctionArgs.hasInallocaArg()) {
2274     ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2275                         FI.getArgStructAlignment());
2276 
2277     assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2278   }
2279 
2280   // Name the struct return parameter.
2281   if (IRFunctionArgs.hasSRetArg()) {
2282     auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2283     AI->setName("agg.result");
2284     AI->addAttr(llvm::Attribute::NoAlias);
2285   }
2286 
2287   // Track if we received the parameter as a pointer (indirect, byval, or
2288   // inalloca).  If already have a pointer, EmitParmDecl doesn't need to copy it
2289   // into a local alloca for us.
2290   SmallVector<ParamValue, 16> ArgVals;
2291   ArgVals.reserve(Args.size());
2292 
2293   // Create a pointer value for every parameter declaration.  This usually
2294   // entails copying one or more LLVM IR arguments into an alloca.  Don't push
2295   // any cleanups or do anything that might unwind.  We do that separately, so
2296   // we can push the cleanups in the correct order for the ABI.
2297   assert(FI.arg_size() == Args.size() &&
2298          "Mismatch between function signature & arguments.");
2299   unsigned ArgNo = 0;
2300   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2301   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2302        i != e; ++i, ++info_it, ++ArgNo) {
2303     const VarDecl *Arg = *i;
2304     const ABIArgInfo &ArgI = info_it->info;
2305 
2306     bool isPromoted =
2307       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2308     // We are converting from ABIArgInfo type to VarDecl type directly, unless
2309     // the parameter is promoted. In this case we convert to
2310     // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2311     QualType Ty = isPromoted ? info_it->type : Arg->getType();
2312     assert(hasScalarEvaluationKind(Ty) ==
2313            hasScalarEvaluationKind(Arg->getType()));
2314 
2315     unsigned FirstIRArg, NumIRArgs;
2316     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2317 
2318     switch (ArgI.getKind()) {
2319     case ABIArgInfo::InAlloca: {
2320       assert(NumIRArgs == 0);
2321       auto FieldIndex = ArgI.getInAllocaFieldIndex();
2322       Address V =
2323           Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2324       ArgVals.push_back(ParamValue::forIndirect(V));
2325       break;
2326     }
2327 
2328     case ABIArgInfo::Indirect: {
2329       assert(NumIRArgs == 1);
2330       Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2331 
2332       if (!hasScalarEvaluationKind(Ty)) {
2333         // Aggregates and complex variables are accessed by reference.  All we
2334         // need to do is realign the value, if requested.
2335         Address V = ParamAddr;
2336         if (ArgI.getIndirectRealign()) {
2337           Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2338 
2339           // Copy from the incoming argument pointer to the temporary with the
2340           // appropriate alignment.
2341           //
2342           // FIXME: We should have a common utility for generating an aggregate
2343           // copy.
2344           CharUnits Size = getContext().getTypeSizeInChars(Ty);
2345           auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2346           Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2347           Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2348           Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2349           V = AlignedTemp;
2350         }
2351         ArgVals.push_back(ParamValue::forIndirect(V));
2352       } else {
2353         // Load scalar value from indirect argument.
2354         llvm::Value *V =
2355             EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2356 
2357         if (isPromoted)
2358           V = emitArgumentDemotion(*this, Arg, V);
2359         ArgVals.push_back(ParamValue::forDirect(V));
2360       }
2361       break;
2362     }
2363 
2364     case ABIArgInfo::Extend:
2365     case ABIArgInfo::Direct: {
2366 
2367       // If we have the trivial case, handle it with no muss and fuss.
2368       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2369           ArgI.getCoerceToType() == ConvertType(Ty) &&
2370           ArgI.getDirectOffset() == 0) {
2371         assert(NumIRArgs == 1);
2372         llvm::Value *V = FnArgs[FirstIRArg];
2373         auto AI = cast<llvm::Argument>(V);
2374 
2375         if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2376           if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2377                              PVD->getFunctionScopeIndex()) &&
2378               !CGM.getCodeGenOpts().NullPointerIsValid)
2379             AI->addAttr(llvm::Attribute::NonNull);
2380 
2381           QualType OTy = PVD->getOriginalType();
2382           if (const auto *ArrTy =
2383               getContext().getAsConstantArrayType(OTy)) {
2384             // A C99 array parameter declaration with the static keyword also
2385             // indicates dereferenceability, and if the size is constant we can
2386             // use the dereferenceable attribute (which requires the size in
2387             // bytes).
2388             if (ArrTy->getSizeModifier() == ArrayType::Static) {
2389               QualType ETy = ArrTy->getElementType();
2390               uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2391               if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2392                   ArrSize) {
2393                 llvm::AttrBuilder Attrs;
2394                 Attrs.addDereferenceableAttr(
2395                   getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2396                 AI->addAttrs(Attrs);
2397               } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
2398                          !CGM.getCodeGenOpts().NullPointerIsValid) {
2399                 AI->addAttr(llvm::Attribute::NonNull);
2400               }
2401             }
2402           } else if (const auto *ArrTy =
2403                      getContext().getAsVariableArrayType(OTy)) {
2404             // For C99 VLAs with the static keyword, we don't know the size so
2405             // we can't use the dereferenceable attribute, but in addrspace(0)
2406             // we know that it must be nonnull.
2407             if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2408                 !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
2409                 !CGM.getCodeGenOpts().NullPointerIsValid)
2410               AI->addAttr(llvm::Attribute::NonNull);
2411           }
2412 
2413           const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2414           if (!AVAttr)
2415             if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2416               AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2417           if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2418             // If alignment-assumption sanitizer is enabled, we do *not* add
2419             // alignment attribute here, but emit normal alignment assumption,
2420             // so the UBSAN check could function.
2421             llvm::Value *AlignmentValue =
2422               EmitScalarExpr(AVAttr->getAlignment());
2423             llvm::ConstantInt *AlignmentCI =
2424               cast<llvm::ConstantInt>(AlignmentValue);
2425             unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2426                                           +llvm::Value::MaximumAlignment);
2427             AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2428           }
2429         }
2430 
2431         if (Arg->getType().isRestrictQualified())
2432           AI->addAttr(llvm::Attribute::NoAlias);
2433 
2434         // LLVM expects swifterror parameters to be used in very restricted
2435         // ways.  Copy the value into a less-restricted temporary.
2436         if (FI.getExtParameterInfo(ArgNo).getABI()
2437               == ParameterABI::SwiftErrorResult) {
2438           QualType pointeeTy = Ty->getPointeeType();
2439           assert(pointeeTy->isPointerType());
2440           Address temp =
2441             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2442           Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2443           llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2444           Builder.CreateStore(incomingErrorValue, temp);
2445           V = temp.getPointer();
2446 
2447           // Push a cleanup to copy the value back at the end of the function.
2448           // The convention does not guarantee that the value will be written
2449           // back if the function exits with an unwind exception.
2450           EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2451         }
2452 
2453         // Ensure the argument is the correct type.
2454         if (V->getType() != ArgI.getCoerceToType())
2455           V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2456 
2457         if (isPromoted)
2458           V = emitArgumentDemotion(*this, Arg, V);
2459 
2460         // Because of merging of function types from multiple decls it is
2461         // possible for the type of an argument to not match the corresponding
2462         // type in the function type. Since we are codegening the callee
2463         // in here, add a cast to the argument type.
2464         llvm::Type *LTy = ConvertType(Arg->getType());
2465         if (V->getType() != LTy)
2466           V = Builder.CreateBitCast(V, LTy);
2467 
2468         ArgVals.push_back(ParamValue::forDirect(V));
2469         break;
2470       }
2471 
2472       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2473                                      Arg->getName());
2474 
2475       // Pointer to store into.
2476       Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2477 
2478       // Fast-isel and the optimizer generally like scalar values better than
2479       // FCAs, so we flatten them if this is safe to do for this argument.
2480       llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2481       if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2482           STy->getNumElements() > 1) {
2483         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2484         llvm::Type *DstTy = Ptr.getElementType();
2485         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2486 
2487         Address AddrToStoreInto = Address::invalid();
2488         if (SrcSize <= DstSize) {
2489           AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2490         } else {
2491           AddrToStoreInto =
2492             CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2493         }
2494 
2495         assert(STy->getNumElements() == NumIRArgs);
2496         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2497           auto AI = FnArgs[FirstIRArg + i];
2498           AI->setName(Arg->getName() + ".coerce" + Twine(i));
2499           Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
2500           Builder.CreateStore(AI, EltPtr);
2501         }
2502 
2503         if (SrcSize > DstSize) {
2504           Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2505         }
2506 
2507       } else {
2508         // Simple case, just do a coerced store of the argument into the alloca.
2509         assert(NumIRArgs == 1);
2510         auto AI = FnArgs[FirstIRArg];
2511         AI->setName(Arg->getName() + ".coerce");
2512         CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
2513       }
2514 
2515       // Match to what EmitParmDecl is expecting for this type.
2516       if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2517         llvm::Value *V =
2518             EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2519         if (isPromoted)
2520           V = emitArgumentDemotion(*this, Arg, V);
2521         ArgVals.push_back(ParamValue::forDirect(V));
2522       } else {
2523         ArgVals.push_back(ParamValue::forIndirect(Alloca));
2524       }
2525       break;
2526     }
2527 
2528     case ABIArgInfo::CoerceAndExpand: {
2529       // Reconstruct into a temporary.
2530       Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2531       ArgVals.push_back(ParamValue::forIndirect(alloca));
2532 
2533       auto coercionType = ArgI.getCoerceAndExpandType();
2534       alloca = Builder.CreateElementBitCast(alloca, coercionType);
2535 
2536       unsigned argIndex = FirstIRArg;
2537       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2538         llvm::Type *eltType = coercionType->getElementType(i);
2539         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2540           continue;
2541 
2542         auto eltAddr = Builder.CreateStructGEP(alloca, i);
2543         auto elt = FnArgs[argIndex++];
2544         Builder.CreateStore(elt, eltAddr);
2545       }
2546       assert(argIndex == FirstIRArg + NumIRArgs);
2547       break;
2548     }
2549 
2550     case ABIArgInfo::Expand: {
2551       // If this structure was expanded into multiple arguments then
2552       // we need to create a temporary and reconstruct it from the
2553       // arguments.
2554       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2555       LValue LV = MakeAddrLValue(Alloca, Ty);
2556       ArgVals.push_back(ParamValue::forIndirect(Alloca));
2557 
2558       auto FnArgIter = FnArgs.begin() + FirstIRArg;
2559       ExpandTypeFromArgs(Ty, LV, FnArgIter);
2560       assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2561       for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2562         auto AI = FnArgs[FirstIRArg + i];
2563         AI->setName(Arg->getName() + "." + Twine(i));
2564       }
2565       break;
2566     }
2567 
2568     case ABIArgInfo::Ignore:
2569       assert(NumIRArgs == 0);
2570       // Initialize the local variable appropriately.
2571       if (!hasScalarEvaluationKind(Ty)) {
2572         ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2573       } else {
2574         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2575         ArgVals.push_back(ParamValue::forDirect(U));
2576       }
2577       break;
2578     }
2579   }
2580 
2581   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2582     for (int I = Args.size() - 1; I >= 0; --I)
2583       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2584   } else {
2585     for (unsigned I = 0, E = Args.size(); I != E; ++I)
2586       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2587   }
2588 }
2589 
2590 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2591   while (insn->use_empty()) {
2592     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2593     if (!bitcast) return;
2594 
2595     // This is "safe" because we would have used a ConstantExpr otherwise.
2596     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2597     bitcast->eraseFromParent();
2598   }
2599 }
2600 
2601 /// Try to emit a fused autorelease of a return result.
2602 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2603                                                     llvm::Value *result) {
2604   // We must be immediately followed the cast.
2605   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2606   if (BB->empty()) return nullptr;
2607   if (&BB->back() != result) return nullptr;
2608 
2609   llvm::Type *resultType = result->getType();
2610 
2611   // result is in a BasicBlock and is therefore an Instruction.
2612   llvm::Instruction *generator = cast<llvm::Instruction>(result);
2613 
2614   SmallVector<llvm::Instruction *, 4> InstsToKill;
2615 
2616   // Look for:
2617   //  %generator = bitcast %type1* %generator2 to %type2*
2618   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2619     // We would have emitted this as a constant if the operand weren't
2620     // an Instruction.
2621     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2622 
2623     // Require the generator to be immediately followed by the cast.
2624     if (generator->getNextNode() != bitcast)
2625       return nullptr;
2626 
2627     InstsToKill.push_back(bitcast);
2628   }
2629 
2630   // Look for:
2631   //   %generator = call i8* @objc_retain(i8* %originalResult)
2632   // or
2633   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2634   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2635   if (!call) return nullptr;
2636 
2637   bool doRetainAutorelease;
2638 
2639   if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2640     doRetainAutorelease = true;
2641   } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2642                                           .objc_retainAutoreleasedReturnValue) {
2643     doRetainAutorelease = false;
2644 
2645     // If we emitted an assembly marker for this call (and the
2646     // ARCEntrypoints field should have been set if so), go looking
2647     // for that call.  If we can't find it, we can't do this
2648     // optimization.  But it should always be the immediately previous
2649     // instruction, unless we needed bitcasts around the call.
2650     if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2651       llvm::Instruction *prev = call->getPrevNode();
2652       assert(prev);
2653       if (isa<llvm::BitCastInst>(prev)) {
2654         prev = prev->getPrevNode();
2655         assert(prev);
2656       }
2657       assert(isa<llvm::CallInst>(prev));
2658       assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2659                CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2660       InstsToKill.push_back(prev);
2661     }
2662   } else {
2663     return nullptr;
2664   }
2665 
2666   result = call->getArgOperand(0);
2667   InstsToKill.push_back(call);
2668 
2669   // Keep killing bitcasts, for sanity.  Note that we no longer care
2670   // about precise ordering as long as there's exactly one use.
2671   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2672     if (!bitcast->hasOneUse()) break;
2673     InstsToKill.push_back(bitcast);
2674     result = bitcast->getOperand(0);
2675   }
2676 
2677   // Delete all the unnecessary instructions, from latest to earliest.
2678   for (auto *I : InstsToKill)
2679     I->eraseFromParent();
2680 
2681   // Do the fused retain/autorelease if we were asked to.
2682   if (doRetainAutorelease)
2683     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2684 
2685   // Cast back to the result type.
2686   return CGF.Builder.CreateBitCast(result, resultType);
2687 }
2688 
2689 /// If this is a +1 of the value of an immutable 'self', remove it.
2690 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2691                                           llvm::Value *result) {
2692   // This is only applicable to a method with an immutable 'self'.
2693   const ObjCMethodDecl *method =
2694     dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2695   if (!method) return nullptr;
2696   const VarDecl *self = method->getSelfDecl();
2697   if (!self->getType().isConstQualified()) return nullptr;
2698 
2699   // Look for a retain call.
2700   llvm::CallInst *retainCall =
2701     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2702   if (!retainCall ||
2703       retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2704     return nullptr;
2705 
2706   // Look for an ordinary load of 'self'.
2707   llvm::Value *retainedValue = retainCall->getArgOperand(0);
2708   llvm::LoadInst *load =
2709     dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2710   if (!load || load->isAtomic() || load->isVolatile() ||
2711       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2712     return nullptr;
2713 
2714   // Okay!  Burn it all down.  This relies for correctness on the
2715   // assumption that the retain is emitted as part of the return and
2716   // that thereafter everything is used "linearly".
2717   llvm::Type *resultType = result->getType();
2718   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2719   assert(retainCall->use_empty());
2720   retainCall->eraseFromParent();
2721   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2722 
2723   return CGF.Builder.CreateBitCast(load, resultType);
2724 }
2725 
2726 /// Emit an ARC autorelease of the result of a function.
2727 ///
2728 /// \return the value to actually return from the function
2729 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2730                                             llvm::Value *result) {
2731   // If we're returning 'self', kill the initial retain.  This is a
2732   // heuristic attempt to "encourage correctness" in the really unfortunate
2733   // case where we have a return of self during a dealloc and we desperately
2734   // need to avoid the possible autorelease.
2735   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2736     return self;
2737 
2738   // At -O0, try to emit a fused retain/autorelease.
2739   if (CGF.shouldUseFusedARCCalls())
2740     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2741       return fused;
2742 
2743   return CGF.EmitARCAutoreleaseReturnValue(result);
2744 }
2745 
2746 /// Heuristically search for a dominating store to the return-value slot.
2747 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2748   // Check if a User is a store which pointerOperand is the ReturnValue.
2749   // We are looking for stores to the ReturnValue, not for stores of the
2750   // ReturnValue to some other location.
2751   auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2752     auto *SI = dyn_cast<llvm::StoreInst>(U);
2753     if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2754       return nullptr;
2755     // These aren't actually possible for non-coerced returns, and we
2756     // only care about non-coerced returns on this code path.
2757     assert(!SI->isAtomic() && !SI->isVolatile());
2758     return SI;
2759   };
2760   // If there are multiple uses of the return-value slot, just check
2761   // for something immediately preceding the IP.  Sometimes this can
2762   // happen with how we generate implicit-returns; it can also happen
2763   // with noreturn cleanups.
2764   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2765     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2766     if (IP->empty()) return nullptr;
2767     llvm::Instruction *I = &IP->back();
2768 
2769     // Skip lifetime markers
2770     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2771                                             IE = IP->rend();
2772          II != IE; ++II) {
2773       if (llvm::IntrinsicInst *Intrinsic =
2774               dyn_cast<llvm::IntrinsicInst>(&*II)) {
2775         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2776           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2777           ++II;
2778           if (II == IE)
2779             break;
2780           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2781             continue;
2782         }
2783       }
2784       I = &*II;
2785       break;
2786     }
2787 
2788     return GetStoreIfValid(I);
2789   }
2790 
2791   llvm::StoreInst *store =
2792       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2793   if (!store) return nullptr;
2794 
2795   // Now do a first-and-dirty dominance check: just walk up the
2796   // single-predecessors chain from the current insertion point.
2797   llvm::BasicBlock *StoreBB = store->getParent();
2798   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2799   while (IP != StoreBB) {
2800     if (!(IP = IP->getSinglePredecessor()))
2801       return nullptr;
2802   }
2803 
2804   // Okay, the store's basic block dominates the insertion point; we
2805   // can do our thing.
2806   return store;
2807 }
2808 
2809 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2810                                          bool EmitRetDbgLoc,
2811                                          SourceLocation EndLoc) {
2812   if (FI.isNoReturn()) {
2813     // Noreturn functions don't return.
2814     EmitUnreachable(EndLoc);
2815     return;
2816   }
2817 
2818   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2819     // Naked functions don't have epilogues.
2820     Builder.CreateUnreachable();
2821     return;
2822   }
2823 
2824   // Functions with no result always return void.
2825   if (!ReturnValue.isValid()) {
2826     Builder.CreateRetVoid();
2827     return;
2828   }
2829 
2830   llvm::DebugLoc RetDbgLoc;
2831   llvm::Value *RV = nullptr;
2832   QualType RetTy = FI.getReturnType();
2833   const ABIArgInfo &RetAI = FI.getReturnInfo();
2834 
2835   switch (RetAI.getKind()) {
2836   case ABIArgInfo::InAlloca:
2837     // Aggregrates get evaluated directly into the destination.  Sometimes we
2838     // need to return the sret value in a register, though.
2839     assert(hasAggregateEvaluationKind(RetTy));
2840     if (RetAI.getInAllocaSRet()) {
2841       llvm::Function::arg_iterator EI = CurFn->arg_end();
2842       --EI;
2843       llvm::Value *ArgStruct = &*EI;
2844       llvm::Value *SRet = Builder.CreateStructGEP(
2845           nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2846       RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2847     }
2848     break;
2849 
2850   case ABIArgInfo::Indirect: {
2851     auto AI = CurFn->arg_begin();
2852     if (RetAI.isSRetAfterThis())
2853       ++AI;
2854     switch (getEvaluationKind(RetTy)) {
2855     case TEK_Complex: {
2856       ComplexPairTy RT =
2857         EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2858       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2859                          /*isInit*/ true);
2860       break;
2861     }
2862     case TEK_Aggregate:
2863       // Do nothing; aggregrates get evaluated directly into the destination.
2864       break;
2865     case TEK_Scalar:
2866       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2867                         MakeNaturalAlignAddrLValue(&*AI, RetTy),
2868                         /*isInit*/ true);
2869       break;
2870     }
2871     break;
2872   }
2873 
2874   case ABIArgInfo::Extend:
2875   case ABIArgInfo::Direct:
2876     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2877         RetAI.getDirectOffset() == 0) {
2878       // The internal return value temp always will have pointer-to-return-type
2879       // type, just do a load.
2880 
2881       // If there is a dominating store to ReturnValue, we can elide
2882       // the load, zap the store, and usually zap the alloca.
2883       if (llvm::StoreInst *SI =
2884               findDominatingStoreToReturnValue(*this)) {
2885         // Reuse the debug location from the store unless there is
2886         // cleanup code to be emitted between the store and return
2887         // instruction.
2888         if (EmitRetDbgLoc && !AutoreleaseResult)
2889           RetDbgLoc = SI->getDebugLoc();
2890         // Get the stored value and nuke the now-dead store.
2891         RV = SI->getValueOperand();
2892         SI->eraseFromParent();
2893 
2894       // Otherwise, we have to do a simple load.
2895       } else {
2896         RV = Builder.CreateLoad(ReturnValue);
2897       }
2898     } else {
2899       // If the value is offset in memory, apply the offset now.
2900       Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2901 
2902       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2903     }
2904 
2905     // In ARC, end functions that return a retainable type with a call
2906     // to objc_autoreleaseReturnValue.
2907     if (AutoreleaseResult) {
2908 #ifndef NDEBUG
2909       // Type::isObjCRetainabletype has to be called on a QualType that hasn't
2910       // been stripped of the typedefs, so we cannot use RetTy here. Get the
2911       // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2912       // CurCodeDecl or BlockInfo.
2913       QualType RT;
2914 
2915       if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2916         RT = FD->getReturnType();
2917       else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2918         RT = MD->getReturnType();
2919       else if (isa<BlockDecl>(CurCodeDecl))
2920         RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2921       else
2922         llvm_unreachable("Unexpected function/method type");
2923 
2924       assert(getLangOpts().ObjCAutoRefCount &&
2925              !FI.isReturnsRetained() &&
2926              RT->isObjCRetainableType());
2927 #endif
2928       RV = emitAutoreleaseOfResult(*this, RV);
2929     }
2930 
2931     break;
2932 
2933   case ABIArgInfo::Ignore:
2934     break;
2935 
2936   case ABIArgInfo::CoerceAndExpand: {
2937     auto coercionType = RetAI.getCoerceAndExpandType();
2938 
2939     // Load all of the coerced elements out into results.
2940     llvm::SmallVector<llvm::Value*, 4> results;
2941     Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2942     for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2943       auto coercedEltType = coercionType->getElementType(i);
2944       if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2945         continue;
2946 
2947       auto eltAddr = Builder.CreateStructGEP(addr, i);
2948       auto elt = Builder.CreateLoad(eltAddr);
2949       results.push_back(elt);
2950     }
2951 
2952     // If we have one result, it's the single direct result type.
2953     if (results.size() == 1) {
2954       RV = results[0];
2955 
2956     // Otherwise, we need to make a first-class aggregate.
2957     } else {
2958       // Construct a return type that lacks padding elements.
2959       llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2960 
2961       RV = llvm::UndefValue::get(returnType);
2962       for (unsigned i = 0, e = results.size(); i != e; ++i) {
2963         RV = Builder.CreateInsertValue(RV, results[i], i);
2964       }
2965     }
2966     break;
2967   }
2968 
2969   case ABIArgInfo::Expand:
2970     llvm_unreachable("Invalid ABI kind for return argument");
2971   }
2972 
2973   llvm::Instruction *Ret;
2974   if (RV) {
2975     EmitReturnValueCheck(RV);
2976     Ret = Builder.CreateRet(RV);
2977   } else {
2978     Ret = Builder.CreateRetVoid();
2979   }
2980 
2981   if (RetDbgLoc)
2982     Ret->setDebugLoc(std::move(RetDbgLoc));
2983 }
2984 
2985 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
2986   // A current decl may not be available when emitting vtable thunks.
2987   if (!CurCodeDecl)
2988     return;
2989 
2990   ReturnsNonNullAttr *RetNNAttr = nullptr;
2991   if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
2992     RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
2993 
2994   if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
2995     return;
2996 
2997   // Prefer the returns_nonnull attribute if it's present.
2998   SourceLocation AttrLoc;
2999   SanitizerMask CheckKind;
3000   SanitizerHandler Handler;
3001   if (RetNNAttr) {
3002     assert(!requiresReturnValueNullabilityCheck() &&
3003            "Cannot check nullability and the nonnull attribute");
3004     AttrLoc = RetNNAttr->getLocation();
3005     CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3006     Handler = SanitizerHandler::NonnullReturn;
3007   } else {
3008     if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3009       if (auto *TSI = DD->getTypeSourceInfo())
3010         if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
3011           AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3012     CheckKind = SanitizerKind::NullabilityReturn;
3013     Handler = SanitizerHandler::NullabilityReturn;
3014   }
3015 
3016   SanitizerScope SanScope(this);
3017 
3018   // Make sure the "return" source location is valid. If we're checking a
3019   // nullability annotation, make sure the preconditions for the check are met.
3020   llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3021   llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3022   llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3023   llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3024   if (requiresReturnValueNullabilityCheck())
3025     CanNullCheck =
3026         Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3027   Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3028   EmitBlock(Check);
3029 
3030   // Now do the null check.
3031   llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3032   llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3033   llvm::Value *DynamicData[] = {SLocPtr};
3034   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3035 
3036   EmitBlock(NoCheck);
3037 
3038 #ifndef NDEBUG
3039   // The return location should not be used after the check has been emitted.
3040   ReturnLocation = Address::invalid();
3041 #endif
3042 }
3043 
3044 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3045   const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3046   return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3047 }
3048 
3049 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3050                                           QualType Ty) {
3051   // FIXME: Generate IR in one pass, rather than going back and fixing up these
3052   // placeholders.
3053   llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3054   llvm::Type *IRPtrTy = IRTy->getPointerTo();
3055   llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3056 
3057   // FIXME: When we generate this IR in one pass, we shouldn't need
3058   // this win32-specific alignment hack.
3059   CharUnits Align = CharUnits::fromQuantity(4);
3060   Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3061 
3062   return AggValueSlot::forAddr(Address(Placeholder, Align),
3063                                Ty.getQualifiers(),
3064                                AggValueSlot::IsNotDestructed,
3065                                AggValueSlot::DoesNotNeedGCBarriers,
3066                                AggValueSlot::IsNotAliased,
3067                                AggValueSlot::DoesNotOverlap);
3068 }
3069 
3070 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3071                                           const VarDecl *param,
3072                                           SourceLocation loc) {
3073   // StartFunction converted the ABI-lowered parameter(s) into a
3074   // local alloca.  We need to turn that into an r-value suitable
3075   // for EmitCall.
3076   Address local = GetAddrOfLocalVar(param);
3077 
3078   QualType type = param->getType();
3079 
3080   if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3081     CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3082   }
3083 
3084   // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3085   // but the argument needs to be the original pointer.
3086   if (type->isReferenceType()) {
3087     args.add(RValue::get(Builder.CreateLoad(local)), type);
3088 
3089   // In ARC, move out of consumed arguments so that the release cleanup
3090   // entered by StartFunction doesn't cause an over-release.  This isn't
3091   // optimal -O0 code generation, but it should get cleaned up when
3092   // optimization is enabled.  This also assumes that delegate calls are
3093   // performed exactly once for a set of arguments, but that should be safe.
3094   } else if (getLangOpts().ObjCAutoRefCount &&
3095              param->hasAttr<NSConsumedAttr>() &&
3096              type->isObjCRetainableType()) {
3097     llvm::Value *ptr = Builder.CreateLoad(local);
3098     auto null =
3099       llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3100     Builder.CreateStore(null, local);
3101     args.add(RValue::get(ptr), type);
3102 
3103   // For the most part, we just need to load the alloca, except that
3104   // aggregate r-values are actually pointers to temporaries.
3105   } else {
3106     args.add(convertTempToRValue(local, type, loc), type);
3107   }
3108 
3109   // Deactivate the cleanup for the callee-destructed param that was pushed.
3110   if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3111       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3112       param->needsDestruction(getContext())) {
3113     EHScopeStack::stable_iterator cleanup =
3114         CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3115     assert(cleanup.isValid() &&
3116            "cleanup for callee-destructed param not recorded");
3117     // This unreachable is a temporary marker which will be removed later.
3118     llvm::Instruction *isActive = Builder.CreateUnreachable();
3119     args.addArgCleanupDeactivation(cleanup, isActive);
3120   }
3121 }
3122 
3123 static bool isProvablyNull(llvm::Value *addr) {
3124   return isa<llvm::ConstantPointerNull>(addr);
3125 }
3126 
3127 /// Emit the actual writing-back of a writeback.
3128 static void emitWriteback(CodeGenFunction &CGF,
3129                           const CallArgList::Writeback &writeback) {
3130   const LValue &srcLV = writeback.Source;
3131   Address srcAddr = srcLV.getAddress();
3132   assert(!isProvablyNull(srcAddr.getPointer()) &&
3133          "shouldn't have writeback for provably null argument");
3134 
3135   llvm::BasicBlock *contBB = nullptr;
3136 
3137   // If the argument wasn't provably non-null, we need to null check
3138   // before doing the store.
3139   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3140                                               CGF.CGM.getDataLayout());
3141   if (!provablyNonNull) {
3142     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3143     contBB = CGF.createBasicBlock("icr.done");
3144 
3145     llvm::Value *isNull =
3146       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3147     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3148     CGF.EmitBlock(writebackBB);
3149   }
3150 
3151   // Load the value to writeback.
3152   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3153 
3154   // Cast it back, in case we're writing an id to a Foo* or something.
3155   value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3156                                     "icr.writeback-cast");
3157 
3158   // Perform the writeback.
3159 
3160   // If we have a "to use" value, it's something we need to emit a use
3161   // of.  This has to be carefully threaded in: if it's done after the
3162   // release it's potentially undefined behavior (and the optimizer
3163   // will ignore it), and if it happens before the retain then the
3164   // optimizer could move the release there.
3165   if (writeback.ToUse) {
3166     assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3167 
3168     // Retain the new value.  No need to block-copy here:  the block's
3169     // being passed up the stack.
3170     value = CGF.EmitARCRetainNonBlock(value);
3171 
3172     // Emit the intrinsic use here.
3173     CGF.EmitARCIntrinsicUse(writeback.ToUse);
3174 
3175     // Load the old value (primitively).
3176     llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3177 
3178     // Put the new value in place (primitively).
3179     CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3180 
3181     // Release the old value.
3182     CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3183 
3184   // Otherwise, we can just do a normal lvalue store.
3185   } else {
3186     CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3187   }
3188 
3189   // Jump to the continuation block.
3190   if (!provablyNonNull)
3191     CGF.EmitBlock(contBB);
3192 }
3193 
3194 static void emitWritebacks(CodeGenFunction &CGF,
3195                            const CallArgList &args) {
3196   for (const auto &I : args.writebacks())
3197     emitWriteback(CGF, I);
3198 }
3199 
3200 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3201                                             const CallArgList &CallArgs) {
3202   ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3203     CallArgs.getCleanupsToDeactivate();
3204   // Iterate in reverse to increase the likelihood of popping the cleanup.
3205   for (const auto &I : llvm::reverse(Cleanups)) {
3206     CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3207     I.IsActiveIP->eraseFromParent();
3208   }
3209 }
3210 
3211 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3212   if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3213     if (uop->getOpcode() == UO_AddrOf)
3214       return uop->getSubExpr();
3215   return nullptr;
3216 }
3217 
3218 /// Emit an argument that's being passed call-by-writeback.  That is,
3219 /// we are passing the address of an __autoreleased temporary; it
3220 /// might be copy-initialized with the current value of the given
3221 /// address, but it will definitely be copied out of after the call.
3222 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3223                              const ObjCIndirectCopyRestoreExpr *CRE) {
3224   LValue srcLV;
3225 
3226   // Make an optimistic effort to emit the address as an l-value.
3227   // This can fail if the argument expression is more complicated.
3228   if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3229     srcLV = CGF.EmitLValue(lvExpr);
3230 
3231   // Otherwise, just emit it as a scalar.
3232   } else {
3233     Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3234 
3235     QualType srcAddrType =
3236       CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3237     srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3238   }
3239   Address srcAddr = srcLV.getAddress();
3240 
3241   // The dest and src types don't necessarily match in LLVM terms
3242   // because of the crazy ObjC compatibility rules.
3243 
3244   llvm::PointerType *destType =
3245     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3246 
3247   // If the address is a constant null, just pass the appropriate null.
3248   if (isProvablyNull(srcAddr.getPointer())) {
3249     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3250              CRE->getType());
3251     return;
3252   }
3253 
3254   // Create the temporary.
3255   Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3256                                       CGF.getPointerAlign(),
3257                                       "icr.temp");
3258   // Loading an l-value can introduce a cleanup if the l-value is __weak,
3259   // and that cleanup will be conditional if we can't prove that the l-value
3260   // isn't null, so we need to register a dominating point so that the cleanups
3261   // system will make valid IR.
3262   CodeGenFunction::ConditionalEvaluation condEval(CGF);
3263 
3264   // Zero-initialize it if we're not doing a copy-initialization.
3265   bool shouldCopy = CRE->shouldCopy();
3266   if (!shouldCopy) {
3267     llvm::Value *null =
3268       llvm::ConstantPointerNull::get(
3269         cast<llvm::PointerType>(destType->getElementType()));
3270     CGF.Builder.CreateStore(null, temp);
3271   }
3272 
3273   llvm::BasicBlock *contBB = nullptr;
3274   llvm::BasicBlock *originBB = nullptr;
3275 
3276   // If the address is *not* known to be non-null, we need to switch.
3277   llvm::Value *finalArgument;
3278 
3279   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3280                                               CGF.CGM.getDataLayout());
3281   if (provablyNonNull) {
3282     finalArgument = temp.getPointer();
3283   } else {
3284     llvm::Value *isNull =
3285       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3286 
3287     finalArgument = CGF.Builder.CreateSelect(isNull,
3288                                    llvm::ConstantPointerNull::get(destType),
3289                                              temp.getPointer(), "icr.argument");
3290 
3291     // If we need to copy, then the load has to be conditional, which
3292     // means we need control flow.
3293     if (shouldCopy) {
3294       originBB = CGF.Builder.GetInsertBlock();
3295       contBB = CGF.createBasicBlock("icr.cont");
3296       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3297       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3298       CGF.EmitBlock(copyBB);
3299       condEval.begin(CGF);
3300     }
3301   }
3302 
3303   llvm::Value *valueToUse = nullptr;
3304 
3305   // Perform a copy if necessary.
3306   if (shouldCopy) {
3307     RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3308     assert(srcRV.isScalar());
3309 
3310     llvm::Value *src = srcRV.getScalarVal();
3311     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3312                                     "icr.cast");
3313 
3314     // Use an ordinary store, not a store-to-lvalue.
3315     CGF.Builder.CreateStore(src, temp);
3316 
3317     // If optimization is enabled, and the value was held in a
3318     // __strong variable, we need to tell the optimizer that this
3319     // value has to stay alive until we're doing the store back.
3320     // This is because the temporary is effectively unretained,
3321     // and so otherwise we can violate the high-level semantics.
3322     if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3323         srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3324       valueToUse = src;
3325     }
3326   }
3327 
3328   // Finish the control flow if we needed it.
3329   if (shouldCopy && !provablyNonNull) {
3330     llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3331     CGF.EmitBlock(contBB);
3332 
3333     // Make a phi for the value to intrinsically use.
3334     if (valueToUse) {
3335       llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3336                                                       "icr.to-use");
3337       phiToUse->addIncoming(valueToUse, copyBB);
3338       phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3339                             originBB);
3340       valueToUse = phiToUse;
3341     }
3342 
3343     condEval.end(CGF);
3344   }
3345 
3346   args.addWriteback(srcLV, temp, valueToUse);
3347   args.add(RValue::get(finalArgument), CRE->getType());
3348 }
3349 
3350 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3351   assert(!StackBase);
3352 
3353   // Save the stack.
3354   llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3355   StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3356 }
3357 
3358 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3359   if (StackBase) {
3360     // Restore the stack after the call.
3361     llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3362     CGF.Builder.CreateCall(F, StackBase);
3363   }
3364 }
3365 
3366 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3367                                           SourceLocation ArgLoc,
3368                                           AbstractCallee AC,
3369                                           unsigned ParmNum) {
3370   if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3371                          SanOpts.has(SanitizerKind::NullabilityArg)))
3372     return;
3373 
3374   // The param decl may be missing in a variadic function.
3375   auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3376   unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3377 
3378   // Prefer the nonnull attribute if it's present.
3379   const NonNullAttr *NNAttr = nullptr;
3380   if (SanOpts.has(SanitizerKind::NonnullAttribute))
3381     NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3382 
3383   bool CanCheckNullability = false;
3384   if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3385     auto Nullability = PVD->getType()->getNullability(getContext());
3386     CanCheckNullability = Nullability &&
3387                           *Nullability == NullabilityKind::NonNull &&
3388                           PVD->getTypeSourceInfo();
3389   }
3390 
3391   if (!NNAttr && !CanCheckNullability)
3392     return;
3393 
3394   SourceLocation AttrLoc;
3395   SanitizerMask CheckKind;
3396   SanitizerHandler Handler;
3397   if (NNAttr) {
3398     AttrLoc = NNAttr->getLocation();
3399     CheckKind = SanitizerKind::NonnullAttribute;
3400     Handler = SanitizerHandler::NonnullArg;
3401   } else {
3402     AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3403     CheckKind = SanitizerKind::NullabilityArg;
3404     Handler = SanitizerHandler::NullabilityArg;
3405   }
3406 
3407   SanitizerScope SanScope(this);
3408   assert(RV.isScalar());
3409   llvm::Value *V = RV.getScalarVal();
3410   llvm::Value *Cond =
3411       Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3412   llvm::Constant *StaticData[] = {
3413       EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3414       llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3415   };
3416   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3417 }
3418 
3419 void CodeGenFunction::EmitCallArgs(
3420     CallArgList &Args, ArrayRef<QualType> ArgTypes,
3421     llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3422     AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3423   assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3424 
3425   // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3426   // because arguments are destroyed left to right in the callee. As a special
3427   // case, there are certain language constructs that require left-to-right
3428   // evaluation, and in those cases we consider the evaluation order requirement
3429   // to trump the "destruction order is reverse construction order" guarantee.
3430   bool LeftToRight =
3431       CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3432           ? Order == EvaluationOrder::ForceLeftToRight
3433           : Order != EvaluationOrder::ForceRightToLeft;
3434 
3435   auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3436                                          RValue EmittedArg) {
3437     if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3438       return;
3439     auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3440     if (PS == nullptr)
3441       return;
3442 
3443     const auto &Context = getContext();
3444     auto SizeTy = Context.getSizeType();
3445     auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3446     assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3447     llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3448                                                      EmittedArg.getScalarVal(),
3449                                                      PS->isDynamic());
3450     Args.add(RValue::get(V), SizeTy);
3451     // If we're emitting args in reverse, be sure to do so with
3452     // pass_object_size, as well.
3453     if (!LeftToRight)
3454       std::swap(Args.back(), *(&Args.back() - 1));
3455   };
3456 
3457   // Insert a stack save if we're going to need any inalloca args.
3458   bool HasInAllocaArgs = false;
3459   if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3460     for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3461          I != E && !HasInAllocaArgs; ++I)
3462       HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3463     if (HasInAllocaArgs) {
3464       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3465       Args.allocateArgumentMemory(*this);
3466     }
3467   }
3468 
3469   // Evaluate each argument in the appropriate order.
3470   size_t CallArgsStart = Args.size();
3471   for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3472     unsigned Idx = LeftToRight ? I : E - I - 1;
3473     CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3474     unsigned InitialArgSize = Args.size();
3475     // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3476     // the argument and parameter match or the objc method is parameterized.
3477     assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3478             getContext().hasSameUnqualifiedType((*Arg)->getType(),
3479                                                 ArgTypes[Idx]) ||
3480             (isa<ObjCMethodDecl>(AC.getDecl()) &&
3481              isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3482            "Argument and parameter types don't match");
3483     EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3484     // In particular, we depend on it being the last arg in Args, and the
3485     // objectsize bits depend on there only being one arg if !LeftToRight.
3486     assert(InitialArgSize + 1 == Args.size() &&
3487            "The code below depends on only adding one arg per EmitCallArg");
3488     (void)InitialArgSize;
3489     // Since pointer argument are never emitted as LValue, it is safe to emit
3490     // non-null argument check for r-value only.
3491     if (!Args.back().hasLValue()) {
3492       RValue RVArg = Args.back().getKnownRValue();
3493       EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3494                           ParamsToSkip + Idx);
3495       // @llvm.objectsize should never have side-effects and shouldn't need
3496       // destruction/cleanups, so we can safely "emit" it after its arg,
3497       // regardless of right-to-leftness
3498       MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3499     }
3500   }
3501 
3502   if (!LeftToRight) {
3503     // Un-reverse the arguments we just evaluated so they match up with the LLVM
3504     // IR function.
3505     std::reverse(Args.begin() + CallArgsStart, Args.end());
3506   }
3507 }
3508 
3509 namespace {
3510 
3511 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3512   DestroyUnpassedArg(Address Addr, QualType Ty)
3513       : Addr(Addr), Ty(Ty) {}
3514 
3515   Address Addr;
3516   QualType Ty;
3517 
3518   void Emit(CodeGenFunction &CGF, Flags flags) override {
3519     QualType::DestructionKind DtorKind = Ty.isDestructedType();
3520     if (DtorKind == QualType::DK_cxx_destructor) {
3521       const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3522       assert(!Dtor->isTrivial());
3523       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3524                                 /*Delegating=*/false, Addr, Ty);
3525     } else {
3526       CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3527     }
3528   }
3529 };
3530 
3531 struct DisableDebugLocationUpdates {
3532   CodeGenFunction &CGF;
3533   bool disabledDebugInfo;
3534   DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3535     if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3536       CGF.disableDebugInfo();
3537   }
3538   ~DisableDebugLocationUpdates() {
3539     if (disabledDebugInfo)
3540       CGF.enableDebugInfo();
3541   }
3542 };
3543 
3544 } // end anonymous namespace
3545 
3546 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3547   if (!HasLV)
3548     return RV;
3549   LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3550   CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3551                         LV.isVolatile());
3552   IsUsed = true;
3553   return RValue::getAggregate(Copy.getAddress());
3554 }
3555 
3556 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3557   LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3558   if (!HasLV && RV.isScalar())
3559     CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
3560   else if (!HasLV && RV.isComplex())
3561     CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3562   else {
3563     auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
3564     LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3565     // We assume that call args are never copied into subobjects.
3566     CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3567                           HasLV ? LV.isVolatileQualified()
3568                                 : RV.isVolatileQualified());
3569   }
3570   IsUsed = true;
3571 }
3572 
3573 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3574                                   QualType type) {
3575   DisableDebugLocationUpdates Dis(*this, E);
3576   if (const ObjCIndirectCopyRestoreExpr *CRE
3577         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3578     assert(getLangOpts().ObjCAutoRefCount);
3579     return emitWritebackArg(*this, args, CRE);
3580   }
3581 
3582   assert(type->isReferenceType() == E->isGLValue() &&
3583          "reference binding to unmaterialized r-value!");
3584 
3585   if (E->isGLValue()) {
3586     assert(E->getObjectKind() == OK_Ordinary);
3587     return args.add(EmitReferenceBindingToExpr(E), type);
3588   }
3589 
3590   bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3591 
3592   // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3593   // However, we still have to push an EH-only cleanup in case we unwind before
3594   // we make it to the call.
3595   if (HasAggregateEvalKind &&
3596       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
3597     // If we're using inalloca, use the argument memory.  Otherwise, use a
3598     // temporary.
3599     AggValueSlot Slot;
3600     if (args.isUsingInAlloca())
3601       Slot = createPlaceholderSlot(*this, type);
3602     else
3603       Slot = CreateAggTemp(type, "agg.tmp");
3604 
3605     bool DestroyedInCallee = true, NeedsEHCleanup = true;
3606     if (const auto *RD = type->getAsCXXRecordDecl())
3607       DestroyedInCallee = RD->hasNonTrivialDestructor();
3608     else
3609       NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3610 
3611     if (DestroyedInCallee)
3612       Slot.setExternallyDestructed();
3613 
3614     EmitAggExpr(E, Slot);
3615     RValue RV = Slot.asRValue();
3616     args.add(RV, type);
3617 
3618     if (DestroyedInCallee && NeedsEHCleanup) {
3619       // Create a no-op GEP between the placeholder and the cleanup so we can
3620       // RAUW it successfully.  It also serves as a marker of the first
3621       // instruction where the cleanup is active.
3622       pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3623                                               type);
3624       // This unreachable is a temporary marker which will be removed later.
3625       llvm::Instruction *IsActive = Builder.CreateUnreachable();
3626       args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3627     }
3628     return;
3629   }
3630 
3631   if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3632       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3633     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3634     assert(L.isSimple());
3635     args.addUncopiedAggregate(L, type);
3636     return;
3637   }
3638 
3639   args.add(EmitAnyExprToTemp(E), type);
3640 }
3641 
3642 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3643   // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3644   // implicitly widens null pointer constants that are arguments to varargs
3645   // functions to pointer-sized ints.
3646   if (!getTarget().getTriple().isOSWindows())
3647     return Arg->getType();
3648 
3649   if (Arg->getType()->isIntegerType() &&
3650       getContext().getTypeSize(Arg->getType()) <
3651           getContext().getTargetInfo().getPointerWidth(0) &&
3652       Arg->isNullPointerConstant(getContext(),
3653                                  Expr::NPC_ValueDependentIsNotNull)) {
3654     return getContext().getIntPtrType();
3655   }
3656 
3657   return Arg->getType();
3658 }
3659 
3660 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3661 // optimizer it can aggressively ignore unwind edges.
3662 void
3663 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3664   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3665       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3666     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3667                       CGM.getNoObjCARCExceptionsMetadata());
3668 }
3669 
3670 /// Emits a call to the given no-arguments nounwind runtime function.
3671 llvm::CallInst *
3672 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3673                                          const llvm::Twine &name) {
3674   return EmitNounwindRuntimeCall(callee, None, name);
3675 }
3676 
3677 /// Emits a call to the given nounwind runtime function.
3678 llvm::CallInst *
3679 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3680                                          ArrayRef<llvm::Value *> args,
3681                                          const llvm::Twine &name) {
3682   llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3683   call->setDoesNotThrow();
3684   return call;
3685 }
3686 
3687 /// Emits a simple call (never an invoke) to the given no-arguments
3688 /// runtime function.
3689 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
3690                                                  const llvm::Twine &name) {
3691   return EmitRuntimeCall(callee, None, name);
3692 }
3693 
3694 // Calls which may throw must have operand bundles indicating which funclet
3695 // they are nested within.
3696 SmallVector<llvm::OperandBundleDef, 1>
3697 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3698   SmallVector<llvm::OperandBundleDef, 1> BundleList;
3699   // There is no need for a funclet operand bundle if we aren't inside a
3700   // funclet.
3701   if (!CurrentFuncletPad)
3702     return BundleList;
3703 
3704   // Skip intrinsics which cannot throw.
3705   auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3706   if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3707     return BundleList;
3708 
3709   BundleList.emplace_back("funclet", CurrentFuncletPad);
3710   return BundleList;
3711 }
3712 
3713 /// Emits a simple call (never an invoke) to the given runtime function.
3714 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
3715                                                  ArrayRef<llvm::Value *> args,
3716                                                  const llvm::Twine &name) {
3717   llvm::CallInst *call = Builder.CreateCall(
3718       callee, args, getBundlesForFunclet(callee.getCallee()), name);
3719   call->setCallingConv(getRuntimeCC());
3720   return call;
3721 }
3722 
3723 /// Emits a call or invoke to the given noreturn runtime function.
3724 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
3725     llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
3726   SmallVector<llvm::OperandBundleDef, 1> BundleList =
3727       getBundlesForFunclet(callee.getCallee());
3728 
3729   if (getInvokeDest()) {
3730     llvm::InvokeInst *invoke =
3731       Builder.CreateInvoke(callee,
3732                            getUnreachableBlock(),
3733                            getInvokeDest(),
3734                            args,
3735                            BundleList);
3736     invoke->setDoesNotReturn();
3737     invoke->setCallingConv(getRuntimeCC());
3738   } else {
3739     llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3740     call->setDoesNotReturn();
3741     call->setCallingConv(getRuntimeCC());
3742     Builder.CreateUnreachable();
3743   }
3744 }
3745 
3746 /// Emits a call or invoke instruction to the given nullary runtime function.
3747 llvm::CallBase *
3748 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3749                                          const Twine &name) {
3750   return EmitRuntimeCallOrInvoke(callee, None, name);
3751 }
3752 
3753 /// Emits a call or invoke instruction to the given runtime function.
3754 llvm::CallBase *
3755 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3756                                          ArrayRef<llvm::Value *> args,
3757                                          const Twine &name) {
3758   llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
3759   call->setCallingConv(getRuntimeCC());
3760   return call;
3761 }
3762 
3763 /// Emits a call or invoke instruction to the given function, depending
3764 /// on the current state of the EH stack.
3765 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
3766                                                   ArrayRef<llvm::Value *> Args,
3767                                                   const Twine &Name) {
3768   llvm::BasicBlock *InvokeDest = getInvokeDest();
3769   SmallVector<llvm::OperandBundleDef, 1> BundleList =
3770       getBundlesForFunclet(Callee.getCallee());
3771 
3772   llvm::CallBase *Inst;
3773   if (!InvokeDest)
3774     Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3775   else {
3776     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3777     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3778                                 Name);
3779     EmitBlock(ContBB);
3780   }
3781 
3782   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3783   // optimizer it can aggressively ignore unwind edges.
3784   if (CGM.getLangOpts().ObjCAutoRefCount)
3785     AddObjCARCExceptionMetadata(Inst);
3786 
3787   return Inst;
3788 }
3789 
3790 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3791                                                   llvm::Value *New) {
3792   DeferredReplacements.push_back(std::make_pair(Old, New));
3793 }
3794 
3795 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3796                                  const CGCallee &Callee,
3797                                  ReturnValueSlot ReturnValue,
3798                                  const CallArgList &CallArgs,
3799                                  llvm::CallBase **callOrInvoke,
3800                                  SourceLocation Loc) {
3801   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3802 
3803   assert(Callee.isOrdinary() || Callee.isVirtual());
3804 
3805   // Handle struct-return functions by passing a pointer to the
3806   // location that we would like to return into.
3807   QualType RetTy = CallInfo.getReturnType();
3808   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3809 
3810   llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
3811 
3812   const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
3813   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
3814     // We can only guarantee that a function is called from the correct
3815     // context/function based on the appropriate target attributes,
3816     // so only check in the case where we have both always_inline and target
3817     // since otherwise we could be making a conditional call after a check for
3818     // the proper cpu features (and it won't cause code generation issues due to
3819     // function based code generation).
3820     if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
3821         TargetDecl->hasAttr<TargetAttr>())
3822       checkTargetFeatures(Loc, FD);
3823 
3824 #ifndef NDEBUG
3825   if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
3826     // For an inalloca varargs function, we don't expect CallInfo to match the
3827     // function pointer's type, because the inalloca struct a will have extra
3828     // fields in it for the varargs parameters.  Code later in this function
3829     // bitcasts the function pointer to the type derived from CallInfo.
3830     //
3831     // In other cases, we assert that the types match up (until pointers stop
3832     // having pointee types).
3833     llvm::Type *TypeFromVal;
3834     if (Callee.isVirtual())
3835       TypeFromVal = Callee.getVirtualFunctionType();
3836     else
3837       TypeFromVal =
3838           Callee.getFunctionPointer()->getType()->getPointerElementType();
3839     assert(IRFuncTy == TypeFromVal);
3840   }
3841 #endif
3842 
3843   // 1. Set up the arguments.
3844 
3845   // If we're using inalloca, insert the allocation after the stack save.
3846   // FIXME: Do this earlier rather than hacking it in here!
3847   Address ArgMemory = Address::invalid();
3848   if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3849     const llvm::DataLayout &DL = CGM.getDataLayout();
3850     llvm::Instruction *IP = CallArgs.getStackBase();
3851     llvm::AllocaInst *AI;
3852     if (IP) {
3853       IP = IP->getNextNode();
3854       AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3855                                 "argmem", IP);
3856     } else {
3857       AI = CreateTempAlloca(ArgStruct, "argmem");
3858     }
3859     auto Align = CallInfo.getArgStructAlignment();
3860     AI->setAlignment(Align.getAsAlign());
3861     AI->setUsedWithInAlloca(true);
3862     assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3863     ArgMemory = Address(AI, Align);
3864   }
3865 
3866   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3867   SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3868 
3869   // If the call returns a temporary with struct return, create a temporary
3870   // alloca to hold the result, unless one is given to us.
3871   Address SRetPtr = Address::invalid();
3872   Address SRetAlloca = Address::invalid();
3873   llvm::Value *UnusedReturnSizePtr = nullptr;
3874   if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3875     if (!ReturnValue.isNull()) {
3876       SRetPtr = ReturnValue.getValue();
3877     } else {
3878       SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
3879       if (HaveInsertPoint() && ReturnValue.isUnused()) {
3880         uint64_t size =
3881             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3882         UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
3883       }
3884     }
3885     if (IRFunctionArgs.hasSRetArg()) {
3886       IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3887     } else if (RetAI.isInAlloca()) {
3888       Address Addr =
3889           Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
3890       Builder.CreateStore(SRetPtr.getPointer(), Addr);
3891     }
3892   }
3893 
3894   Address swiftErrorTemp = Address::invalid();
3895   Address swiftErrorArg = Address::invalid();
3896 
3897   // When passing arguments using temporary allocas, we need to add the
3898   // appropriate lifetime markers. This vector keeps track of all the lifetime
3899   // markers that need to be ended right after the call.
3900   SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;
3901 
3902   // Translate all of the arguments as necessary to match the IR lowering.
3903   assert(CallInfo.arg_size() == CallArgs.size() &&
3904          "Mismatch between function signature & arguments.");
3905   unsigned ArgNo = 0;
3906   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3907   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3908        I != E; ++I, ++info_it, ++ArgNo) {
3909     const ABIArgInfo &ArgInfo = info_it->info;
3910 
3911     // Insert a padding argument to ensure proper alignment.
3912     if (IRFunctionArgs.hasPaddingArg(ArgNo))
3913       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3914           llvm::UndefValue::get(ArgInfo.getPaddingType());
3915 
3916     unsigned FirstIRArg, NumIRArgs;
3917     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3918 
3919     switch (ArgInfo.getKind()) {
3920     case ABIArgInfo::InAlloca: {
3921       assert(NumIRArgs == 0);
3922       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3923       if (I->isAggregate()) {
3924         // Replace the placeholder with the appropriate argument slot GEP.
3925         Address Addr = I->hasLValue()
3926                            ? I->getKnownLValue().getAddress()
3927                            : I->getKnownRValue().getAggregateAddress();
3928         llvm::Instruction *Placeholder =
3929             cast<llvm::Instruction>(Addr.getPointer());
3930         CGBuilderTy::InsertPoint IP = Builder.saveIP();
3931         Builder.SetInsertPoint(Placeholder);
3932         Addr =
3933             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
3934         Builder.restoreIP(IP);
3935         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3936       } else {
3937         // Store the RValue into the argument struct.
3938         Address Addr =
3939             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
3940         unsigned AS = Addr.getType()->getPointerAddressSpace();
3941         llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3942         // There are some cases where a trivial bitcast is not avoidable.  The
3943         // definition of a type later in a translation unit may change it's type
3944         // from {}* to (%struct.foo*)*.
3945         if (Addr.getType() != MemType)
3946           Addr = Builder.CreateBitCast(Addr, MemType);
3947         I->copyInto(*this, Addr);
3948       }
3949       break;
3950     }
3951 
3952     case ABIArgInfo::Indirect: {
3953       assert(NumIRArgs == 1);
3954       if (!I->isAggregate()) {
3955         // Make a temporary alloca to pass the argument.
3956         Address Addr = CreateMemTempWithoutCast(
3957             I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
3958         IRCallArgs[FirstIRArg] = Addr.getPointer();
3959 
3960         I->copyInto(*this, Addr);
3961       } else {
3962         // We want to avoid creating an unnecessary temporary+copy here;
3963         // however, we need one in three cases:
3964         // 1. If the argument is not byval, and we are required to copy the
3965         //    source.  (This case doesn't occur on any common architecture.)
3966         // 2. If the argument is byval, RV is not sufficiently aligned, and
3967         //    we cannot force it to be sufficiently aligned.
3968         // 3. If the argument is byval, but RV is not located in default
3969         //    or alloca address space.
3970         Address Addr = I->hasLValue()
3971                            ? I->getKnownLValue().getAddress()
3972                            : I->getKnownRValue().getAggregateAddress();
3973         llvm::Value *V = Addr.getPointer();
3974         CharUnits Align = ArgInfo.getIndirectAlign();
3975         const llvm::DataLayout *TD = &CGM.getDataLayout();
3976 
3977         assert((FirstIRArg >= IRFuncTy->getNumParams() ||
3978                 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
3979                     TD->getAllocaAddrSpace()) &&
3980                "indirect argument must be in alloca address space");
3981 
3982         bool NeedCopy = false;
3983 
3984         if (Addr.getAlignment() < Align &&
3985             llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
3986                 Align.getQuantity()) {
3987           NeedCopy = true;
3988         } else if (I->hasLValue()) {
3989           auto LV = I->getKnownLValue();
3990           auto AS = LV.getAddressSpace();
3991 
3992           if ((!ArgInfo.getIndirectByVal() &&
3993                (LV.getAlignment() >=
3994                 getContext().getTypeAlignInChars(I->Ty)))) {
3995             NeedCopy = true;
3996           }
3997           if (!getLangOpts().OpenCL) {
3998             if ((ArgInfo.getIndirectByVal() &&
3999                 (AS != LangAS::Default &&
4000                  AS != CGM.getASTAllocaAddressSpace()))) {
4001               NeedCopy = true;
4002             }
4003           }
4004           // For OpenCL even if RV is located in default or alloca address space
4005           // we don't want to perform address space cast for it.
4006           else if ((ArgInfo.getIndirectByVal() &&
4007                     Addr.getType()->getAddressSpace() != IRFuncTy->
4008                       getParamType(FirstIRArg)->getPointerAddressSpace())) {
4009             NeedCopy = true;
4010           }
4011         }
4012 
4013         if (NeedCopy) {
4014           // Create an aligned temporary, and copy to it.
4015           Address AI = CreateMemTempWithoutCast(
4016               I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
4017           IRCallArgs[FirstIRArg] = AI.getPointer();
4018 
4019           // Emit lifetime markers for the temporary alloca.
4020           uint64_t ByvalTempElementSize =
4021               CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
4022           llvm::Value *LifetimeSize =
4023               EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
4024 
4025           // Add cleanup code to emit the end lifetime marker after the call.
4026           if (LifetimeSize) // In case we disabled lifetime markers.
4027             CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
4028 
4029           // Generate the copy.
4030           I->copyInto(*this, AI);
4031         } else {
4032           // Skip the extra memcpy call.
4033           auto *T = V->getType()->getPointerElementType()->getPointerTo(
4034               CGM.getDataLayout().getAllocaAddrSpace());
4035           IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
4036               *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
4037               true);
4038         }
4039       }
4040       break;
4041     }
4042 
4043     case ABIArgInfo::Ignore:
4044       assert(NumIRArgs == 0);
4045       break;
4046 
4047     case ABIArgInfo::Extend:
4048     case ABIArgInfo::Direct: {
4049       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4050           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4051           ArgInfo.getDirectOffset() == 0) {
4052         assert(NumIRArgs == 1);
4053         llvm::Value *V;
4054         if (!I->isAggregate())
4055           V = I->getKnownRValue().getScalarVal();
4056         else
4057           V = Builder.CreateLoad(
4058               I->hasLValue() ? I->getKnownLValue().getAddress()
4059                              : I->getKnownRValue().getAggregateAddress());
4060 
4061         // Implement swifterror by copying into a new swifterror argument.
4062         // We'll write back in the normal path out of the call.
4063         if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4064               == ParameterABI::SwiftErrorResult) {
4065           assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4066 
4067           QualType pointeeTy = I->Ty->getPointeeType();
4068           swiftErrorArg =
4069             Address(V, getContext().getTypeAlignInChars(pointeeTy));
4070 
4071           swiftErrorTemp =
4072             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4073           V = swiftErrorTemp.getPointer();
4074           cast<llvm::AllocaInst>(V)->setSwiftError(true);
4075 
4076           llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4077           Builder.CreateStore(errorValue, swiftErrorTemp);
4078         }
4079 
4080         // We might have to widen integers, but we should never truncate.
4081         if (ArgInfo.getCoerceToType() != V->getType() &&
4082             V->getType()->isIntegerTy())
4083           V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4084 
4085         // If the argument doesn't match, perform a bitcast to coerce it.  This
4086         // can happen due to trivial type mismatches.
4087         if (FirstIRArg < IRFuncTy->getNumParams() &&
4088             V->getType() != IRFuncTy->getParamType(FirstIRArg))
4089           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4090 
4091         IRCallArgs[FirstIRArg] = V;
4092         break;
4093       }
4094 
4095       // FIXME: Avoid the conversion through memory if possible.
4096       Address Src = Address::invalid();
4097       if (!I->isAggregate()) {
4098         Src = CreateMemTemp(I->Ty, "coerce");
4099         I->copyInto(*this, Src);
4100       } else {
4101         Src = I->hasLValue() ? I->getKnownLValue().getAddress()
4102                              : I->getKnownRValue().getAggregateAddress();
4103       }
4104 
4105       // If the value is offset in memory, apply the offset now.
4106       Src = emitAddressAtOffset(*this, Src, ArgInfo);
4107 
4108       // Fast-isel and the optimizer generally like scalar values better than
4109       // FCAs, so we flatten them if this is safe to do for this argument.
4110       llvm::StructType *STy =
4111             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4112       if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4113         llvm::Type *SrcTy = Src.getType()->getElementType();
4114         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4115         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4116 
4117         // If the source type is smaller than the destination type of the
4118         // coerce-to logic, copy the source value into a temp alloca the size
4119         // of the destination type to allow loading all of it. The bits past
4120         // the source value are left undef.
4121         if (SrcSize < DstSize) {
4122           Address TempAlloca
4123             = CreateTempAlloca(STy, Src.getAlignment(),
4124                                Src.getName() + ".coerce");
4125           Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4126           Src = TempAlloca;
4127         } else {
4128           Src = Builder.CreateBitCast(Src,
4129                                       STy->getPointerTo(Src.getAddressSpace()));
4130         }
4131 
4132         assert(NumIRArgs == STy->getNumElements());
4133         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4134           Address EltPtr = Builder.CreateStructGEP(Src, i);
4135           llvm::Value *LI = Builder.CreateLoad(EltPtr);
4136           IRCallArgs[FirstIRArg + i] = LI;
4137         }
4138       } else {
4139         // In the simple case, just pass the coerced loaded value.
4140         assert(NumIRArgs == 1);
4141         IRCallArgs[FirstIRArg] =
4142           CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4143       }
4144 
4145       break;
4146     }
4147 
4148     case ABIArgInfo::CoerceAndExpand: {
4149       auto coercionType = ArgInfo.getCoerceAndExpandType();
4150       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4151 
4152       llvm::Value *tempSize = nullptr;
4153       Address addr = Address::invalid();
4154       Address AllocaAddr = Address::invalid();
4155       if (I->isAggregate()) {
4156         addr = I->hasLValue() ? I->getKnownLValue().getAddress()
4157                               : I->getKnownRValue().getAggregateAddress();
4158 
4159       } else {
4160         RValue RV = I->getKnownRValue();
4161         assert(RV.isScalar()); // complex should always just be direct
4162 
4163         llvm::Type *scalarType = RV.getScalarVal()->getType();
4164         auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4165         auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4166 
4167         // Materialize to a temporary.
4168         addr = CreateTempAlloca(
4169             RV.getScalarVal()->getType(),
4170             CharUnits::fromQuantity(std::max(
4171                 (unsigned)layout->getAlignment().value(), scalarAlign)),
4172             "tmp",
4173             /*ArraySize=*/nullptr, &AllocaAddr);
4174         tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4175 
4176         Builder.CreateStore(RV.getScalarVal(), addr);
4177       }
4178 
4179       addr = Builder.CreateElementBitCast(addr, coercionType);
4180 
4181       unsigned IRArgPos = FirstIRArg;
4182       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4183         llvm::Type *eltType = coercionType->getElementType(i);
4184         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4185         Address eltAddr = Builder.CreateStructGEP(addr, i);
4186         llvm::Value *elt = Builder.CreateLoad(eltAddr);
4187         IRCallArgs[IRArgPos++] = elt;
4188       }
4189       assert(IRArgPos == FirstIRArg + NumIRArgs);
4190 
4191       if (tempSize) {
4192         EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4193       }
4194 
4195       break;
4196     }
4197 
4198     case ABIArgInfo::Expand:
4199       unsigned IRArgPos = FirstIRArg;
4200       ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4201       assert(IRArgPos == FirstIRArg + NumIRArgs);
4202       break;
4203     }
4204   }
4205 
4206   const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4207   llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4208 
4209   // If we're using inalloca, set up that argument.
4210   if (ArgMemory.isValid()) {
4211     llvm::Value *Arg = ArgMemory.getPointer();
4212     if (CallInfo.isVariadic()) {
4213       // When passing non-POD arguments by value to variadic functions, we will
4214       // end up with a variadic prototype and an inalloca call site.  In such
4215       // cases, we can't do any parameter mismatch checks.  Give up and bitcast
4216       // the callee.
4217       unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4218       CalleePtr =
4219           Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
4220     } else {
4221       llvm::Type *LastParamTy =
4222           IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4223       if (Arg->getType() != LastParamTy) {
4224 #ifndef NDEBUG
4225         // Assert that these structs have equivalent element types.
4226         llvm::StructType *FullTy = CallInfo.getArgStruct();
4227         llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4228             cast<llvm::PointerType>(LastParamTy)->getElementType());
4229         assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4230         for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4231                                                 DE = DeclaredTy->element_end(),
4232                                                 FI = FullTy->element_begin();
4233              DI != DE; ++DI, ++FI)
4234           assert(*DI == *FI);
4235 #endif
4236         Arg = Builder.CreateBitCast(Arg, LastParamTy);
4237       }
4238     }
4239     assert(IRFunctionArgs.hasInallocaArg());
4240     IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4241   }
4242 
4243   // 2. Prepare the function pointer.
4244 
4245   // If the callee is a bitcast of a non-variadic function to have a
4246   // variadic function pointer type, check to see if we can remove the
4247   // bitcast.  This comes up with unprototyped functions.
4248   //
4249   // This makes the IR nicer, but more importantly it ensures that we
4250   // can inline the function at -O0 if it is marked always_inline.
4251   auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
4252                                    llvm::Value *Ptr) -> llvm::Function * {
4253     if (!CalleeFT->isVarArg())
4254       return nullptr;
4255 
4256     // Get underlying value if it's a bitcast
4257     if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
4258       if (CE->getOpcode() == llvm::Instruction::BitCast)
4259         Ptr = CE->getOperand(0);
4260     }
4261 
4262     llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
4263     if (!OrigFn)
4264       return nullptr;
4265 
4266     llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4267 
4268     // If the original type is variadic, or if any of the component types
4269     // disagree, we cannot remove the cast.
4270     if (OrigFT->isVarArg() ||
4271         OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4272         OrigFT->getReturnType() != CalleeFT->getReturnType())
4273       return nullptr;
4274 
4275     for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4276       if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4277         return nullptr;
4278 
4279     return OrigFn;
4280   };
4281 
4282   if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
4283     CalleePtr = OrigFn;
4284     IRFuncTy = OrigFn->getFunctionType();
4285   }
4286 
4287   // 3. Perform the actual call.
4288 
4289   // Deactivate any cleanups that we're supposed to do immediately before
4290   // the call.
4291   if (!CallArgs.getCleanupsToDeactivate().empty())
4292     deactivateArgCleanupsBeforeCall(*this, CallArgs);
4293 
4294   // Assert that the arguments we computed match up.  The IR verifier
4295   // will catch this, but this is a common enough source of problems
4296   // during IRGen changes that it's way better for debugging to catch
4297   // it ourselves here.
4298 #ifndef NDEBUG
4299   assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4300   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4301     // Inalloca argument can have different type.
4302     if (IRFunctionArgs.hasInallocaArg() &&
4303         i == IRFunctionArgs.getInallocaArgNo())
4304       continue;
4305     if (i < IRFuncTy->getNumParams())
4306       assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4307   }
4308 #endif
4309 
4310   // Update the largest vector width if any arguments have vector types.
4311   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4312     if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
4313       LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
4314                                    VT->getPrimitiveSizeInBits().getFixedSize());
4315   }
4316 
4317   // Compute the calling convention and attributes.
4318   unsigned CallingConv;
4319   llvm::AttributeList Attrs;
4320   CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4321                              Callee.getAbstractInfo(), Attrs, CallingConv,
4322                              /*AttrOnCallSite=*/true);
4323 
4324   // Apply some call-site-specific attributes.
4325   // TODO: work this into building the attribute set.
4326 
4327   // Apply always_inline to all calls within flatten functions.
4328   // FIXME: should this really take priority over __try, below?
4329   if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4330       !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
4331     Attrs =
4332         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4333                            llvm::Attribute::AlwaysInline);
4334   }
4335 
4336   // Disable inlining inside SEH __try blocks.
4337   if (isSEHTryScope()) {
4338     Attrs =
4339         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4340                            llvm::Attribute::NoInline);
4341   }
4342 
4343   // Decide whether to use a call or an invoke.
4344   bool CannotThrow;
4345   if (currentFunctionUsesSEHTry()) {
4346     // SEH cares about asynchronous exceptions, so everything can "throw."
4347     CannotThrow = false;
4348   } else if (isCleanupPadScope() &&
4349              EHPersonality::get(*this).isMSVCXXPersonality()) {
4350     // The MSVC++ personality will implicitly terminate the program if an
4351     // exception is thrown during a cleanup outside of a try/catch.
4352     // We don't need to model anything in IR to get this behavior.
4353     CannotThrow = true;
4354   } else {
4355     // Otherwise, nounwind call sites will never throw.
4356     CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4357                                      llvm::Attribute::NoUnwind);
4358   }
4359 
4360   // If we made a temporary, be sure to clean up after ourselves. Note that we
4361   // can't depend on being inside of an ExprWithCleanups, so we need to manually
4362   // pop this cleanup later on. Being eager about this is OK, since this
4363   // temporary is 'invisible' outside of the callee.
4364   if (UnusedReturnSizePtr)
4365     pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4366                                          UnusedReturnSizePtr);
4367 
4368   llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4369 
4370   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4371       getBundlesForFunclet(CalleePtr);
4372 
4373   // Emit the actual call/invoke instruction.
4374   llvm::CallBase *CI;
4375   if (!InvokeDest) {
4376     CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
4377   } else {
4378     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4379     CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
4380                               BundleList);
4381     EmitBlock(Cont);
4382   }
4383   if (callOrInvoke)
4384     *callOrInvoke = CI;
4385 
4386   // Apply the attributes and calling convention.
4387   CI->setAttributes(Attrs);
4388   CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4389 
4390   // Apply various metadata.
4391 
4392   if (!CI->getType()->isVoidTy())
4393     CI->setName("call");
4394 
4395   // Update largest vector width from the return type.
4396   if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
4397     LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
4398                                   VT->getPrimitiveSizeInBits().getFixedSize());
4399 
4400   // Insert instrumentation or attach profile metadata at indirect call sites.
4401   // For more details, see the comment before the definition of
4402   // IPVK_IndirectCallTarget in InstrProfData.inc.
4403   if (!CI->getCalledFunction())
4404     PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4405                      CI, CalleePtr);
4406 
4407   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4408   // optimizer it can aggressively ignore unwind edges.
4409   if (CGM.getLangOpts().ObjCAutoRefCount)
4410     AddObjCARCExceptionMetadata(CI);
4411 
4412   // Suppress tail calls if requested.
4413   if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4414     if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4415       Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4416   }
4417 
4418   // Add metadata for calls to MSAllocator functions
4419   if (getDebugInfo() && TargetDecl &&
4420       TargetDecl->hasAttr<MSAllocatorAttr>())
4421     getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy, Loc);
4422 
4423   // 4. Finish the call.
4424 
4425   // If the call doesn't return, finish the basic block and clear the
4426   // insertion point; this allows the rest of IRGen to discard
4427   // unreachable code.
4428   if (CI->doesNotReturn()) {
4429     if (UnusedReturnSizePtr)
4430       PopCleanupBlock();
4431 
4432     // Strip away the noreturn attribute to better diagnose unreachable UB.
4433     if (SanOpts.has(SanitizerKind::Unreachable)) {
4434       // Also remove from function since CallBase::hasFnAttr additionally checks
4435       // attributes of the called function.
4436       if (auto *F = CI->getCalledFunction())
4437         F->removeFnAttr(llvm::Attribute::NoReturn);
4438       CI->removeAttribute(llvm::AttributeList::FunctionIndex,
4439                           llvm::Attribute::NoReturn);
4440 
4441       // Avoid incompatibility with ASan which relies on the `noreturn`
4442       // attribute to insert handler calls.
4443       if (SanOpts.hasOneOf(SanitizerKind::Address |
4444                            SanitizerKind::KernelAddress)) {
4445         SanitizerScope SanScope(this);
4446         llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
4447         Builder.SetInsertPoint(CI);
4448         auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4449         llvm::FunctionCallee Fn =
4450             CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
4451         EmitNounwindRuntimeCall(Fn);
4452       }
4453     }
4454 
4455     EmitUnreachable(Loc);
4456     Builder.ClearInsertionPoint();
4457 
4458     // FIXME: For now, emit a dummy basic block because expr emitters in
4459     // generally are not ready to handle emitting expressions at unreachable
4460     // points.
4461     EnsureInsertPoint();
4462 
4463     // Return a reasonable RValue.
4464     return GetUndefRValue(RetTy);
4465   }
4466 
4467   // Perform the swifterror writeback.
4468   if (swiftErrorTemp.isValid()) {
4469     llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4470     Builder.CreateStore(errorResult, swiftErrorArg);
4471   }
4472 
4473   // Emit any call-associated writebacks immediately.  Arguably this
4474   // should happen after any return-value munging.
4475   if (CallArgs.hasWritebacks())
4476     emitWritebacks(*this, CallArgs);
4477 
4478   // The stack cleanup for inalloca arguments has to run out of the normal
4479   // lexical order, so deactivate it and run it manually here.
4480   CallArgs.freeArgumentMemory(*this);
4481 
4482   // Extract the return value.
4483   RValue Ret = [&] {
4484     switch (RetAI.getKind()) {
4485     case ABIArgInfo::CoerceAndExpand: {
4486       auto coercionType = RetAI.getCoerceAndExpandType();
4487 
4488       Address addr = SRetPtr;
4489       addr = Builder.CreateElementBitCast(addr, coercionType);
4490 
4491       assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4492       bool requiresExtract = isa<llvm::StructType>(CI->getType());
4493 
4494       unsigned unpaddedIndex = 0;
4495       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4496         llvm::Type *eltType = coercionType->getElementType(i);
4497         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4498         Address eltAddr = Builder.CreateStructGEP(addr, i);
4499         llvm::Value *elt = CI;
4500         if (requiresExtract)
4501           elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4502         else
4503           assert(unpaddedIndex == 0);
4504         Builder.CreateStore(elt, eltAddr);
4505       }
4506       // FALLTHROUGH
4507       LLVM_FALLTHROUGH;
4508     }
4509 
4510     case ABIArgInfo::InAlloca:
4511     case ABIArgInfo::Indirect: {
4512       RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4513       if (UnusedReturnSizePtr)
4514         PopCleanupBlock();
4515       return ret;
4516     }
4517 
4518     case ABIArgInfo::Ignore:
4519       // If we are ignoring an argument that had a result, make sure to
4520       // construct the appropriate return value for our caller.
4521       return GetUndefRValue(RetTy);
4522 
4523     case ABIArgInfo::Extend:
4524     case ABIArgInfo::Direct: {
4525       llvm::Type *RetIRTy = ConvertType(RetTy);
4526       if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4527         switch (getEvaluationKind(RetTy)) {
4528         case TEK_Complex: {
4529           llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4530           llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4531           return RValue::getComplex(std::make_pair(Real, Imag));
4532         }
4533         case TEK_Aggregate: {
4534           Address DestPtr = ReturnValue.getValue();
4535           bool DestIsVolatile = ReturnValue.isVolatile();
4536 
4537           if (!DestPtr.isValid()) {
4538             DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4539             DestIsVolatile = false;
4540           }
4541           BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4542           return RValue::getAggregate(DestPtr);
4543         }
4544         case TEK_Scalar: {
4545           // If the argument doesn't match, perform a bitcast to coerce it.  This
4546           // can happen due to trivial type mismatches.
4547           llvm::Value *V = CI;
4548           if (V->getType() != RetIRTy)
4549             V = Builder.CreateBitCast(V, RetIRTy);
4550           return RValue::get(V);
4551         }
4552         }
4553         llvm_unreachable("bad evaluation kind");
4554       }
4555 
4556       Address DestPtr = ReturnValue.getValue();
4557       bool DestIsVolatile = ReturnValue.isVolatile();
4558 
4559       if (!DestPtr.isValid()) {
4560         DestPtr = CreateMemTemp(RetTy, "coerce");
4561         DestIsVolatile = false;
4562       }
4563 
4564       // If the value is offset in memory, apply the offset now.
4565       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4566       CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4567 
4568       return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4569     }
4570 
4571     case ABIArgInfo::Expand:
4572       llvm_unreachable("Invalid ABI kind for return argument");
4573     }
4574 
4575     llvm_unreachable("Unhandled ABIArgInfo::Kind");
4576   } ();
4577 
4578   // Emit the assume_aligned check on the return value.
4579   if (Ret.isScalar() && TargetDecl) {
4580     if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4581       llvm::Value *OffsetValue = nullptr;
4582       if (const auto *Offset = AA->getOffset())
4583         OffsetValue = EmitScalarExpr(Offset);
4584 
4585       llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4586       llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4587       EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4588                               AlignmentCI, OffsetValue);
4589     } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4590       llvm::Value *AlignmentVal = CallArgs[AA->getParamIndex().getLLVMIndex()]
4591                                       .getRValue(*this)
4592                                       .getScalarVal();
4593       EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4594                               AlignmentVal);
4595     }
4596   }
4597 
4598   // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
4599   // we can't use the full cleanup mechanism.
4600   for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
4601     LifetimeEnd.Emit(*this, /*Flags=*/{});
4602 
4603   return Ret;
4604 }
4605 
4606 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4607   if (isVirtual()) {
4608     const CallExpr *CE = getVirtualCallExpr();
4609     return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4610         CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
4611         CE ? CE->getBeginLoc() : SourceLocation());
4612   }
4613 
4614   return *this;
4615 }
4616 
4617 /* VarArg handling */
4618 
4619 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4620   VAListAddr = VE->isMicrosoftABI()
4621                  ? EmitMSVAListRef(VE->getSubExpr())
4622                  : EmitVAListRef(VE->getSubExpr());
4623   QualType Ty = VE->getType();
4624   if (VE->isMicrosoftABI())
4625     return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4626   return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4627 }
4628