1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // These classes wrap the information about a call or function
10 // definition used to handle ABI compliancy.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGCall.h"
15 #include "ABIInfo.h"
16 #include "CGBlocks.h"
17 #include "CGCXXABI.h"
18 #include "CGCleanup.h"
19 #include "CodeGenFunction.h"
20 #include "CodeGenModule.h"
21 #include "TargetInfo.h"
22 #include "clang/AST/Decl.h"
23 #include "clang/AST/DeclCXX.h"
24 #include "clang/AST/DeclObjC.h"
25 #include "clang/Basic/CodeGenOptions.h"
26 #include "clang/Basic/TargetBuiltins.h"
27 #include "clang/Basic/TargetInfo.h"
28 #include "clang/CodeGen/CGFunctionInfo.h"
29 #include "clang/CodeGen/SwiftCallingConv.h"
30 #include "llvm/ADT/StringExtras.h"
31 #include "llvm/Transforms/Utils/Local.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/IR/Attributes.h"
34 #include "llvm/IR/CallingConv.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/InlineAsm.h"
37 #include "llvm/IR/IntrinsicInst.h"
38 #include "llvm/IR/Intrinsics.h"
39 using namespace clang;
40 using namespace CodeGen;
41 
42 /***/
43 
44 unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
45   switch (CC) {
46   default: return llvm::CallingConv::C;
47   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
48   case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
49   case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
50   case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
51   case CC_Win64: return llvm::CallingConv::Win64;
52   case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
53   case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
54   case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
55   case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
56   // TODO: Add support for __pascal to LLVM.
57   case CC_X86Pascal: return llvm::CallingConv::C;
58   // TODO: Add support for __vectorcall to LLVM.
59   case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
60   case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
61   case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
62   case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
63   case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
64   case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
65   case CC_Swift: return llvm::CallingConv::Swift;
66   }
67 }
68 
69 /// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
70 /// qualification. Either or both of RD and MD may be null. A null RD indicates
71 /// that there is no meaningful 'this' type, and a null MD can occur when
72 /// calling a method pointer.
73 CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
74                                          const CXXMethodDecl *MD) {
75   QualType RecTy;
76   if (RD)
77     RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
78   else
79     RecTy = Context.VoidTy;
80 
81   if (MD)
82     RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace());
83   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
84 }
85 
86 /// Returns the canonical formal type of the given C++ method.
87 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
88   return MD->getType()->getCanonicalTypeUnqualified()
89            .getAs<FunctionProtoType>();
90 }
91 
92 /// Returns the "extra-canonicalized" return type, which discards
93 /// qualifiers on the return type.  Codegen doesn't care about them,
94 /// and it makes ABI code a little easier to be able to assume that
95 /// all parameter and return types are top-level unqualified.
96 static CanQualType GetReturnType(QualType RetTy) {
97   return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
98 }
99 
100 /// Arrange the argument and result information for a value of the given
101 /// unprototyped freestanding function type.
102 const CGFunctionInfo &
103 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
104   // When translating an unprototyped function type, always use a
105   // variadic type.
106   return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
107                                  /*instanceMethod=*/false,
108                                  /*chainCall=*/false, None,
109                                  FTNP->getExtInfo(), {}, RequiredArgs(0));
110 }
111 
112 static void addExtParameterInfosForCall(
113          llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
114                                         const FunctionProtoType *proto,
115                                         unsigned prefixArgs,
116                                         unsigned totalArgs) {
117   assert(proto->hasExtParameterInfos());
118   assert(paramInfos.size() <= prefixArgs);
119   assert(proto->getNumParams() + prefixArgs <= totalArgs);
120 
121   paramInfos.reserve(totalArgs);
122 
123   // Add default infos for any prefix args that don't already have infos.
124   paramInfos.resize(prefixArgs);
125 
126   // Add infos for the prototype.
127   for (const auto &ParamInfo : proto->getExtParameterInfos()) {
128     paramInfos.push_back(ParamInfo);
129     // pass_object_size params have no parameter info.
130     if (ParamInfo.hasPassObjectSize())
131       paramInfos.emplace_back();
132   }
133 
134   assert(paramInfos.size() <= totalArgs &&
135          "Did we forget to insert pass_object_size args?");
136   // Add default infos for the variadic and/or suffix arguments.
137   paramInfos.resize(totalArgs);
138 }
139 
140 /// Adds the formal parameters in FPT to the given prefix. If any parameter in
141 /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
142 static void appendParameterTypes(const CodeGenTypes &CGT,
143                                  SmallVectorImpl<CanQualType> &prefix,
144               SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
145                                  CanQual<FunctionProtoType> FPT) {
146   // Fast path: don't touch param info if we don't need to.
147   if (!FPT->hasExtParameterInfos()) {
148     assert(paramInfos.empty() &&
149            "We have paramInfos, but the prototype doesn't?");
150     prefix.append(FPT->param_type_begin(), FPT->param_type_end());
151     return;
152   }
153 
154   unsigned PrefixSize = prefix.size();
155   // In the vast majority of cases, we'll have precisely FPT->getNumParams()
156   // parameters; the only thing that can change this is the presence of
157   // pass_object_size. So, we preallocate for the common case.
158   prefix.reserve(prefix.size() + FPT->getNumParams());
159 
160   auto ExtInfos = FPT->getExtParameterInfos();
161   assert(ExtInfos.size() == FPT->getNumParams());
162   for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
163     prefix.push_back(FPT->getParamType(I));
164     if (ExtInfos[I].hasPassObjectSize())
165       prefix.push_back(CGT.getContext().getSizeType());
166   }
167 
168   addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
169                               prefix.size());
170 }
171 
172 /// Arrange the LLVM function layout for a value of the given function
173 /// type, on top of any implicit parameters already stored.
174 static const CGFunctionInfo &
175 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
176                         SmallVectorImpl<CanQualType> &prefix,
177                         CanQual<FunctionProtoType> FTP) {
178   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
179   RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
180   // FIXME: Kill copy.
181   appendParameterTypes(CGT, prefix, paramInfos, FTP);
182   CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
183 
184   return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
185                                      /*chainCall=*/false, prefix,
186                                      FTP->getExtInfo(), paramInfos,
187                                      Required);
188 }
189 
190 /// Arrange the argument and result information for a value of the
191 /// given freestanding function type.
192 const CGFunctionInfo &
193 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
194   SmallVector<CanQualType, 16> argTypes;
195   return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
196                                    FTP);
197 }
198 
199 static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
200   // Set the appropriate calling convention for the Function.
201   if (D->hasAttr<StdCallAttr>())
202     return CC_X86StdCall;
203 
204   if (D->hasAttr<FastCallAttr>())
205     return CC_X86FastCall;
206 
207   if (D->hasAttr<RegCallAttr>())
208     return CC_X86RegCall;
209 
210   if (D->hasAttr<ThisCallAttr>())
211     return CC_X86ThisCall;
212 
213   if (D->hasAttr<VectorCallAttr>())
214     return CC_X86VectorCall;
215 
216   if (D->hasAttr<PascalAttr>())
217     return CC_X86Pascal;
218 
219   if (PcsAttr *PCS = D->getAttr<PcsAttr>())
220     return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
221 
222   if (D->hasAttr<AArch64VectorPcsAttr>())
223     return CC_AArch64VectorCall;
224 
225   if (D->hasAttr<IntelOclBiccAttr>())
226     return CC_IntelOclBicc;
227 
228   if (D->hasAttr<MSABIAttr>())
229     return IsWindows ? CC_C : CC_Win64;
230 
231   if (D->hasAttr<SysVABIAttr>())
232     return IsWindows ? CC_X86_64SysV : CC_C;
233 
234   if (D->hasAttr<PreserveMostAttr>())
235     return CC_PreserveMost;
236 
237   if (D->hasAttr<PreserveAllAttr>())
238     return CC_PreserveAll;
239 
240   return CC_C;
241 }
242 
243 /// Arrange the argument and result information for a call to an
244 /// unknown C++ non-static member function of the given abstract type.
245 /// (A null RD means we don't have any meaningful "this" argument type,
246 ///  so fall back to a generic pointer type).
247 /// The member function must be an ordinary function, i.e. not a
248 /// constructor or destructor.
249 const CGFunctionInfo &
250 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
251                                    const FunctionProtoType *FTP,
252                                    const CXXMethodDecl *MD) {
253   SmallVector<CanQualType, 16> argTypes;
254 
255   // Add the 'this' pointer.
256   argTypes.push_back(DeriveThisType(RD, MD));
257 
258   return ::arrangeLLVMFunctionInfo(
259       *this, true, argTypes,
260       FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
261 }
262 
263 /// Set calling convention for CUDA/HIP kernel.
264 static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
265                                            const FunctionDecl *FD) {
266   if (FD->hasAttr<CUDAGlobalAttr>()) {
267     const FunctionType *FT = FTy->getAs<FunctionType>();
268     CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
269     FTy = FT->getCanonicalTypeUnqualified();
270   }
271 }
272 
273 /// Arrange the argument and result information for a declaration or
274 /// definition of the given C++ non-static member function.  The
275 /// member function must be an ordinary function, i.e. not a
276 /// constructor or destructor.
277 const CGFunctionInfo &
278 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
279   assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
280   assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
281 
282   CanQualType FT = GetFormalType(MD).getAs<Type>();
283   setCUDAKernelCallingConvention(FT, CGM, MD);
284   auto prototype = FT.getAs<FunctionProtoType>();
285 
286   if (MD->isInstance()) {
287     // The abstract case is perfectly fine.
288     const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
289     return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
290   }
291 
292   return arrangeFreeFunctionType(prototype);
293 }
294 
295 bool CodeGenTypes::inheritingCtorHasParams(
296     const InheritedConstructor &Inherited, CXXCtorType Type) {
297   // Parameters are unnecessary if we're constructing a base class subobject
298   // and the inherited constructor lives in a virtual base.
299   return Type == Ctor_Complete ||
300          !Inherited.getShadowDecl()->constructsVirtualBase() ||
301          !Target.getCXXABI().hasConstructorVariants();
302 }
303 
304 const CGFunctionInfo &
305 CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
306   auto *MD = cast<CXXMethodDecl>(GD.getDecl());
307 
308   SmallVector<CanQualType, 16> argTypes;
309   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
310   argTypes.push_back(DeriveThisType(MD->getParent(), MD));
311 
312   bool PassParams = true;
313 
314   if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
315     // A base class inheriting constructor doesn't get forwarded arguments
316     // needed to construct a virtual base (or base class thereof).
317     if (auto Inherited = CD->getInheritedConstructor())
318       PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
319   }
320 
321   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
322 
323   // Add the formal parameters.
324   if (PassParams)
325     appendParameterTypes(*this, argTypes, paramInfos, FTP);
326 
327   CGCXXABI::AddedStructorArgs AddedArgs =
328       TheCXXABI.buildStructorSignature(GD, argTypes);
329   if (!paramInfos.empty()) {
330     // Note: prefix implies after the first param.
331     if (AddedArgs.Prefix)
332       paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
333                         FunctionProtoType::ExtParameterInfo{});
334     if (AddedArgs.Suffix)
335       paramInfos.append(AddedArgs.Suffix,
336                         FunctionProtoType::ExtParameterInfo{});
337   }
338 
339   RequiredArgs required =
340       (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
341                                       : RequiredArgs::All);
342 
343   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
344   CanQualType resultType = TheCXXABI.HasThisReturn(GD)
345                                ? argTypes.front()
346                                : TheCXXABI.hasMostDerivedReturn(GD)
347                                      ? CGM.getContext().VoidPtrTy
348                                      : Context.VoidTy;
349   return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
350                                  /*chainCall=*/false, argTypes, extInfo,
351                                  paramInfos, required);
352 }
353 
354 static SmallVector<CanQualType, 16>
355 getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
356   SmallVector<CanQualType, 16> argTypes;
357   for (auto &arg : args)
358     argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
359   return argTypes;
360 }
361 
362 static SmallVector<CanQualType, 16>
363 getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
364   SmallVector<CanQualType, 16> argTypes;
365   for (auto &arg : args)
366     argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
367   return argTypes;
368 }
369 
370 static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
371 getExtParameterInfosForCall(const FunctionProtoType *proto,
372                             unsigned prefixArgs, unsigned totalArgs) {
373   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
374   if (proto->hasExtParameterInfos()) {
375     addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
376   }
377   return result;
378 }
379 
380 /// Arrange a call to a C++ method, passing the given arguments.
381 ///
382 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
383 /// parameter.
384 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
385 /// args.
386 /// PassProtoArgs indicates whether `args` has args for the parameters in the
387 /// given CXXConstructorDecl.
388 const CGFunctionInfo &
389 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
390                                         const CXXConstructorDecl *D,
391                                         CXXCtorType CtorKind,
392                                         unsigned ExtraPrefixArgs,
393                                         unsigned ExtraSuffixArgs,
394                                         bool PassProtoArgs) {
395   // FIXME: Kill copy.
396   SmallVector<CanQualType, 16> ArgTypes;
397   for (const auto &Arg : args)
398     ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
399 
400   // +1 for implicit this, which should always be args[0].
401   unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
402 
403   CanQual<FunctionProtoType> FPT = GetFormalType(D);
404   RequiredArgs Required = PassProtoArgs
405                               ? RequiredArgs::forPrototypePlus(
406                                     FPT, TotalPrefixArgs + ExtraSuffixArgs)
407                               : RequiredArgs::All;
408 
409   GlobalDecl GD(D, CtorKind);
410   CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
411                                ? ArgTypes.front()
412                                : TheCXXABI.hasMostDerivedReturn(GD)
413                                      ? CGM.getContext().VoidPtrTy
414                                      : Context.VoidTy;
415 
416   FunctionType::ExtInfo Info = FPT->getExtInfo();
417   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
418   // If the prototype args are elided, we should only have ABI-specific args,
419   // which never have param info.
420   if (PassProtoArgs && FPT->hasExtParameterInfos()) {
421     // ABI-specific suffix arguments are treated the same as variadic arguments.
422     addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
423                                 ArgTypes.size());
424   }
425   return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
426                                  /*chainCall=*/false, ArgTypes, Info,
427                                  ParamInfos, Required);
428 }
429 
430 /// Arrange the argument and result information for the declaration or
431 /// definition of the given function.
432 const CGFunctionInfo &
433 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
434   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
435     if (MD->isInstance())
436       return arrangeCXXMethodDeclaration(MD);
437 
438   CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
439 
440   assert(isa<FunctionType>(FTy));
441   setCUDAKernelCallingConvention(FTy, CGM, FD);
442 
443   // When declaring a function without a prototype, always use a
444   // non-variadic type.
445   if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
446     return arrangeLLVMFunctionInfo(
447         noProto->getReturnType(), /*instanceMethod=*/false,
448         /*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All);
449   }
450 
451   return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
452 }
453 
454 /// Arrange the argument and result information for the declaration or
455 /// definition of an Objective-C method.
456 const CGFunctionInfo &
457 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
458   // It happens that this is the same as a call with no optional
459   // arguments, except also using the formal 'self' type.
460   return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
461 }
462 
463 /// Arrange the argument and result information for the function type
464 /// through which to perform a send to the given Objective-C method,
465 /// using the given receiver type.  The receiver type is not always
466 /// the 'self' type of the method or even an Objective-C pointer type.
467 /// This is *not* the right method for actually performing such a
468 /// message send, due to the possibility of optional arguments.
469 const CGFunctionInfo &
470 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
471                                               QualType receiverType) {
472   SmallVector<CanQualType, 16> argTys;
473   SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
474   argTys.push_back(Context.getCanonicalParamType(receiverType));
475   argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
476   // FIXME: Kill copy?
477   for (const auto *I : MD->parameters()) {
478     argTys.push_back(Context.getCanonicalParamType(I->getType()));
479     auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
480         I->hasAttr<NoEscapeAttr>());
481     extParamInfos.push_back(extParamInfo);
482   }
483 
484   FunctionType::ExtInfo einfo;
485   bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
486   einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
487 
488   if (getContext().getLangOpts().ObjCAutoRefCount &&
489       MD->hasAttr<NSReturnsRetainedAttr>())
490     einfo = einfo.withProducesResult(true);
491 
492   RequiredArgs required =
493     (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
494 
495   return arrangeLLVMFunctionInfo(
496       GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
497       /*chainCall=*/false, argTys, einfo, extParamInfos, required);
498 }
499 
500 const CGFunctionInfo &
501 CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
502                                                  const CallArgList &args) {
503   auto argTypes = getArgTypesForCall(Context, args);
504   FunctionType::ExtInfo einfo;
505 
506   return arrangeLLVMFunctionInfo(
507       GetReturnType(returnType), /*instanceMethod=*/false,
508       /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
509 }
510 
511 const CGFunctionInfo &
512 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
513   // FIXME: Do we need to handle ObjCMethodDecl?
514   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
515 
516   if (isa<CXXConstructorDecl>(GD.getDecl()) ||
517       isa<CXXDestructorDecl>(GD.getDecl()))
518     return arrangeCXXStructorDeclaration(GD);
519 
520   return arrangeFunctionDeclaration(FD);
521 }
522 
523 /// Arrange a thunk that takes 'this' as the first parameter followed by
524 /// varargs.  Return a void pointer, regardless of the actual return type.
525 /// The body of the thunk will end in a musttail call to a function of the
526 /// correct type, and the caller will bitcast the function to the correct
527 /// prototype.
528 const CGFunctionInfo &
529 CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
530   assert(MD->isVirtual() && "only methods have thunks");
531   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
532   CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
533   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
534                                  /*chainCall=*/false, ArgTys,
535                                  FTP->getExtInfo(), {}, RequiredArgs(1));
536 }
537 
538 const CGFunctionInfo &
539 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
540                                    CXXCtorType CT) {
541   assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
542 
543   CanQual<FunctionProtoType> FTP = GetFormalType(CD);
544   SmallVector<CanQualType, 2> ArgTys;
545   const CXXRecordDecl *RD = CD->getParent();
546   ArgTys.push_back(DeriveThisType(RD, CD));
547   if (CT == Ctor_CopyingClosure)
548     ArgTys.push_back(*FTP->param_type_begin());
549   if (RD->getNumVBases() > 0)
550     ArgTys.push_back(Context.IntTy);
551   CallingConv CC = Context.getDefaultCallingConvention(
552       /*IsVariadic=*/false, /*IsCXXMethod=*/true);
553   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
554                                  /*chainCall=*/false, ArgTys,
555                                  FunctionType::ExtInfo(CC), {},
556                                  RequiredArgs::All);
557 }
558 
559 /// Arrange a call as unto a free function, except possibly with an
560 /// additional number of formal parameters considered required.
561 static const CGFunctionInfo &
562 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
563                             CodeGenModule &CGM,
564                             const CallArgList &args,
565                             const FunctionType *fnType,
566                             unsigned numExtraRequiredArgs,
567                             bool chainCall) {
568   assert(args.size() >= numExtraRequiredArgs);
569 
570   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
571 
572   // In most cases, there are no optional arguments.
573   RequiredArgs required = RequiredArgs::All;
574 
575   // If we have a variadic prototype, the required arguments are the
576   // extra prefix plus the arguments in the prototype.
577   if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
578     if (proto->isVariadic())
579       required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);
580 
581     if (proto->hasExtParameterInfos())
582       addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
583                                   args.size());
584 
585   // If we don't have a prototype at all, but we're supposed to
586   // explicitly use the variadic convention for unprototyped calls,
587   // treat all of the arguments as required but preserve the nominal
588   // possibility of variadics.
589   } else if (CGM.getTargetCodeGenInfo()
590                 .isNoProtoCallVariadic(args,
591                                        cast<FunctionNoProtoType>(fnType))) {
592     required = RequiredArgs(args.size());
593   }
594 
595   // FIXME: Kill copy.
596   SmallVector<CanQualType, 16> argTypes;
597   for (const auto &arg : args)
598     argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
599   return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
600                                      /*instanceMethod=*/false, chainCall,
601                                      argTypes, fnType->getExtInfo(), paramInfos,
602                                      required);
603 }
604 
605 /// Figure out the rules for calling a function with the given formal
606 /// type using the given arguments.  The arguments are necessary
607 /// because the function might be unprototyped, in which case it's
608 /// target-dependent in crazy ways.
609 const CGFunctionInfo &
610 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
611                                       const FunctionType *fnType,
612                                       bool chainCall) {
613   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
614                                      chainCall ? 1 : 0, chainCall);
615 }
616 
617 /// A block function is essentially a free function with an
618 /// extra implicit argument.
619 const CGFunctionInfo &
620 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
621                                        const FunctionType *fnType) {
622   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
623                                      /*chainCall=*/false);
624 }
625 
626 const CGFunctionInfo &
627 CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
628                                               const FunctionArgList &params) {
629   auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
630   auto argTypes = getArgTypesForDeclaration(Context, params);
631 
632   return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
633                                  /*instanceMethod*/ false, /*chainCall*/ false,
634                                  argTypes, proto->getExtInfo(), paramInfos,
635                                  RequiredArgs::forPrototypePlus(proto, 1));
636 }
637 
638 const CGFunctionInfo &
639 CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
640                                          const CallArgList &args) {
641   // FIXME: Kill copy.
642   SmallVector<CanQualType, 16> argTypes;
643   for (const auto &Arg : args)
644     argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
645   return arrangeLLVMFunctionInfo(
646       GetReturnType(resultType), /*instanceMethod=*/false,
647       /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
648       /*paramInfos=*/ {}, RequiredArgs::All);
649 }
650 
651 const CGFunctionInfo &
652 CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
653                                                 const FunctionArgList &args) {
654   auto argTypes = getArgTypesForDeclaration(Context, args);
655 
656   return arrangeLLVMFunctionInfo(
657       GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
658       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
659 }
660 
661 const CGFunctionInfo &
662 CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
663                                               ArrayRef<CanQualType> argTypes) {
664   return arrangeLLVMFunctionInfo(
665       resultType, /*instanceMethod=*/false, /*chainCall=*/false,
666       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
667 }
668 
669 /// Arrange a call to a C++ method, passing the given arguments.
670 ///
671 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
672 /// does not count `this`.
673 const CGFunctionInfo &
674 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
675                                    const FunctionProtoType *proto,
676                                    RequiredArgs required,
677                                    unsigned numPrefixArgs) {
678   assert(numPrefixArgs + 1 <= args.size() &&
679          "Emitting a call with less args than the required prefix?");
680   // Add one to account for `this`. It's a bit awkward here, but we don't count
681   // `this` in similar places elsewhere.
682   auto paramInfos =
683     getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
684 
685   // FIXME: Kill copy.
686   auto argTypes = getArgTypesForCall(Context, args);
687 
688   FunctionType::ExtInfo info = proto->getExtInfo();
689   return arrangeLLVMFunctionInfo(
690       GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
691       /*chainCall=*/false, argTypes, info, paramInfos, required);
692 }
693 
694 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
695   return arrangeLLVMFunctionInfo(
696       getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
697       None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
698 }
699 
700 const CGFunctionInfo &
701 CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
702                           const CallArgList &args) {
703   assert(signature.arg_size() <= args.size());
704   if (signature.arg_size() == args.size())
705     return signature;
706 
707   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
708   auto sigParamInfos = signature.getExtParameterInfos();
709   if (!sigParamInfos.empty()) {
710     paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
711     paramInfos.resize(args.size());
712   }
713 
714   auto argTypes = getArgTypesForCall(Context, args);
715 
716   assert(signature.getRequiredArgs().allowsOptionalArgs());
717   return arrangeLLVMFunctionInfo(signature.getReturnType(),
718                                  signature.isInstanceMethod(),
719                                  signature.isChainCall(),
720                                  argTypes,
721                                  signature.getExtInfo(),
722                                  paramInfos,
723                                  signature.getRequiredArgs());
724 }
725 
726 namespace clang {
727 namespace CodeGen {
728 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
729 }
730 }
731 
732 /// Arrange the argument and result information for an abstract value
733 /// of a given function type.  This is the method which all of the
734 /// above functions ultimately defer to.
735 const CGFunctionInfo &
736 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
737                                       bool instanceMethod,
738                                       bool chainCall,
739                                       ArrayRef<CanQualType> argTypes,
740                                       FunctionType::ExtInfo info,
741                      ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
742                                       RequiredArgs required) {
743   assert(llvm::all_of(argTypes,
744                       [](CanQualType T) { return T.isCanonicalAsParam(); }));
745 
746   // Lookup or create unique function info.
747   llvm::FoldingSetNodeID ID;
748   CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
749                           required, resultType, argTypes);
750 
751   void *insertPos = nullptr;
752   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
753   if (FI)
754     return *FI;
755 
756   unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
757 
758   // Construct the function info.  We co-allocate the ArgInfos.
759   FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
760                               paramInfos, resultType, argTypes, required);
761   FunctionInfos.InsertNode(FI, insertPos);
762 
763   bool inserted = FunctionsBeingProcessed.insert(FI).second;
764   (void)inserted;
765   assert(inserted && "Recursively being processed?");
766 
767   // Compute ABI information.
768   if (CC == llvm::CallingConv::SPIR_KERNEL) {
769     // Force target independent argument handling for the host visible
770     // kernel functions.
771     computeSPIRKernelABIInfo(CGM, *FI);
772   } else if (info.getCC() == CC_Swift) {
773     swiftcall::computeABIInfo(CGM, *FI);
774   } else {
775     getABIInfo().computeInfo(*FI);
776   }
777 
778   // Loop over all of the computed argument and return value info.  If any of
779   // them are direct or extend without a specified coerce type, specify the
780   // default now.
781   ABIArgInfo &retInfo = FI->getReturnInfo();
782   if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
783     retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
784 
785   for (auto &I : FI->arguments())
786     if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
787       I.info.setCoerceToType(ConvertType(I.type));
788 
789   bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
790   assert(erased && "Not in set?");
791 
792   return *FI;
793 }
794 
795 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
796                                        bool instanceMethod,
797                                        bool chainCall,
798                                        const FunctionType::ExtInfo &info,
799                                        ArrayRef<ExtParameterInfo> paramInfos,
800                                        CanQualType resultType,
801                                        ArrayRef<CanQualType> argTypes,
802                                        RequiredArgs required) {
803   assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
804   assert(!required.allowsOptionalArgs() ||
805          required.getNumRequiredArgs() <= argTypes.size());
806 
807   void *buffer =
808     operator new(totalSizeToAlloc<ArgInfo,             ExtParameterInfo>(
809                                   argTypes.size() + 1, paramInfos.size()));
810 
811   CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
812   FI->CallingConvention = llvmCC;
813   FI->EffectiveCallingConvention = llvmCC;
814   FI->ASTCallingConvention = info.getCC();
815   FI->InstanceMethod = instanceMethod;
816   FI->ChainCall = chainCall;
817   FI->NoReturn = info.getNoReturn();
818   FI->ReturnsRetained = info.getProducesResult();
819   FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
820   FI->NoCfCheck = info.getNoCfCheck();
821   FI->Required = required;
822   FI->HasRegParm = info.getHasRegParm();
823   FI->RegParm = info.getRegParm();
824   FI->ArgStruct = nullptr;
825   FI->ArgStructAlign = 0;
826   FI->NumArgs = argTypes.size();
827   FI->HasExtParameterInfos = !paramInfos.empty();
828   FI->getArgsBuffer()[0].type = resultType;
829   for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
830     FI->getArgsBuffer()[i + 1].type = argTypes[i];
831   for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
832     FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
833   return FI;
834 }
835 
836 /***/
837 
838 namespace {
839 // ABIArgInfo::Expand implementation.
840 
841 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
842 struct TypeExpansion {
843   enum TypeExpansionKind {
844     // Elements of constant arrays are expanded recursively.
845     TEK_ConstantArray,
846     // Record fields are expanded recursively (but if record is a union, only
847     // the field with the largest size is expanded).
848     TEK_Record,
849     // For complex types, real and imaginary parts are expanded recursively.
850     TEK_Complex,
851     // All other types are not expandable.
852     TEK_None
853   };
854 
855   const TypeExpansionKind Kind;
856 
857   TypeExpansion(TypeExpansionKind K) : Kind(K) {}
858   virtual ~TypeExpansion() {}
859 };
860 
861 struct ConstantArrayExpansion : TypeExpansion {
862   QualType EltTy;
863   uint64_t NumElts;
864 
865   ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
866       : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
867   static bool classof(const TypeExpansion *TE) {
868     return TE->Kind == TEK_ConstantArray;
869   }
870 };
871 
872 struct RecordExpansion : TypeExpansion {
873   SmallVector<const CXXBaseSpecifier *, 1> Bases;
874 
875   SmallVector<const FieldDecl *, 1> Fields;
876 
877   RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
878                   SmallVector<const FieldDecl *, 1> &&Fields)
879       : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
880         Fields(std::move(Fields)) {}
881   static bool classof(const TypeExpansion *TE) {
882     return TE->Kind == TEK_Record;
883   }
884 };
885 
886 struct ComplexExpansion : TypeExpansion {
887   QualType EltTy;
888 
889   ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
890   static bool classof(const TypeExpansion *TE) {
891     return TE->Kind == TEK_Complex;
892   }
893 };
894 
895 struct NoExpansion : TypeExpansion {
896   NoExpansion() : TypeExpansion(TEK_None) {}
897   static bool classof(const TypeExpansion *TE) {
898     return TE->Kind == TEK_None;
899   }
900 };
901 }  // namespace
902 
903 static std::unique_ptr<TypeExpansion>
904 getTypeExpansion(QualType Ty, const ASTContext &Context) {
905   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
906     return llvm::make_unique<ConstantArrayExpansion>(
907         AT->getElementType(), AT->getSize().getZExtValue());
908   }
909   if (const RecordType *RT = Ty->getAs<RecordType>()) {
910     SmallVector<const CXXBaseSpecifier *, 1> Bases;
911     SmallVector<const FieldDecl *, 1> Fields;
912     const RecordDecl *RD = RT->getDecl();
913     assert(!RD->hasFlexibleArrayMember() &&
914            "Cannot expand structure with flexible array.");
915     if (RD->isUnion()) {
916       // Unions can be here only in degenerative cases - all the fields are same
917       // after flattening. Thus we have to use the "largest" field.
918       const FieldDecl *LargestFD = nullptr;
919       CharUnits UnionSize = CharUnits::Zero();
920 
921       for (const auto *FD : RD->fields()) {
922         if (FD->isZeroLengthBitField(Context))
923           continue;
924         assert(!FD->isBitField() &&
925                "Cannot expand structure with bit-field members.");
926         CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
927         if (UnionSize < FieldSize) {
928           UnionSize = FieldSize;
929           LargestFD = FD;
930         }
931       }
932       if (LargestFD)
933         Fields.push_back(LargestFD);
934     } else {
935       if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
936         assert(!CXXRD->isDynamicClass() &&
937                "cannot expand vtable pointers in dynamic classes");
938         for (const CXXBaseSpecifier &BS : CXXRD->bases())
939           Bases.push_back(&BS);
940       }
941 
942       for (const auto *FD : RD->fields()) {
943         if (FD->isZeroLengthBitField(Context))
944           continue;
945         assert(!FD->isBitField() &&
946                "Cannot expand structure with bit-field members.");
947         Fields.push_back(FD);
948       }
949     }
950     return llvm::make_unique<RecordExpansion>(std::move(Bases),
951                                               std::move(Fields));
952   }
953   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
954     return llvm::make_unique<ComplexExpansion>(CT->getElementType());
955   }
956   return llvm::make_unique<NoExpansion>();
957 }
958 
959 static int getExpansionSize(QualType Ty, const ASTContext &Context) {
960   auto Exp = getTypeExpansion(Ty, Context);
961   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
962     return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
963   }
964   if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
965     int Res = 0;
966     for (auto BS : RExp->Bases)
967       Res += getExpansionSize(BS->getType(), Context);
968     for (auto FD : RExp->Fields)
969       Res += getExpansionSize(FD->getType(), Context);
970     return Res;
971   }
972   if (isa<ComplexExpansion>(Exp.get()))
973     return 2;
974   assert(isa<NoExpansion>(Exp.get()));
975   return 1;
976 }
977 
978 void
979 CodeGenTypes::getExpandedTypes(QualType Ty,
980                                SmallVectorImpl<llvm::Type *>::iterator &TI) {
981   auto Exp = getTypeExpansion(Ty, Context);
982   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
983     for (int i = 0, n = CAExp->NumElts; i < n; i++) {
984       getExpandedTypes(CAExp->EltTy, TI);
985     }
986   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
987     for (auto BS : RExp->Bases)
988       getExpandedTypes(BS->getType(), TI);
989     for (auto FD : RExp->Fields)
990       getExpandedTypes(FD->getType(), TI);
991   } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
992     llvm::Type *EltTy = ConvertType(CExp->EltTy);
993     *TI++ = EltTy;
994     *TI++ = EltTy;
995   } else {
996     assert(isa<NoExpansion>(Exp.get()));
997     *TI++ = ConvertType(Ty);
998   }
999 }
1000 
1001 static void forConstantArrayExpansion(CodeGenFunction &CGF,
1002                                       ConstantArrayExpansion *CAE,
1003                                       Address BaseAddr,
1004                                       llvm::function_ref<void(Address)> Fn) {
1005   CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1006   CharUnits EltAlign =
1007     BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1008 
1009   for (int i = 0, n = CAE->NumElts; i < n; i++) {
1010     llvm::Value *EltAddr =
1011       CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
1012     Fn(Address(EltAddr, EltAlign));
1013   }
1014 }
1015 
1016 void CodeGenFunction::ExpandTypeFromArgs(
1017     QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
1018   assert(LV.isSimple() &&
1019          "Unexpected non-simple lvalue during struct expansion.");
1020 
1021   auto Exp = getTypeExpansion(Ty, getContext());
1022   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1023     forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
1024                               [&](Address EltAddr) {
1025       LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1026       ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1027     });
1028   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1029     Address This = LV.getAddress();
1030     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1031       // Perform a single step derived-to-base conversion.
1032       Address Base =
1033           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1034                                 /*NullCheckValue=*/false, SourceLocation());
1035       LValue SubLV = MakeAddrLValue(Base, BS->getType());
1036 
1037       // Recurse onto bases.
1038       ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1039     }
1040     for (auto FD : RExp->Fields) {
1041       // FIXME: What are the right qualifiers here?
1042       LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1043       ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1044     }
1045   } else if (isa<ComplexExpansion>(Exp.get())) {
1046     auto realValue = *AI++;
1047     auto imagValue = *AI++;
1048     EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1049   } else {
1050     assert(isa<NoExpansion>(Exp.get()));
1051     EmitStoreThroughLValue(RValue::get(*AI++), LV);
1052   }
1053 }
1054 
1055 void CodeGenFunction::ExpandTypeToArgs(
1056     QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1057     SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1058   auto Exp = getTypeExpansion(Ty, getContext());
1059   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1060     Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1061                                    : Arg.getKnownRValue().getAggregateAddress();
1062     forConstantArrayExpansion(
1063         *this, CAExp, Addr, [&](Address EltAddr) {
1064           CallArg EltArg = CallArg(
1065               convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1066               CAExp->EltTy);
1067           ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1068                            IRCallArgPos);
1069         });
1070   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1071     Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1072                                    : Arg.getKnownRValue().getAggregateAddress();
1073     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1074       // Perform a single step derived-to-base conversion.
1075       Address Base =
1076           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1077                                 /*NullCheckValue=*/false, SourceLocation());
1078       CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1079 
1080       // Recurse onto bases.
1081       ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1082                        IRCallArgPos);
1083     }
1084 
1085     LValue LV = MakeAddrLValue(This, Ty);
1086     for (auto FD : RExp->Fields) {
1087       CallArg FldArg =
1088           CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1089       ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1090                        IRCallArgPos);
1091     }
1092   } else if (isa<ComplexExpansion>(Exp.get())) {
1093     ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1094     IRCallArgs[IRCallArgPos++] = CV.first;
1095     IRCallArgs[IRCallArgPos++] = CV.second;
1096   } else {
1097     assert(isa<NoExpansion>(Exp.get()));
1098     auto RV = Arg.getKnownRValue();
1099     assert(RV.isScalar() &&
1100            "Unexpected non-scalar rvalue during struct expansion.");
1101 
1102     // Insert a bitcast as needed.
1103     llvm::Value *V = RV.getScalarVal();
1104     if (IRCallArgPos < IRFuncTy->getNumParams() &&
1105         V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1106       V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1107 
1108     IRCallArgs[IRCallArgPos++] = V;
1109   }
1110 }
1111 
1112 /// Create a temporary allocation for the purposes of coercion.
1113 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1114                                            CharUnits MinAlign) {
1115   // Don't use an alignment that's worse than what LLVM would prefer.
1116   auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1117   CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1118 
1119   return CGF.CreateTempAlloca(Ty, Align);
1120 }
1121 
1122 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1123 /// accessing some number of bytes out of it, try to gep into the struct to get
1124 /// at its inner goodness.  Dive as deep as possible without entering an element
1125 /// with an in-memory size smaller than DstSize.
1126 static Address
1127 EnterStructPointerForCoercedAccess(Address SrcPtr,
1128                                    llvm::StructType *SrcSTy,
1129                                    uint64_t DstSize, CodeGenFunction &CGF) {
1130   // We can't dive into a zero-element struct.
1131   if (SrcSTy->getNumElements() == 0) return SrcPtr;
1132 
1133   llvm::Type *FirstElt = SrcSTy->getElementType(0);
1134 
1135   // If the first elt is at least as large as what we're looking for, or if the
1136   // first element is the same size as the whole struct, we can enter it. The
1137   // comparison must be made on the store size and not the alloca size. Using
1138   // the alloca size may overstate the size of the load.
1139   uint64_t FirstEltSize =
1140     CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1141   if (FirstEltSize < DstSize &&
1142       FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1143     return SrcPtr;
1144 
1145   // GEP into the first element.
1146   SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");
1147 
1148   // If the first element is a struct, recurse.
1149   llvm::Type *SrcTy = SrcPtr.getElementType();
1150   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1151     return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1152 
1153   return SrcPtr;
1154 }
1155 
1156 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1157 /// are either integers or pointers.  This does a truncation of the value if it
1158 /// is too large or a zero extension if it is too small.
1159 ///
1160 /// This behaves as if the value were coerced through memory, so on big-endian
1161 /// targets the high bits are preserved in a truncation, while little-endian
1162 /// targets preserve the low bits.
1163 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1164                                              llvm::Type *Ty,
1165                                              CodeGenFunction &CGF) {
1166   if (Val->getType() == Ty)
1167     return Val;
1168 
1169   if (isa<llvm::PointerType>(Val->getType())) {
1170     // If this is Pointer->Pointer avoid conversion to and from int.
1171     if (isa<llvm::PointerType>(Ty))
1172       return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1173 
1174     // Convert the pointer to an integer so we can play with its width.
1175     Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1176   }
1177 
1178   llvm::Type *DestIntTy = Ty;
1179   if (isa<llvm::PointerType>(DestIntTy))
1180     DestIntTy = CGF.IntPtrTy;
1181 
1182   if (Val->getType() != DestIntTy) {
1183     const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1184     if (DL.isBigEndian()) {
1185       // Preserve the high bits on big-endian targets.
1186       // That is what memory coercion does.
1187       uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1188       uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1189 
1190       if (SrcSize > DstSize) {
1191         Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1192         Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1193       } else {
1194         Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1195         Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1196       }
1197     } else {
1198       // Little-endian targets preserve the low bits. No shifts required.
1199       Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1200     }
1201   }
1202 
1203   if (isa<llvm::PointerType>(Ty))
1204     Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1205   return Val;
1206 }
1207 
1208 
1209 
1210 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1211 /// a pointer to an object of type \arg Ty, known to be aligned to
1212 /// \arg SrcAlign bytes.
1213 ///
1214 /// This safely handles the case when the src type is smaller than the
1215 /// destination type; in this situation the values of bits which not
1216 /// present in the src are undefined.
1217 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1218                                       CodeGenFunction &CGF) {
1219   llvm::Type *SrcTy = Src.getElementType();
1220 
1221   // If SrcTy and Ty are the same, just do a load.
1222   if (SrcTy == Ty)
1223     return CGF.Builder.CreateLoad(Src);
1224 
1225   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1226 
1227   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1228     Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
1229     SrcTy = Src.getType()->getElementType();
1230   }
1231 
1232   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1233 
1234   // If the source and destination are integer or pointer types, just do an
1235   // extension or truncation to the desired type.
1236   if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1237       (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1238     llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1239     return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1240   }
1241 
1242   // If load is legal, just bitcast the src pointer.
1243   if (SrcSize >= DstSize) {
1244     // Generally SrcSize is never greater than DstSize, since this means we are
1245     // losing bits. However, this can happen in cases where the structure has
1246     // additional padding, for example due to a user specified alignment.
1247     //
1248     // FIXME: Assert that we aren't truncating non-padding bits when have access
1249     // to that information.
1250     Src = CGF.Builder.CreateBitCast(Src,
1251                                     Ty->getPointerTo(Src.getAddressSpace()));
1252     return CGF.Builder.CreateLoad(Src);
1253   }
1254 
1255   // Otherwise do coercion through memory. This is stupid, but simple.
1256   Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
1257   Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
1258   Address SrcCasted = CGF.Builder.CreateElementBitCast(Src,CGF.Int8Ty);
1259   CGF.Builder.CreateMemCpy(Casted, SrcCasted,
1260       llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
1261       false);
1262   return CGF.Builder.CreateLoad(Tmp);
1263 }
1264 
1265 // Function to store a first-class aggregate into memory.  We prefer to
1266 // store the elements rather than the aggregate to be more friendly to
1267 // fast-isel.
1268 // FIXME: Do we need to recurse here?
1269 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
1270                           Address Dest, bool DestIsVolatile) {
1271   // Prefer scalar stores to first-class aggregate stores.
1272   if (llvm::StructType *STy =
1273         dyn_cast<llvm::StructType>(Val->getType())) {
1274     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1275       Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i);
1276       llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
1277       CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1278     }
1279   } else {
1280     CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
1281   }
1282 }
1283 
1284 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1285 /// where the source and destination may have different types.  The
1286 /// destination is known to be aligned to \arg DstAlign bytes.
1287 ///
1288 /// This safely handles the case when the src type is larger than the
1289 /// destination type; the upper bits of the src will be lost.
1290 static void CreateCoercedStore(llvm::Value *Src,
1291                                Address Dst,
1292                                bool DstIsVolatile,
1293                                CodeGenFunction &CGF) {
1294   llvm::Type *SrcTy = Src->getType();
1295   llvm::Type *DstTy = Dst.getType()->getElementType();
1296   if (SrcTy == DstTy) {
1297     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1298     return;
1299   }
1300 
1301   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1302 
1303   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1304     Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
1305     DstTy = Dst.getType()->getElementType();
1306   }
1307 
1308   // If the source and destination are integer or pointer types, just do an
1309   // extension or truncation to the desired type.
1310   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1311       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1312     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1313     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1314     return;
1315   }
1316 
1317   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1318 
1319   // If store is legal, just bitcast the src pointer.
1320   if (SrcSize <= DstSize) {
1321     Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1322     BuildAggStore(CGF, Src, Dst, DstIsVolatile);
1323   } else {
1324     // Otherwise do coercion through memory. This is stupid, but
1325     // simple.
1326 
1327     // Generally SrcSize is never greater than DstSize, since this means we are
1328     // losing bits. However, this can happen in cases where the structure has
1329     // additional padding, for example due to a user specified alignment.
1330     //
1331     // FIXME: Assert that we aren't truncating non-padding bits when have access
1332     // to that information.
1333     Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1334     CGF.Builder.CreateStore(Src, Tmp);
1335     Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
1336     Address DstCasted = CGF.Builder.CreateElementBitCast(Dst,CGF.Int8Ty);
1337     CGF.Builder.CreateMemCpy(DstCasted, Casted,
1338         llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
1339         false);
1340   }
1341 }
1342 
1343 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1344                                    const ABIArgInfo &info) {
1345   if (unsigned offset = info.getDirectOffset()) {
1346     addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1347     addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1348                                              CharUnits::fromQuantity(offset));
1349     addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1350   }
1351   return addr;
1352 }
1353 
1354 namespace {
1355 
1356 /// Encapsulates information about the way function arguments from
1357 /// CGFunctionInfo should be passed to actual LLVM IR function.
1358 class ClangToLLVMArgMapping {
1359   static const unsigned InvalidIndex = ~0U;
1360   unsigned InallocaArgNo;
1361   unsigned SRetArgNo;
1362   unsigned TotalIRArgs;
1363 
1364   /// Arguments of LLVM IR function corresponding to single Clang argument.
1365   struct IRArgs {
1366     unsigned PaddingArgIndex;
1367     // Argument is expanded to IR arguments at positions
1368     // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1369     unsigned FirstArgIndex;
1370     unsigned NumberOfArgs;
1371 
1372     IRArgs()
1373         : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1374           NumberOfArgs(0) {}
1375   };
1376 
1377   SmallVector<IRArgs, 8> ArgInfo;
1378 
1379 public:
1380   ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1381                         bool OnlyRequiredArgs = false)
1382       : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1383         ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1384     construct(Context, FI, OnlyRequiredArgs);
1385   }
1386 
1387   bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1388   unsigned getInallocaArgNo() const {
1389     assert(hasInallocaArg());
1390     return InallocaArgNo;
1391   }
1392 
1393   bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1394   unsigned getSRetArgNo() const {
1395     assert(hasSRetArg());
1396     return SRetArgNo;
1397   }
1398 
1399   unsigned totalIRArgs() const { return TotalIRArgs; }
1400 
1401   bool hasPaddingArg(unsigned ArgNo) const {
1402     assert(ArgNo < ArgInfo.size());
1403     return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1404   }
1405   unsigned getPaddingArgNo(unsigned ArgNo) const {
1406     assert(hasPaddingArg(ArgNo));
1407     return ArgInfo[ArgNo].PaddingArgIndex;
1408   }
1409 
1410   /// Returns index of first IR argument corresponding to ArgNo, and their
1411   /// quantity.
1412   std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1413     assert(ArgNo < ArgInfo.size());
1414     return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1415                           ArgInfo[ArgNo].NumberOfArgs);
1416   }
1417 
1418 private:
1419   void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1420                  bool OnlyRequiredArgs);
1421 };
1422 
1423 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1424                                       const CGFunctionInfo &FI,
1425                                       bool OnlyRequiredArgs) {
1426   unsigned IRArgNo = 0;
1427   bool SwapThisWithSRet = false;
1428   const ABIArgInfo &RetAI = FI.getReturnInfo();
1429 
1430   if (RetAI.getKind() == ABIArgInfo::Indirect) {
1431     SwapThisWithSRet = RetAI.isSRetAfterThis();
1432     SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1433   }
1434 
1435   unsigned ArgNo = 0;
1436   unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1437   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1438        ++I, ++ArgNo) {
1439     assert(I != FI.arg_end());
1440     QualType ArgType = I->type;
1441     const ABIArgInfo &AI = I->info;
1442     // Collect data about IR arguments corresponding to Clang argument ArgNo.
1443     auto &IRArgs = ArgInfo[ArgNo];
1444 
1445     if (AI.getPaddingType())
1446       IRArgs.PaddingArgIndex = IRArgNo++;
1447 
1448     switch (AI.getKind()) {
1449     case ABIArgInfo::Extend:
1450     case ABIArgInfo::Direct: {
1451       // FIXME: handle sseregparm someday...
1452       llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1453       if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1454         IRArgs.NumberOfArgs = STy->getNumElements();
1455       } else {
1456         IRArgs.NumberOfArgs = 1;
1457       }
1458       break;
1459     }
1460     case ABIArgInfo::Indirect:
1461       IRArgs.NumberOfArgs = 1;
1462       break;
1463     case ABIArgInfo::Ignore:
1464     case ABIArgInfo::InAlloca:
1465       // ignore and inalloca doesn't have matching LLVM parameters.
1466       IRArgs.NumberOfArgs = 0;
1467       break;
1468     case ABIArgInfo::CoerceAndExpand:
1469       IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1470       break;
1471     case ABIArgInfo::Expand:
1472       IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1473       break;
1474     }
1475 
1476     if (IRArgs.NumberOfArgs > 0) {
1477       IRArgs.FirstArgIndex = IRArgNo;
1478       IRArgNo += IRArgs.NumberOfArgs;
1479     }
1480 
1481     // Skip over the sret parameter when it comes second.  We already handled it
1482     // above.
1483     if (IRArgNo == 1 && SwapThisWithSRet)
1484       IRArgNo++;
1485   }
1486   assert(ArgNo == ArgInfo.size());
1487 
1488   if (FI.usesInAlloca())
1489     InallocaArgNo = IRArgNo++;
1490 
1491   TotalIRArgs = IRArgNo;
1492 }
1493 }  // namespace
1494 
1495 /***/
1496 
1497 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1498   const auto &RI = FI.getReturnInfo();
1499   return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1500 }
1501 
1502 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1503   return ReturnTypeUsesSRet(FI) &&
1504          getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1505 }
1506 
1507 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1508   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1509     switch (BT->getKind()) {
1510     default:
1511       return false;
1512     case BuiltinType::Float:
1513       return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1514     case BuiltinType::Double:
1515       return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1516     case BuiltinType::LongDouble:
1517       return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1518     }
1519   }
1520 
1521   return false;
1522 }
1523 
1524 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1525   if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1526     if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1527       if (BT->getKind() == BuiltinType::LongDouble)
1528         return getTarget().useObjCFP2RetForComplexLongDouble();
1529     }
1530   }
1531 
1532   return false;
1533 }
1534 
1535 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1536   const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1537   return GetFunctionType(FI);
1538 }
1539 
1540 llvm::FunctionType *
1541 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1542 
1543   bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1544   (void)Inserted;
1545   assert(Inserted && "Recursively being processed?");
1546 
1547   llvm::Type *resultType = nullptr;
1548   const ABIArgInfo &retAI = FI.getReturnInfo();
1549   switch (retAI.getKind()) {
1550   case ABIArgInfo::Expand:
1551     llvm_unreachable("Invalid ABI kind for return argument");
1552 
1553   case ABIArgInfo::Extend:
1554   case ABIArgInfo::Direct:
1555     resultType = retAI.getCoerceToType();
1556     break;
1557 
1558   case ABIArgInfo::InAlloca:
1559     if (retAI.getInAllocaSRet()) {
1560       // sret things on win32 aren't void, they return the sret pointer.
1561       QualType ret = FI.getReturnType();
1562       llvm::Type *ty = ConvertType(ret);
1563       unsigned addressSpace = Context.getTargetAddressSpace(ret);
1564       resultType = llvm::PointerType::get(ty, addressSpace);
1565     } else {
1566       resultType = llvm::Type::getVoidTy(getLLVMContext());
1567     }
1568     break;
1569 
1570   case ABIArgInfo::Indirect:
1571   case ABIArgInfo::Ignore:
1572     resultType = llvm::Type::getVoidTy(getLLVMContext());
1573     break;
1574 
1575   case ABIArgInfo::CoerceAndExpand:
1576     resultType = retAI.getUnpaddedCoerceAndExpandType();
1577     break;
1578   }
1579 
1580   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1581   SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1582 
1583   // Add type for sret argument.
1584   if (IRFunctionArgs.hasSRetArg()) {
1585     QualType Ret = FI.getReturnType();
1586     llvm::Type *Ty = ConvertType(Ret);
1587     unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1588     ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1589         llvm::PointerType::get(Ty, AddressSpace);
1590   }
1591 
1592   // Add type for inalloca argument.
1593   if (IRFunctionArgs.hasInallocaArg()) {
1594     auto ArgStruct = FI.getArgStruct();
1595     assert(ArgStruct);
1596     ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1597   }
1598 
1599   // Add in all of the required arguments.
1600   unsigned ArgNo = 0;
1601   CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1602                                      ie = it + FI.getNumRequiredArgs();
1603   for (; it != ie; ++it, ++ArgNo) {
1604     const ABIArgInfo &ArgInfo = it->info;
1605 
1606     // Insert a padding type to ensure proper alignment.
1607     if (IRFunctionArgs.hasPaddingArg(ArgNo))
1608       ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1609           ArgInfo.getPaddingType();
1610 
1611     unsigned FirstIRArg, NumIRArgs;
1612     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1613 
1614     switch (ArgInfo.getKind()) {
1615     case ABIArgInfo::Ignore:
1616     case ABIArgInfo::InAlloca:
1617       assert(NumIRArgs == 0);
1618       break;
1619 
1620     case ABIArgInfo::Indirect: {
1621       assert(NumIRArgs == 1);
1622       // indirect arguments are always on the stack, which is alloca addr space.
1623       llvm::Type *LTy = ConvertTypeForMem(it->type);
1624       ArgTypes[FirstIRArg] = LTy->getPointerTo(
1625           CGM.getDataLayout().getAllocaAddrSpace());
1626       break;
1627     }
1628 
1629     case ABIArgInfo::Extend:
1630     case ABIArgInfo::Direct: {
1631       // Fast-isel and the optimizer generally like scalar values better than
1632       // FCAs, so we flatten them if this is safe to do for this argument.
1633       llvm::Type *argType = ArgInfo.getCoerceToType();
1634       llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1635       if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1636         assert(NumIRArgs == st->getNumElements());
1637         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1638           ArgTypes[FirstIRArg + i] = st->getElementType(i);
1639       } else {
1640         assert(NumIRArgs == 1);
1641         ArgTypes[FirstIRArg] = argType;
1642       }
1643       break;
1644     }
1645 
1646     case ABIArgInfo::CoerceAndExpand: {
1647       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1648       for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1649         *ArgTypesIter++ = EltTy;
1650       }
1651       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1652       break;
1653     }
1654 
1655     case ABIArgInfo::Expand:
1656       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1657       getExpandedTypes(it->type, ArgTypesIter);
1658       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1659       break;
1660     }
1661   }
1662 
1663   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1664   assert(Erased && "Not in set?");
1665 
1666   return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1667 }
1668 
1669 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1670   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1671   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1672 
1673   if (!isFuncTypeConvertible(FPT))
1674     return llvm::StructType::get(getLLVMContext());
1675 
1676   return GetFunctionType(GD);
1677 }
1678 
1679 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1680                                                llvm::AttrBuilder &FuncAttrs,
1681                                                const FunctionProtoType *FPT) {
1682   if (!FPT)
1683     return;
1684 
1685   if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1686       FPT->isNothrow())
1687     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1688 }
1689 
1690 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1691                                                bool AttrOnCallSite,
1692                                                llvm::AttrBuilder &FuncAttrs) {
1693   // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1694   if (!HasOptnone) {
1695     if (CodeGenOpts.OptimizeSize)
1696       FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1697     if (CodeGenOpts.OptimizeSize == 2)
1698       FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1699   }
1700 
1701   if (CodeGenOpts.DisableRedZone)
1702     FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1703   if (CodeGenOpts.IndirectTlsSegRefs)
1704     FuncAttrs.addAttribute("indirect-tls-seg-refs");
1705   if (CodeGenOpts.NoImplicitFloat)
1706     FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1707 
1708   if (AttrOnCallSite) {
1709     // Attributes that should go on the call site only.
1710     if (!CodeGenOpts.SimplifyLibCalls ||
1711         CodeGenOpts.isNoBuiltinFunc(Name.data()))
1712       FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1713     if (!CodeGenOpts.TrapFuncName.empty())
1714       FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1715   } else {
1716     // Attributes that should go on the function, but not the call site.
1717     if (!CodeGenOpts.DisableFPElim) {
1718       FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1719     } else if (CodeGenOpts.OmitLeafFramePointer) {
1720       FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1721       FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1722     } else {
1723       FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1724       FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1725     }
1726 
1727     FuncAttrs.addAttribute("less-precise-fpmad",
1728                            llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1729 
1730     if (CodeGenOpts.NullPointerIsValid)
1731       FuncAttrs.addAttribute("null-pointer-is-valid", "true");
1732     if (!CodeGenOpts.FPDenormalMode.empty())
1733       FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1734 
1735     FuncAttrs.addAttribute("no-trapping-math",
1736                            llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1737 
1738     // Strict (compliant) code is the default, so only add this attribute to
1739     // indicate that we are trying to workaround a problem case.
1740     if (!CodeGenOpts.StrictFloatCastOverflow)
1741       FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1742 
1743     // TODO: Are these all needed?
1744     // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1745     FuncAttrs.addAttribute("no-infs-fp-math",
1746                            llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1747     FuncAttrs.addAttribute("no-nans-fp-math",
1748                            llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1749     FuncAttrs.addAttribute("unsafe-fp-math",
1750                            llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1751     FuncAttrs.addAttribute("use-soft-float",
1752                            llvm::toStringRef(CodeGenOpts.SoftFloat));
1753     FuncAttrs.addAttribute("stack-protector-buffer-size",
1754                            llvm::utostr(CodeGenOpts.SSPBufferSize));
1755     FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1756                            llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1757     FuncAttrs.addAttribute(
1758         "correctly-rounded-divide-sqrt-fp-math",
1759         llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1760 
1761     if (getLangOpts().OpenCL)
1762       FuncAttrs.addAttribute("denorms-are-zero",
1763                              llvm::toStringRef(CodeGenOpts.FlushDenorm));
1764 
1765     // TODO: Reciprocal estimate codegen options should apply to instructions?
1766     const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1767     if (!Recips.empty())
1768       FuncAttrs.addAttribute("reciprocal-estimates",
1769                              llvm::join(Recips, ","));
1770 
1771     if (!CodeGenOpts.PreferVectorWidth.empty() &&
1772         CodeGenOpts.PreferVectorWidth != "none")
1773       FuncAttrs.addAttribute("prefer-vector-width",
1774                              CodeGenOpts.PreferVectorWidth);
1775 
1776     if (CodeGenOpts.StackRealignment)
1777       FuncAttrs.addAttribute("stackrealign");
1778     if (CodeGenOpts.Backchain)
1779       FuncAttrs.addAttribute("backchain");
1780 
1781     if (CodeGenOpts.SpeculativeLoadHardening)
1782       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1783   }
1784 
1785   if (getLangOpts().assumeFunctionsAreConvergent()) {
1786     // Conservatively, mark all functions and calls in CUDA and OpenCL as
1787     // convergent (meaning, they may call an intrinsically convergent op, such
1788     // as __syncthreads() / barrier(), and so can't have certain optimizations
1789     // applied around them).  LLVM will remove this attribute where it safely
1790     // can.
1791     FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1792   }
1793 
1794   if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1795     // Exceptions aren't supported in CUDA device code.
1796     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1797 
1798     // Respect -fcuda-flush-denormals-to-zero.
1799     if (CodeGenOpts.FlushDenorm)
1800       FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1801   }
1802 
1803   for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1804     StringRef Var, Value;
1805     std::tie(Var, Value) = Attr.split('=');
1806     FuncAttrs.addAttribute(Var, Value);
1807   }
1808 }
1809 
1810 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1811   llvm::AttrBuilder FuncAttrs;
1812   ConstructDefaultFnAttrList(F.getName(), F.hasOptNone(),
1813                              /* AttrOnCallsite = */ false, FuncAttrs);
1814   F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1815 }
1816 
1817 void CodeGenModule::ConstructAttributeList(
1818     StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1819     llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1820   llvm::AttrBuilder FuncAttrs;
1821   llvm::AttrBuilder RetAttrs;
1822 
1823   CallingConv = FI.getEffectiveCallingConvention();
1824   if (FI.isNoReturn())
1825     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1826 
1827   // If we have information about the function prototype, we can learn
1828   // attributes from there.
1829   AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1830                                      CalleeInfo.getCalleeFunctionProtoType());
1831 
1832   const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
1833 
1834   bool HasOptnone = false;
1835   // FIXME: handle sseregparm someday...
1836   if (TargetDecl) {
1837     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1838       FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1839     if (TargetDecl->hasAttr<NoThrowAttr>())
1840       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1841     if (TargetDecl->hasAttr<NoReturnAttr>())
1842       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1843     if (TargetDecl->hasAttr<ColdAttr>())
1844       FuncAttrs.addAttribute(llvm::Attribute::Cold);
1845     if (TargetDecl->hasAttr<NoDuplicateAttr>())
1846       FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1847     if (TargetDecl->hasAttr<ConvergentAttr>())
1848       FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1849 
1850     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1851       AddAttributesFromFunctionProtoType(
1852           getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1853       // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1854       // These attributes are not inherited by overloads.
1855       const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1856       if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1857         FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1858     }
1859 
1860     // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1861     if (TargetDecl->hasAttr<ConstAttr>()) {
1862       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1863       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1864     } else if (TargetDecl->hasAttr<PureAttr>()) {
1865       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1866       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1867     } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1868       FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1869       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1870     }
1871     if (TargetDecl->hasAttr<RestrictAttr>())
1872       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1873     if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1874         !CodeGenOpts.NullPointerIsValid)
1875       RetAttrs.addAttribute(llvm::Attribute::NonNull);
1876     if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1877       FuncAttrs.addAttribute("no_caller_saved_registers");
1878     if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1879       FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1880 
1881     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1882     if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1883       Optional<unsigned> NumElemsParam;
1884       if (AllocSize->getNumElemsParam().isValid())
1885         NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1886       FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1887                                  NumElemsParam);
1888     }
1889   }
1890 
1891   ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1892 
1893   // This must run after constructing the default function attribute list
1894   // to ensure that the speculative load hardening attribute is removed
1895   // in the case where the -mspeculative-load-hardening flag was passed.
1896   if (TargetDecl) {
1897     if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
1898       FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
1899     if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
1900       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1901   }
1902 
1903   if (CodeGenOpts.EnableSegmentedStacks &&
1904       !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1905     FuncAttrs.addAttribute("split-stack");
1906 
1907   // Add NonLazyBind attribute to function declarations when -fno-plt
1908   // is used.
1909   if (TargetDecl && CodeGenOpts.NoPLT) {
1910     if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1911       if (!Fn->isDefined() && !AttrOnCallSite) {
1912         FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1913       }
1914     }
1915   }
1916 
1917   if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1918     if (getLangOpts().OpenCLVersion <= 120) {
1919       // OpenCL v1.2 Work groups are always uniform
1920       FuncAttrs.addAttribute("uniform-work-group-size", "true");
1921     } else {
1922       // OpenCL v2.0 Work groups may be whether uniform or not.
1923       // '-cl-uniform-work-group-size' compile option gets a hint
1924       // to the compiler that the global work-size be a multiple of
1925       // the work-group size specified to clEnqueueNDRangeKernel
1926       // (i.e. work groups are uniform).
1927       FuncAttrs.addAttribute("uniform-work-group-size",
1928                              llvm::toStringRef(CodeGenOpts.UniformWGSize));
1929     }
1930   }
1931 
1932   if (!AttrOnCallSite) {
1933     bool DisableTailCalls = false;
1934 
1935     if (CodeGenOpts.DisableTailCalls)
1936       DisableTailCalls = true;
1937     else if (TargetDecl) {
1938       if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1939           TargetDecl->hasAttr<AnyX86InterruptAttr>())
1940         DisableTailCalls = true;
1941       else if (CodeGenOpts.NoEscapingBlockTailCalls) {
1942         if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
1943           if (!BD->doesNotEscape())
1944             DisableTailCalls = true;
1945       }
1946     }
1947 
1948     FuncAttrs.addAttribute("disable-tail-calls",
1949                            llvm::toStringRef(DisableTailCalls));
1950     GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
1951   }
1952 
1953   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1954 
1955   QualType RetTy = FI.getReturnType();
1956   const ABIArgInfo &RetAI = FI.getReturnInfo();
1957   switch (RetAI.getKind()) {
1958   case ABIArgInfo::Extend:
1959     if (RetAI.isSignExt())
1960       RetAttrs.addAttribute(llvm::Attribute::SExt);
1961     else
1962       RetAttrs.addAttribute(llvm::Attribute::ZExt);
1963     LLVM_FALLTHROUGH;
1964   case ABIArgInfo::Direct:
1965     if (RetAI.getInReg())
1966       RetAttrs.addAttribute(llvm::Attribute::InReg);
1967     break;
1968   case ABIArgInfo::Ignore:
1969     break;
1970 
1971   case ABIArgInfo::InAlloca:
1972   case ABIArgInfo::Indirect: {
1973     // inalloca and sret disable readnone and readonly
1974     FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1975       .removeAttribute(llvm::Attribute::ReadNone);
1976     break;
1977   }
1978 
1979   case ABIArgInfo::CoerceAndExpand:
1980     break;
1981 
1982   case ABIArgInfo::Expand:
1983     llvm_unreachable("Invalid ABI kind for return argument");
1984   }
1985 
1986   if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1987     QualType PTy = RefTy->getPointeeType();
1988     if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1989       RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1990                                         .getQuantity());
1991     else if (getContext().getTargetAddressSpace(PTy) == 0 &&
1992              !CodeGenOpts.NullPointerIsValid)
1993       RetAttrs.addAttribute(llvm::Attribute::NonNull);
1994   }
1995 
1996   bool hasUsedSRet = false;
1997   SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
1998 
1999   // Attach attributes to sret.
2000   if (IRFunctionArgs.hasSRetArg()) {
2001     llvm::AttrBuilder SRETAttrs;
2002     if (!RetAI.getSuppressSRet())
2003       SRETAttrs.addAttribute(llvm::Attribute::StructRet);
2004     hasUsedSRet = true;
2005     if (RetAI.getInReg())
2006       SRETAttrs.addAttribute(llvm::Attribute::InReg);
2007     ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2008         llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2009   }
2010 
2011   // Attach attributes to inalloca argument.
2012   if (IRFunctionArgs.hasInallocaArg()) {
2013     llvm::AttrBuilder Attrs;
2014     Attrs.addAttribute(llvm::Attribute::InAlloca);
2015     ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2016         llvm::AttributeSet::get(getLLVMContext(), Attrs);
2017   }
2018 
2019   unsigned ArgNo = 0;
2020   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2021                                           E = FI.arg_end();
2022        I != E; ++I, ++ArgNo) {
2023     QualType ParamType = I->type;
2024     const ABIArgInfo &AI = I->info;
2025     llvm::AttrBuilder Attrs;
2026 
2027     // Add attribute for padding argument, if necessary.
2028     if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2029       if (AI.getPaddingInReg()) {
2030         ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2031             llvm::AttributeSet::get(
2032                 getLLVMContext(),
2033                 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2034       }
2035     }
2036 
2037     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2038     // have the corresponding parameter variable.  It doesn't make
2039     // sense to do it here because parameters are so messed up.
2040     switch (AI.getKind()) {
2041     case ABIArgInfo::Extend:
2042       if (AI.isSignExt())
2043         Attrs.addAttribute(llvm::Attribute::SExt);
2044       else
2045         Attrs.addAttribute(llvm::Attribute::ZExt);
2046       LLVM_FALLTHROUGH;
2047     case ABIArgInfo::Direct:
2048       if (ArgNo == 0 && FI.isChainCall())
2049         Attrs.addAttribute(llvm::Attribute::Nest);
2050       else if (AI.getInReg())
2051         Attrs.addAttribute(llvm::Attribute::InReg);
2052       break;
2053 
2054     case ABIArgInfo::Indirect: {
2055       if (AI.getInReg())
2056         Attrs.addAttribute(llvm::Attribute::InReg);
2057 
2058       if (AI.getIndirectByVal())
2059         Attrs.addAttribute(llvm::Attribute::ByVal);
2060 
2061       CharUnits Align = AI.getIndirectAlign();
2062 
2063       // In a byval argument, it is important that the required
2064       // alignment of the type is honored, as LLVM might be creating a
2065       // *new* stack object, and needs to know what alignment to give
2066       // it. (Sometimes it can deduce a sensible alignment on its own,
2067       // but not if clang decides it must emit a packed struct, or the
2068       // user specifies increased alignment requirements.)
2069       //
2070       // This is different from indirect *not* byval, where the object
2071       // exists already, and the align attribute is purely
2072       // informative.
2073       assert(!Align.isZero());
2074 
2075       // For now, only add this when we have a byval argument.
2076       // TODO: be less lazy about updating test cases.
2077       if (AI.getIndirectByVal())
2078         Attrs.addAlignmentAttr(Align.getQuantity());
2079 
2080       // byval disables readnone and readonly.
2081       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2082         .removeAttribute(llvm::Attribute::ReadNone);
2083       break;
2084     }
2085     case ABIArgInfo::Ignore:
2086     case ABIArgInfo::Expand:
2087     case ABIArgInfo::CoerceAndExpand:
2088       break;
2089 
2090     case ABIArgInfo::InAlloca:
2091       // inalloca disables readnone and readonly.
2092       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2093           .removeAttribute(llvm::Attribute::ReadNone);
2094       continue;
2095     }
2096 
2097     if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2098       QualType PTy = RefTy->getPointeeType();
2099       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2100         Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2101                                        .getQuantity());
2102       else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2103                !CodeGenOpts.NullPointerIsValid)
2104         Attrs.addAttribute(llvm::Attribute::NonNull);
2105     }
2106 
2107     switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2108     case ParameterABI::Ordinary:
2109       break;
2110 
2111     case ParameterABI::SwiftIndirectResult: {
2112       // Add 'sret' if we haven't already used it for something, but
2113       // only if the result is void.
2114       if (!hasUsedSRet && RetTy->isVoidType()) {
2115         Attrs.addAttribute(llvm::Attribute::StructRet);
2116         hasUsedSRet = true;
2117       }
2118 
2119       // Add 'noalias' in either case.
2120       Attrs.addAttribute(llvm::Attribute::NoAlias);
2121 
2122       // Add 'dereferenceable' and 'alignment'.
2123       auto PTy = ParamType->getPointeeType();
2124       if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2125         auto info = getContext().getTypeInfoInChars(PTy);
2126         Attrs.addDereferenceableAttr(info.first.getQuantity());
2127         Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2128                                                  info.second.getQuantity()));
2129       }
2130       break;
2131     }
2132 
2133     case ParameterABI::SwiftErrorResult:
2134       Attrs.addAttribute(llvm::Attribute::SwiftError);
2135       break;
2136 
2137     case ParameterABI::SwiftContext:
2138       Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2139       break;
2140     }
2141 
2142     if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2143       Attrs.addAttribute(llvm::Attribute::NoCapture);
2144 
2145     if (Attrs.hasAttributes()) {
2146       unsigned FirstIRArg, NumIRArgs;
2147       std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2148       for (unsigned i = 0; i < NumIRArgs; i++)
2149         ArgAttrs[FirstIRArg + i] =
2150             llvm::AttributeSet::get(getLLVMContext(), Attrs);
2151     }
2152   }
2153   assert(ArgNo == FI.arg_size());
2154 
2155   AttrList = llvm::AttributeList::get(
2156       getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2157       llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2158 }
2159 
2160 /// An argument came in as a promoted argument; demote it back to its
2161 /// declared type.
2162 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2163                                          const VarDecl *var,
2164                                          llvm::Value *value) {
2165   llvm::Type *varType = CGF.ConvertType(var->getType());
2166 
2167   // This can happen with promotions that actually don't change the
2168   // underlying type, like the enum promotions.
2169   if (value->getType() == varType) return value;
2170 
2171   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2172          && "unexpected promotion type");
2173 
2174   if (isa<llvm::IntegerType>(varType))
2175     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2176 
2177   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2178 }
2179 
2180 /// Returns the attribute (either parameter attribute, or function
2181 /// attribute), which declares argument ArgNo to be non-null.
2182 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2183                                          QualType ArgType, unsigned ArgNo) {
2184   // FIXME: __attribute__((nonnull)) can also be applied to:
2185   //   - references to pointers, where the pointee is known to be
2186   //     nonnull (apparently a Clang extension)
2187   //   - transparent unions containing pointers
2188   // In the former case, LLVM IR cannot represent the constraint. In
2189   // the latter case, we have no guarantee that the transparent union
2190   // is in fact passed as a pointer.
2191   if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2192     return nullptr;
2193   // First, check attribute on parameter itself.
2194   if (PVD) {
2195     if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2196       return ParmNNAttr;
2197   }
2198   // Check function attributes.
2199   if (!FD)
2200     return nullptr;
2201   for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2202     if (NNAttr->isNonNull(ArgNo))
2203       return NNAttr;
2204   }
2205   return nullptr;
2206 }
2207 
2208 namespace {
2209   struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2210     Address Temp;
2211     Address Arg;
2212     CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2213     void Emit(CodeGenFunction &CGF, Flags flags) override {
2214       llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2215       CGF.Builder.CreateStore(errorValue, Arg);
2216     }
2217   };
2218 }
2219 
2220 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2221                                          llvm::Function *Fn,
2222                                          const FunctionArgList &Args) {
2223   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2224     // Naked functions don't have prologues.
2225     return;
2226 
2227   // If this is an implicit-return-zero function, go ahead and
2228   // initialize the return value.  TODO: it might be nice to have
2229   // a more general mechanism for this that didn't require synthesized
2230   // return statements.
2231   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2232     if (FD->hasImplicitReturnZero()) {
2233       QualType RetTy = FD->getReturnType().getUnqualifiedType();
2234       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2235       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2236       Builder.CreateStore(Zero, ReturnValue);
2237     }
2238   }
2239 
2240   // FIXME: We no longer need the types from FunctionArgList; lift up and
2241   // simplify.
2242 
2243   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2244   // Flattened function arguments.
2245   SmallVector<llvm::Value *, 16> FnArgs;
2246   FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2247   for (auto &Arg : Fn->args()) {
2248     FnArgs.push_back(&Arg);
2249   }
2250   assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2251 
2252   // If we're using inalloca, all the memory arguments are GEPs off of the last
2253   // parameter, which is a pointer to the complete memory area.
2254   Address ArgStruct = Address::invalid();
2255   if (IRFunctionArgs.hasInallocaArg()) {
2256     ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2257                         FI.getArgStructAlignment());
2258 
2259     assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2260   }
2261 
2262   // Name the struct return parameter.
2263   if (IRFunctionArgs.hasSRetArg()) {
2264     auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2265     AI->setName("agg.result");
2266     AI->addAttr(llvm::Attribute::NoAlias);
2267   }
2268 
2269   // Track if we received the parameter as a pointer (indirect, byval, or
2270   // inalloca).  If already have a pointer, EmitParmDecl doesn't need to copy it
2271   // into a local alloca for us.
2272   SmallVector<ParamValue, 16> ArgVals;
2273   ArgVals.reserve(Args.size());
2274 
2275   // Create a pointer value for every parameter declaration.  This usually
2276   // entails copying one or more LLVM IR arguments into an alloca.  Don't push
2277   // any cleanups or do anything that might unwind.  We do that separately, so
2278   // we can push the cleanups in the correct order for the ABI.
2279   assert(FI.arg_size() == Args.size() &&
2280          "Mismatch between function signature & arguments.");
2281   unsigned ArgNo = 0;
2282   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2283   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2284        i != e; ++i, ++info_it, ++ArgNo) {
2285     const VarDecl *Arg = *i;
2286     const ABIArgInfo &ArgI = info_it->info;
2287 
2288     bool isPromoted =
2289       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2290     // We are converting from ABIArgInfo type to VarDecl type directly, unless
2291     // the parameter is promoted. In this case we convert to
2292     // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2293     QualType Ty = isPromoted ? info_it->type : Arg->getType();
2294     assert(hasScalarEvaluationKind(Ty) ==
2295            hasScalarEvaluationKind(Arg->getType()));
2296 
2297     unsigned FirstIRArg, NumIRArgs;
2298     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2299 
2300     switch (ArgI.getKind()) {
2301     case ABIArgInfo::InAlloca: {
2302       assert(NumIRArgs == 0);
2303       auto FieldIndex = ArgI.getInAllocaFieldIndex();
2304       Address V =
2305           Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2306       ArgVals.push_back(ParamValue::forIndirect(V));
2307       break;
2308     }
2309 
2310     case ABIArgInfo::Indirect: {
2311       assert(NumIRArgs == 1);
2312       Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2313 
2314       if (!hasScalarEvaluationKind(Ty)) {
2315         // Aggregates and complex variables are accessed by reference.  All we
2316         // need to do is realign the value, if requested.
2317         Address V = ParamAddr;
2318         if (ArgI.getIndirectRealign()) {
2319           Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2320 
2321           // Copy from the incoming argument pointer to the temporary with the
2322           // appropriate alignment.
2323           //
2324           // FIXME: We should have a common utility for generating an aggregate
2325           // copy.
2326           CharUnits Size = getContext().getTypeSizeInChars(Ty);
2327           auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2328           Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2329           Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2330           Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2331           V = AlignedTemp;
2332         }
2333         ArgVals.push_back(ParamValue::forIndirect(V));
2334       } else {
2335         // Load scalar value from indirect argument.
2336         llvm::Value *V =
2337             EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2338 
2339         if (isPromoted)
2340           V = emitArgumentDemotion(*this, Arg, V);
2341         ArgVals.push_back(ParamValue::forDirect(V));
2342       }
2343       break;
2344     }
2345 
2346     case ABIArgInfo::Extend:
2347     case ABIArgInfo::Direct: {
2348 
2349       // If we have the trivial case, handle it with no muss and fuss.
2350       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2351           ArgI.getCoerceToType() == ConvertType(Ty) &&
2352           ArgI.getDirectOffset() == 0) {
2353         assert(NumIRArgs == 1);
2354         llvm::Value *V = FnArgs[FirstIRArg];
2355         auto AI = cast<llvm::Argument>(V);
2356 
2357         if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2358           if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2359                              PVD->getFunctionScopeIndex()) &&
2360               !CGM.getCodeGenOpts().NullPointerIsValid)
2361             AI->addAttr(llvm::Attribute::NonNull);
2362 
2363           QualType OTy = PVD->getOriginalType();
2364           if (const auto *ArrTy =
2365               getContext().getAsConstantArrayType(OTy)) {
2366             // A C99 array parameter declaration with the static keyword also
2367             // indicates dereferenceability, and if the size is constant we can
2368             // use the dereferenceable attribute (which requires the size in
2369             // bytes).
2370             if (ArrTy->getSizeModifier() == ArrayType::Static) {
2371               QualType ETy = ArrTy->getElementType();
2372               uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2373               if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2374                   ArrSize) {
2375                 llvm::AttrBuilder Attrs;
2376                 Attrs.addDereferenceableAttr(
2377                   getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2378                 AI->addAttrs(Attrs);
2379               } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
2380                          !CGM.getCodeGenOpts().NullPointerIsValid) {
2381                 AI->addAttr(llvm::Attribute::NonNull);
2382               }
2383             }
2384           } else if (const auto *ArrTy =
2385                      getContext().getAsVariableArrayType(OTy)) {
2386             // For C99 VLAs with the static keyword, we don't know the size so
2387             // we can't use the dereferenceable attribute, but in addrspace(0)
2388             // we know that it must be nonnull.
2389             if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2390                 !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
2391                 !CGM.getCodeGenOpts().NullPointerIsValid)
2392               AI->addAttr(llvm::Attribute::NonNull);
2393           }
2394 
2395           const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2396           if (!AVAttr)
2397             if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2398               AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2399           if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2400             // If alignment-assumption sanitizer is enabled, we do *not* add
2401             // alignment attribute here, but emit normal alignment assumption,
2402             // so the UBSAN check could function.
2403             llvm::Value *AlignmentValue =
2404               EmitScalarExpr(AVAttr->getAlignment());
2405             llvm::ConstantInt *AlignmentCI =
2406               cast<llvm::ConstantInt>(AlignmentValue);
2407             unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2408                                           +llvm::Value::MaximumAlignment);
2409             AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2410           }
2411         }
2412 
2413         if (Arg->getType().isRestrictQualified())
2414           AI->addAttr(llvm::Attribute::NoAlias);
2415 
2416         // LLVM expects swifterror parameters to be used in very restricted
2417         // ways.  Copy the value into a less-restricted temporary.
2418         if (FI.getExtParameterInfo(ArgNo).getABI()
2419               == ParameterABI::SwiftErrorResult) {
2420           QualType pointeeTy = Ty->getPointeeType();
2421           assert(pointeeTy->isPointerType());
2422           Address temp =
2423             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2424           Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2425           llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2426           Builder.CreateStore(incomingErrorValue, temp);
2427           V = temp.getPointer();
2428 
2429           // Push a cleanup to copy the value back at the end of the function.
2430           // The convention does not guarantee that the value will be written
2431           // back if the function exits with an unwind exception.
2432           EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2433         }
2434 
2435         // Ensure the argument is the correct type.
2436         if (V->getType() != ArgI.getCoerceToType())
2437           V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2438 
2439         if (isPromoted)
2440           V = emitArgumentDemotion(*this, Arg, V);
2441 
2442         // Because of merging of function types from multiple decls it is
2443         // possible for the type of an argument to not match the corresponding
2444         // type in the function type. Since we are codegening the callee
2445         // in here, add a cast to the argument type.
2446         llvm::Type *LTy = ConvertType(Arg->getType());
2447         if (V->getType() != LTy)
2448           V = Builder.CreateBitCast(V, LTy);
2449 
2450         ArgVals.push_back(ParamValue::forDirect(V));
2451         break;
2452       }
2453 
2454       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2455                                      Arg->getName());
2456 
2457       // Pointer to store into.
2458       Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2459 
2460       // Fast-isel and the optimizer generally like scalar values better than
2461       // FCAs, so we flatten them if this is safe to do for this argument.
2462       llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2463       if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2464           STy->getNumElements() > 1) {
2465         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2466         llvm::Type *DstTy = Ptr.getElementType();
2467         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2468 
2469         Address AddrToStoreInto = Address::invalid();
2470         if (SrcSize <= DstSize) {
2471           AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2472         } else {
2473           AddrToStoreInto =
2474             CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2475         }
2476 
2477         assert(STy->getNumElements() == NumIRArgs);
2478         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2479           auto AI = FnArgs[FirstIRArg + i];
2480           AI->setName(Arg->getName() + ".coerce" + Twine(i));
2481           Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
2482           Builder.CreateStore(AI, EltPtr);
2483         }
2484 
2485         if (SrcSize > DstSize) {
2486           Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2487         }
2488 
2489       } else {
2490         // Simple case, just do a coerced store of the argument into the alloca.
2491         assert(NumIRArgs == 1);
2492         auto AI = FnArgs[FirstIRArg];
2493         AI->setName(Arg->getName() + ".coerce");
2494         CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2495       }
2496 
2497       // Match to what EmitParmDecl is expecting for this type.
2498       if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2499         llvm::Value *V =
2500             EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2501         if (isPromoted)
2502           V = emitArgumentDemotion(*this, Arg, V);
2503         ArgVals.push_back(ParamValue::forDirect(V));
2504       } else {
2505         ArgVals.push_back(ParamValue::forIndirect(Alloca));
2506       }
2507       break;
2508     }
2509 
2510     case ABIArgInfo::CoerceAndExpand: {
2511       // Reconstruct into a temporary.
2512       Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2513       ArgVals.push_back(ParamValue::forIndirect(alloca));
2514 
2515       auto coercionType = ArgI.getCoerceAndExpandType();
2516       alloca = Builder.CreateElementBitCast(alloca, coercionType);
2517 
2518       unsigned argIndex = FirstIRArg;
2519       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2520         llvm::Type *eltType = coercionType->getElementType(i);
2521         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2522           continue;
2523 
2524         auto eltAddr = Builder.CreateStructGEP(alloca, i);
2525         auto elt = FnArgs[argIndex++];
2526         Builder.CreateStore(elt, eltAddr);
2527       }
2528       assert(argIndex == FirstIRArg + NumIRArgs);
2529       break;
2530     }
2531 
2532     case ABIArgInfo::Expand: {
2533       // If this structure was expanded into multiple arguments then
2534       // we need to create a temporary and reconstruct it from the
2535       // arguments.
2536       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2537       LValue LV = MakeAddrLValue(Alloca, Ty);
2538       ArgVals.push_back(ParamValue::forIndirect(Alloca));
2539 
2540       auto FnArgIter = FnArgs.begin() + FirstIRArg;
2541       ExpandTypeFromArgs(Ty, LV, FnArgIter);
2542       assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2543       for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2544         auto AI = FnArgs[FirstIRArg + i];
2545         AI->setName(Arg->getName() + "." + Twine(i));
2546       }
2547       break;
2548     }
2549 
2550     case ABIArgInfo::Ignore:
2551       assert(NumIRArgs == 0);
2552       // Initialize the local variable appropriately.
2553       if (!hasScalarEvaluationKind(Ty)) {
2554         ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2555       } else {
2556         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2557         ArgVals.push_back(ParamValue::forDirect(U));
2558       }
2559       break;
2560     }
2561   }
2562 
2563   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2564     for (int I = Args.size() - 1; I >= 0; --I)
2565       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2566   } else {
2567     for (unsigned I = 0, E = Args.size(); I != E; ++I)
2568       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2569   }
2570 }
2571 
2572 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2573   while (insn->use_empty()) {
2574     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2575     if (!bitcast) return;
2576 
2577     // This is "safe" because we would have used a ConstantExpr otherwise.
2578     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2579     bitcast->eraseFromParent();
2580   }
2581 }
2582 
2583 /// Try to emit a fused autorelease of a return result.
2584 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2585                                                     llvm::Value *result) {
2586   // We must be immediately followed the cast.
2587   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2588   if (BB->empty()) return nullptr;
2589   if (&BB->back() != result) return nullptr;
2590 
2591   llvm::Type *resultType = result->getType();
2592 
2593   // result is in a BasicBlock and is therefore an Instruction.
2594   llvm::Instruction *generator = cast<llvm::Instruction>(result);
2595 
2596   SmallVector<llvm::Instruction *, 4> InstsToKill;
2597 
2598   // Look for:
2599   //  %generator = bitcast %type1* %generator2 to %type2*
2600   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2601     // We would have emitted this as a constant if the operand weren't
2602     // an Instruction.
2603     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2604 
2605     // Require the generator to be immediately followed by the cast.
2606     if (generator->getNextNode() != bitcast)
2607       return nullptr;
2608 
2609     InstsToKill.push_back(bitcast);
2610   }
2611 
2612   // Look for:
2613   //   %generator = call i8* @objc_retain(i8* %originalResult)
2614   // or
2615   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2616   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2617   if (!call) return nullptr;
2618 
2619   bool doRetainAutorelease;
2620 
2621   if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2622     doRetainAutorelease = true;
2623   } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2624                                           .objc_retainAutoreleasedReturnValue) {
2625     doRetainAutorelease = false;
2626 
2627     // If we emitted an assembly marker for this call (and the
2628     // ARCEntrypoints field should have been set if so), go looking
2629     // for that call.  If we can't find it, we can't do this
2630     // optimization.  But it should always be the immediately previous
2631     // instruction, unless we needed bitcasts around the call.
2632     if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2633       llvm::Instruction *prev = call->getPrevNode();
2634       assert(prev);
2635       if (isa<llvm::BitCastInst>(prev)) {
2636         prev = prev->getPrevNode();
2637         assert(prev);
2638       }
2639       assert(isa<llvm::CallInst>(prev));
2640       assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2641                CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2642       InstsToKill.push_back(prev);
2643     }
2644   } else {
2645     return nullptr;
2646   }
2647 
2648   result = call->getArgOperand(0);
2649   InstsToKill.push_back(call);
2650 
2651   // Keep killing bitcasts, for sanity.  Note that we no longer care
2652   // about precise ordering as long as there's exactly one use.
2653   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2654     if (!bitcast->hasOneUse()) break;
2655     InstsToKill.push_back(bitcast);
2656     result = bitcast->getOperand(0);
2657   }
2658 
2659   // Delete all the unnecessary instructions, from latest to earliest.
2660   for (auto *I : InstsToKill)
2661     I->eraseFromParent();
2662 
2663   // Do the fused retain/autorelease if we were asked to.
2664   if (doRetainAutorelease)
2665     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2666 
2667   // Cast back to the result type.
2668   return CGF.Builder.CreateBitCast(result, resultType);
2669 }
2670 
2671 /// If this is a +1 of the value of an immutable 'self', remove it.
2672 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2673                                           llvm::Value *result) {
2674   // This is only applicable to a method with an immutable 'self'.
2675   const ObjCMethodDecl *method =
2676     dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2677   if (!method) return nullptr;
2678   const VarDecl *self = method->getSelfDecl();
2679   if (!self->getType().isConstQualified()) return nullptr;
2680 
2681   // Look for a retain call.
2682   llvm::CallInst *retainCall =
2683     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2684   if (!retainCall ||
2685       retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2686     return nullptr;
2687 
2688   // Look for an ordinary load of 'self'.
2689   llvm::Value *retainedValue = retainCall->getArgOperand(0);
2690   llvm::LoadInst *load =
2691     dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2692   if (!load || load->isAtomic() || load->isVolatile() ||
2693       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2694     return nullptr;
2695 
2696   // Okay!  Burn it all down.  This relies for correctness on the
2697   // assumption that the retain is emitted as part of the return and
2698   // that thereafter everything is used "linearly".
2699   llvm::Type *resultType = result->getType();
2700   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2701   assert(retainCall->use_empty());
2702   retainCall->eraseFromParent();
2703   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2704 
2705   return CGF.Builder.CreateBitCast(load, resultType);
2706 }
2707 
2708 /// Emit an ARC autorelease of the result of a function.
2709 ///
2710 /// \return the value to actually return from the function
2711 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2712                                             llvm::Value *result) {
2713   // If we're returning 'self', kill the initial retain.  This is a
2714   // heuristic attempt to "encourage correctness" in the really unfortunate
2715   // case where we have a return of self during a dealloc and we desperately
2716   // need to avoid the possible autorelease.
2717   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2718     return self;
2719 
2720   // At -O0, try to emit a fused retain/autorelease.
2721   if (CGF.shouldUseFusedARCCalls())
2722     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2723       return fused;
2724 
2725   return CGF.EmitARCAutoreleaseReturnValue(result);
2726 }
2727 
2728 /// Heuristically search for a dominating store to the return-value slot.
2729 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2730   // Check if a User is a store which pointerOperand is the ReturnValue.
2731   // We are looking for stores to the ReturnValue, not for stores of the
2732   // ReturnValue to some other location.
2733   auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2734     auto *SI = dyn_cast<llvm::StoreInst>(U);
2735     if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2736       return nullptr;
2737     // These aren't actually possible for non-coerced returns, and we
2738     // only care about non-coerced returns on this code path.
2739     assert(!SI->isAtomic() && !SI->isVolatile());
2740     return SI;
2741   };
2742   // If there are multiple uses of the return-value slot, just check
2743   // for something immediately preceding the IP.  Sometimes this can
2744   // happen with how we generate implicit-returns; it can also happen
2745   // with noreturn cleanups.
2746   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2747     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2748     if (IP->empty()) return nullptr;
2749     llvm::Instruction *I = &IP->back();
2750 
2751     // Skip lifetime markers
2752     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2753                                             IE = IP->rend();
2754          II != IE; ++II) {
2755       if (llvm::IntrinsicInst *Intrinsic =
2756               dyn_cast<llvm::IntrinsicInst>(&*II)) {
2757         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2758           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2759           ++II;
2760           if (II == IE)
2761             break;
2762           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2763             continue;
2764         }
2765       }
2766       I = &*II;
2767       break;
2768     }
2769 
2770     return GetStoreIfValid(I);
2771   }
2772 
2773   llvm::StoreInst *store =
2774       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2775   if (!store) return nullptr;
2776 
2777   // Now do a first-and-dirty dominance check: just walk up the
2778   // single-predecessors chain from the current insertion point.
2779   llvm::BasicBlock *StoreBB = store->getParent();
2780   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2781   while (IP != StoreBB) {
2782     if (!(IP = IP->getSinglePredecessor()))
2783       return nullptr;
2784   }
2785 
2786   // Okay, the store's basic block dominates the insertion point; we
2787   // can do our thing.
2788   return store;
2789 }
2790 
2791 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2792                                          bool EmitRetDbgLoc,
2793                                          SourceLocation EndLoc) {
2794   if (FI.isNoReturn()) {
2795     // Noreturn functions don't return.
2796     EmitUnreachable(EndLoc);
2797     return;
2798   }
2799 
2800   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2801     // Naked functions don't have epilogues.
2802     Builder.CreateUnreachable();
2803     return;
2804   }
2805 
2806   // Functions with no result always return void.
2807   if (!ReturnValue.isValid()) {
2808     Builder.CreateRetVoid();
2809     return;
2810   }
2811 
2812   llvm::DebugLoc RetDbgLoc;
2813   llvm::Value *RV = nullptr;
2814   QualType RetTy = FI.getReturnType();
2815   const ABIArgInfo &RetAI = FI.getReturnInfo();
2816 
2817   switch (RetAI.getKind()) {
2818   case ABIArgInfo::InAlloca:
2819     // Aggregrates get evaluated directly into the destination.  Sometimes we
2820     // need to return the sret value in a register, though.
2821     assert(hasAggregateEvaluationKind(RetTy));
2822     if (RetAI.getInAllocaSRet()) {
2823       llvm::Function::arg_iterator EI = CurFn->arg_end();
2824       --EI;
2825       llvm::Value *ArgStruct = &*EI;
2826       llvm::Value *SRet = Builder.CreateStructGEP(
2827           nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2828       RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2829     }
2830     break;
2831 
2832   case ABIArgInfo::Indirect: {
2833     auto AI = CurFn->arg_begin();
2834     if (RetAI.isSRetAfterThis())
2835       ++AI;
2836     switch (getEvaluationKind(RetTy)) {
2837     case TEK_Complex: {
2838       ComplexPairTy RT =
2839         EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2840       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2841                          /*isInit*/ true);
2842       break;
2843     }
2844     case TEK_Aggregate:
2845       // Do nothing; aggregrates get evaluated directly into the destination.
2846       break;
2847     case TEK_Scalar:
2848       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2849                         MakeNaturalAlignAddrLValue(&*AI, RetTy),
2850                         /*isInit*/ true);
2851       break;
2852     }
2853     break;
2854   }
2855 
2856   case ABIArgInfo::Extend:
2857   case ABIArgInfo::Direct:
2858     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2859         RetAI.getDirectOffset() == 0) {
2860       // The internal return value temp always will have pointer-to-return-type
2861       // type, just do a load.
2862 
2863       // If there is a dominating store to ReturnValue, we can elide
2864       // the load, zap the store, and usually zap the alloca.
2865       if (llvm::StoreInst *SI =
2866               findDominatingStoreToReturnValue(*this)) {
2867         // Reuse the debug location from the store unless there is
2868         // cleanup code to be emitted between the store and return
2869         // instruction.
2870         if (EmitRetDbgLoc && !AutoreleaseResult)
2871           RetDbgLoc = SI->getDebugLoc();
2872         // Get the stored value and nuke the now-dead store.
2873         RV = SI->getValueOperand();
2874         SI->eraseFromParent();
2875 
2876         // If that was the only use of the return value, nuke it as well now.
2877         auto returnValueInst = ReturnValue.getPointer();
2878         if (returnValueInst->use_empty()) {
2879           if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2880             alloca->eraseFromParent();
2881             ReturnValue = Address::invalid();
2882           }
2883         }
2884 
2885       // Otherwise, we have to do a simple load.
2886       } else {
2887         RV = Builder.CreateLoad(ReturnValue);
2888       }
2889     } else {
2890       // If the value is offset in memory, apply the offset now.
2891       Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2892 
2893       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2894     }
2895 
2896     // In ARC, end functions that return a retainable type with a call
2897     // to objc_autoreleaseReturnValue.
2898     if (AutoreleaseResult) {
2899 #ifndef NDEBUG
2900       // Type::isObjCRetainabletype has to be called on a QualType that hasn't
2901       // been stripped of the typedefs, so we cannot use RetTy here. Get the
2902       // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2903       // CurCodeDecl or BlockInfo.
2904       QualType RT;
2905 
2906       if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2907         RT = FD->getReturnType();
2908       else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2909         RT = MD->getReturnType();
2910       else if (isa<BlockDecl>(CurCodeDecl))
2911         RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2912       else
2913         llvm_unreachable("Unexpected function/method type");
2914 
2915       assert(getLangOpts().ObjCAutoRefCount &&
2916              !FI.isReturnsRetained() &&
2917              RT->isObjCRetainableType());
2918 #endif
2919       RV = emitAutoreleaseOfResult(*this, RV);
2920     }
2921 
2922     break;
2923 
2924   case ABIArgInfo::Ignore:
2925     break;
2926 
2927   case ABIArgInfo::CoerceAndExpand: {
2928     auto coercionType = RetAI.getCoerceAndExpandType();
2929 
2930     // Load all of the coerced elements out into results.
2931     llvm::SmallVector<llvm::Value*, 4> results;
2932     Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2933     for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2934       auto coercedEltType = coercionType->getElementType(i);
2935       if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2936         continue;
2937 
2938       auto eltAddr = Builder.CreateStructGEP(addr, i);
2939       auto elt = Builder.CreateLoad(eltAddr);
2940       results.push_back(elt);
2941     }
2942 
2943     // If we have one result, it's the single direct result type.
2944     if (results.size() == 1) {
2945       RV = results[0];
2946 
2947     // Otherwise, we need to make a first-class aggregate.
2948     } else {
2949       // Construct a return type that lacks padding elements.
2950       llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2951 
2952       RV = llvm::UndefValue::get(returnType);
2953       for (unsigned i = 0, e = results.size(); i != e; ++i) {
2954         RV = Builder.CreateInsertValue(RV, results[i], i);
2955       }
2956     }
2957     break;
2958   }
2959 
2960   case ABIArgInfo::Expand:
2961     llvm_unreachable("Invalid ABI kind for return argument");
2962   }
2963 
2964   llvm::Instruction *Ret;
2965   if (RV) {
2966     EmitReturnValueCheck(RV);
2967     Ret = Builder.CreateRet(RV);
2968   } else {
2969     Ret = Builder.CreateRetVoid();
2970   }
2971 
2972   if (RetDbgLoc)
2973     Ret->setDebugLoc(std::move(RetDbgLoc));
2974 }
2975 
2976 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
2977   // A current decl may not be available when emitting vtable thunks.
2978   if (!CurCodeDecl)
2979     return;
2980 
2981   ReturnsNonNullAttr *RetNNAttr = nullptr;
2982   if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
2983     RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
2984 
2985   if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
2986     return;
2987 
2988   // Prefer the returns_nonnull attribute if it's present.
2989   SourceLocation AttrLoc;
2990   SanitizerMask CheckKind;
2991   SanitizerHandler Handler;
2992   if (RetNNAttr) {
2993     assert(!requiresReturnValueNullabilityCheck() &&
2994            "Cannot check nullability and the nonnull attribute");
2995     AttrLoc = RetNNAttr->getLocation();
2996     CheckKind = SanitizerKind::ReturnsNonnullAttribute;
2997     Handler = SanitizerHandler::NonnullReturn;
2998   } else {
2999     if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3000       if (auto *TSI = DD->getTypeSourceInfo())
3001         if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
3002           AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3003     CheckKind = SanitizerKind::NullabilityReturn;
3004     Handler = SanitizerHandler::NullabilityReturn;
3005   }
3006 
3007   SanitizerScope SanScope(this);
3008 
3009   // Make sure the "return" source location is valid. If we're checking a
3010   // nullability annotation, make sure the preconditions for the check are met.
3011   llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3012   llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3013   llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3014   llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3015   if (requiresReturnValueNullabilityCheck())
3016     CanNullCheck =
3017         Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3018   Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3019   EmitBlock(Check);
3020 
3021   // Now do the null check.
3022   llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3023   llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3024   llvm::Value *DynamicData[] = {SLocPtr};
3025   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3026 
3027   EmitBlock(NoCheck);
3028 
3029 #ifndef NDEBUG
3030   // The return location should not be used after the check has been emitted.
3031   ReturnLocation = Address::invalid();
3032 #endif
3033 }
3034 
3035 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3036   const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3037   return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3038 }
3039 
3040 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3041                                           QualType Ty) {
3042   // FIXME: Generate IR in one pass, rather than going back and fixing up these
3043   // placeholders.
3044   llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3045   llvm::Type *IRPtrTy = IRTy->getPointerTo();
3046   llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3047 
3048   // FIXME: When we generate this IR in one pass, we shouldn't need
3049   // this win32-specific alignment hack.
3050   CharUnits Align = CharUnits::fromQuantity(4);
3051   Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3052 
3053   return AggValueSlot::forAddr(Address(Placeholder, Align),
3054                                Ty.getQualifiers(),
3055                                AggValueSlot::IsNotDestructed,
3056                                AggValueSlot::DoesNotNeedGCBarriers,
3057                                AggValueSlot::IsNotAliased,
3058                                AggValueSlot::DoesNotOverlap);
3059 }
3060 
3061 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3062                                           const VarDecl *param,
3063                                           SourceLocation loc) {
3064   // StartFunction converted the ABI-lowered parameter(s) into a
3065   // local alloca.  We need to turn that into an r-value suitable
3066   // for EmitCall.
3067   Address local = GetAddrOfLocalVar(param);
3068 
3069   QualType type = param->getType();
3070 
3071   if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3072     CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3073   }
3074 
3075   // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3076   // but the argument needs to be the original pointer.
3077   if (type->isReferenceType()) {
3078     args.add(RValue::get(Builder.CreateLoad(local)), type);
3079 
3080   // In ARC, move out of consumed arguments so that the release cleanup
3081   // entered by StartFunction doesn't cause an over-release.  This isn't
3082   // optimal -O0 code generation, but it should get cleaned up when
3083   // optimization is enabled.  This also assumes that delegate calls are
3084   // performed exactly once for a set of arguments, but that should be safe.
3085   } else if (getLangOpts().ObjCAutoRefCount &&
3086              param->hasAttr<NSConsumedAttr>() &&
3087              type->isObjCRetainableType()) {
3088     llvm::Value *ptr = Builder.CreateLoad(local);
3089     auto null =
3090       llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3091     Builder.CreateStore(null, local);
3092     args.add(RValue::get(ptr), type);
3093 
3094   // For the most part, we just need to load the alloca, except that
3095   // aggregate r-values are actually pointers to temporaries.
3096   } else {
3097     args.add(convertTempToRValue(local, type, loc), type);
3098   }
3099 
3100   // Deactivate the cleanup for the callee-destructed param that was pushed.
3101   if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3102       type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3103       type.isDestructedType()) {
3104     EHScopeStack::stable_iterator cleanup =
3105         CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3106     assert(cleanup.isValid() &&
3107            "cleanup for callee-destructed param not recorded");
3108     // This unreachable is a temporary marker which will be removed later.
3109     llvm::Instruction *isActive = Builder.CreateUnreachable();
3110     args.addArgCleanupDeactivation(cleanup, isActive);
3111   }
3112 }
3113 
3114 static bool isProvablyNull(llvm::Value *addr) {
3115   return isa<llvm::ConstantPointerNull>(addr);
3116 }
3117 
3118 /// Emit the actual writing-back of a writeback.
3119 static void emitWriteback(CodeGenFunction &CGF,
3120                           const CallArgList::Writeback &writeback) {
3121   const LValue &srcLV = writeback.Source;
3122   Address srcAddr = srcLV.getAddress();
3123   assert(!isProvablyNull(srcAddr.getPointer()) &&
3124          "shouldn't have writeback for provably null argument");
3125 
3126   llvm::BasicBlock *contBB = nullptr;
3127 
3128   // If the argument wasn't provably non-null, we need to null check
3129   // before doing the store.
3130   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3131                                               CGF.CGM.getDataLayout());
3132   if (!provablyNonNull) {
3133     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3134     contBB = CGF.createBasicBlock("icr.done");
3135 
3136     llvm::Value *isNull =
3137       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3138     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3139     CGF.EmitBlock(writebackBB);
3140   }
3141 
3142   // Load the value to writeback.
3143   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3144 
3145   // Cast it back, in case we're writing an id to a Foo* or something.
3146   value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3147                                     "icr.writeback-cast");
3148 
3149   // Perform the writeback.
3150 
3151   // If we have a "to use" value, it's something we need to emit a use
3152   // of.  This has to be carefully threaded in: if it's done after the
3153   // release it's potentially undefined behavior (and the optimizer
3154   // will ignore it), and if it happens before the retain then the
3155   // optimizer could move the release there.
3156   if (writeback.ToUse) {
3157     assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3158 
3159     // Retain the new value.  No need to block-copy here:  the block's
3160     // being passed up the stack.
3161     value = CGF.EmitARCRetainNonBlock(value);
3162 
3163     // Emit the intrinsic use here.
3164     CGF.EmitARCIntrinsicUse(writeback.ToUse);
3165 
3166     // Load the old value (primitively).
3167     llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3168 
3169     // Put the new value in place (primitively).
3170     CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3171 
3172     // Release the old value.
3173     CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3174 
3175   // Otherwise, we can just do a normal lvalue store.
3176   } else {
3177     CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3178   }
3179 
3180   // Jump to the continuation block.
3181   if (!provablyNonNull)
3182     CGF.EmitBlock(contBB);
3183 }
3184 
3185 static void emitWritebacks(CodeGenFunction &CGF,
3186                            const CallArgList &args) {
3187   for (const auto &I : args.writebacks())
3188     emitWriteback(CGF, I);
3189 }
3190 
3191 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3192                                             const CallArgList &CallArgs) {
3193   ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3194     CallArgs.getCleanupsToDeactivate();
3195   // Iterate in reverse to increase the likelihood of popping the cleanup.
3196   for (const auto &I : llvm::reverse(Cleanups)) {
3197     CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3198     I.IsActiveIP->eraseFromParent();
3199   }
3200 }
3201 
3202 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3203   if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3204     if (uop->getOpcode() == UO_AddrOf)
3205       return uop->getSubExpr();
3206   return nullptr;
3207 }
3208 
3209 /// Emit an argument that's being passed call-by-writeback.  That is,
3210 /// we are passing the address of an __autoreleased temporary; it
3211 /// might be copy-initialized with the current value of the given
3212 /// address, but it will definitely be copied out of after the call.
3213 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3214                              const ObjCIndirectCopyRestoreExpr *CRE) {
3215   LValue srcLV;
3216 
3217   // Make an optimistic effort to emit the address as an l-value.
3218   // This can fail if the argument expression is more complicated.
3219   if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3220     srcLV = CGF.EmitLValue(lvExpr);
3221 
3222   // Otherwise, just emit it as a scalar.
3223   } else {
3224     Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3225 
3226     QualType srcAddrType =
3227       CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3228     srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3229   }
3230   Address srcAddr = srcLV.getAddress();
3231 
3232   // The dest and src types don't necessarily match in LLVM terms
3233   // because of the crazy ObjC compatibility rules.
3234 
3235   llvm::PointerType *destType =
3236     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3237 
3238   // If the address is a constant null, just pass the appropriate null.
3239   if (isProvablyNull(srcAddr.getPointer())) {
3240     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3241              CRE->getType());
3242     return;
3243   }
3244 
3245   // Create the temporary.
3246   Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3247                                       CGF.getPointerAlign(),
3248                                       "icr.temp");
3249   // Loading an l-value can introduce a cleanup if the l-value is __weak,
3250   // and that cleanup will be conditional if we can't prove that the l-value
3251   // isn't null, so we need to register a dominating point so that the cleanups
3252   // system will make valid IR.
3253   CodeGenFunction::ConditionalEvaluation condEval(CGF);
3254 
3255   // Zero-initialize it if we're not doing a copy-initialization.
3256   bool shouldCopy = CRE->shouldCopy();
3257   if (!shouldCopy) {
3258     llvm::Value *null =
3259       llvm::ConstantPointerNull::get(
3260         cast<llvm::PointerType>(destType->getElementType()));
3261     CGF.Builder.CreateStore(null, temp);
3262   }
3263 
3264   llvm::BasicBlock *contBB = nullptr;
3265   llvm::BasicBlock *originBB = nullptr;
3266 
3267   // If the address is *not* known to be non-null, we need to switch.
3268   llvm::Value *finalArgument;
3269 
3270   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3271                                               CGF.CGM.getDataLayout());
3272   if (provablyNonNull) {
3273     finalArgument = temp.getPointer();
3274   } else {
3275     llvm::Value *isNull =
3276       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3277 
3278     finalArgument = CGF.Builder.CreateSelect(isNull,
3279                                    llvm::ConstantPointerNull::get(destType),
3280                                              temp.getPointer(), "icr.argument");
3281 
3282     // If we need to copy, then the load has to be conditional, which
3283     // means we need control flow.
3284     if (shouldCopy) {
3285       originBB = CGF.Builder.GetInsertBlock();
3286       contBB = CGF.createBasicBlock("icr.cont");
3287       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3288       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3289       CGF.EmitBlock(copyBB);
3290       condEval.begin(CGF);
3291     }
3292   }
3293 
3294   llvm::Value *valueToUse = nullptr;
3295 
3296   // Perform a copy if necessary.
3297   if (shouldCopy) {
3298     RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3299     assert(srcRV.isScalar());
3300 
3301     llvm::Value *src = srcRV.getScalarVal();
3302     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3303                                     "icr.cast");
3304 
3305     // Use an ordinary store, not a store-to-lvalue.
3306     CGF.Builder.CreateStore(src, temp);
3307 
3308     // If optimization is enabled, and the value was held in a
3309     // __strong variable, we need to tell the optimizer that this
3310     // value has to stay alive until we're doing the store back.
3311     // This is because the temporary is effectively unretained,
3312     // and so otherwise we can violate the high-level semantics.
3313     if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3314         srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3315       valueToUse = src;
3316     }
3317   }
3318 
3319   // Finish the control flow if we needed it.
3320   if (shouldCopy && !provablyNonNull) {
3321     llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3322     CGF.EmitBlock(contBB);
3323 
3324     // Make a phi for the value to intrinsically use.
3325     if (valueToUse) {
3326       llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3327                                                       "icr.to-use");
3328       phiToUse->addIncoming(valueToUse, copyBB);
3329       phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3330                             originBB);
3331       valueToUse = phiToUse;
3332     }
3333 
3334     condEval.end(CGF);
3335   }
3336 
3337   args.addWriteback(srcLV, temp, valueToUse);
3338   args.add(RValue::get(finalArgument), CRE->getType());
3339 }
3340 
3341 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3342   assert(!StackBase);
3343 
3344   // Save the stack.
3345   llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3346   StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3347 }
3348 
3349 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3350   if (StackBase) {
3351     // Restore the stack after the call.
3352     llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3353     CGF.Builder.CreateCall(F, StackBase);
3354   }
3355 }
3356 
3357 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3358                                           SourceLocation ArgLoc,
3359                                           AbstractCallee AC,
3360                                           unsigned ParmNum) {
3361   if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3362                          SanOpts.has(SanitizerKind::NullabilityArg)))
3363     return;
3364 
3365   // The param decl may be missing in a variadic function.
3366   auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3367   unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3368 
3369   // Prefer the nonnull attribute if it's present.
3370   const NonNullAttr *NNAttr = nullptr;
3371   if (SanOpts.has(SanitizerKind::NonnullAttribute))
3372     NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3373 
3374   bool CanCheckNullability = false;
3375   if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3376     auto Nullability = PVD->getType()->getNullability(getContext());
3377     CanCheckNullability = Nullability &&
3378                           *Nullability == NullabilityKind::NonNull &&
3379                           PVD->getTypeSourceInfo();
3380   }
3381 
3382   if (!NNAttr && !CanCheckNullability)
3383     return;
3384 
3385   SourceLocation AttrLoc;
3386   SanitizerMask CheckKind;
3387   SanitizerHandler Handler;
3388   if (NNAttr) {
3389     AttrLoc = NNAttr->getLocation();
3390     CheckKind = SanitizerKind::NonnullAttribute;
3391     Handler = SanitizerHandler::NonnullArg;
3392   } else {
3393     AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3394     CheckKind = SanitizerKind::NullabilityArg;
3395     Handler = SanitizerHandler::NullabilityArg;
3396   }
3397 
3398   SanitizerScope SanScope(this);
3399   assert(RV.isScalar());
3400   llvm::Value *V = RV.getScalarVal();
3401   llvm::Value *Cond =
3402       Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3403   llvm::Constant *StaticData[] = {
3404       EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3405       llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3406   };
3407   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3408 }
3409 
3410 void CodeGenFunction::EmitCallArgs(
3411     CallArgList &Args, ArrayRef<QualType> ArgTypes,
3412     llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3413     AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3414   assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3415 
3416   // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3417   // because arguments are destroyed left to right in the callee. As a special
3418   // case, there are certain language constructs that require left-to-right
3419   // evaluation, and in those cases we consider the evaluation order requirement
3420   // to trump the "destruction order is reverse construction order" guarantee.
3421   bool LeftToRight =
3422       CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3423           ? Order == EvaluationOrder::ForceLeftToRight
3424           : Order != EvaluationOrder::ForceRightToLeft;
3425 
3426   auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3427                                          RValue EmittedArg) {
3428     if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3429       return;
3430     auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3431     if (PS == nullptr)
3432       return;
3433 
3434     const auto &Context = getContext();
3435     auto SizeTy = Context.getSizeType();
3436     auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3437     assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3438     llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3439                                                      EmittedArg.getScalarVal(),
3440                                                      PS->isDynamic());
3441     Args.add(RValue::get(V), SizeTy);
3442     // If we're emitting args in reverse, be sure to do so with
3443     // pass_object_size, as well.
3444     if (!LeftToRight)
3445       std::swap(Args.back(), *(&Args.back() - 1));
3446   };
3447 
3448   // Insert a stack save if we're going to need any inalloca args.
3449   bool HasInAllocaArgs = false;
3450   if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3451     for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3452          I != E && !HasInAllocaArgs; ++I)
3453       HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3454     if (HasInAllocaArgs) {
3455       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3456       Args.allocateArgumentMemory(*this);
3457     }
3458   }
3459 
3460   // Evaluate each argument in the appropriate order.
3461   size_t CallArgsStart = Args.size();
3462   for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3463     unsigned Idx = LeftToRight ? I : E - I - 1;
3464     CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3465     unsigned InitialArgSize = Args.size();
3466     // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3467     // the argument and parameter match or the objc method is parameterized.
3468     assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3469             getContext().hasSameUnqualifiedType((*Arg)->getType(),
3470                                                 ArgTypes[Idx]) ||
3471             (isa<ObjCMethodDecl>(AC.getDecl()) &&
3472              isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3473            "Argument and parameter types don't match");
3474     EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3475     // In particular, we depend on it being the last arg in Args, and the
3476     // objectsize bits depend on there only being one arg if !LeftToRight.
3477     assert(InitialArgSize + 1 == Args.size() &&
3478            "The code below depends on only adding one arg per EmitCallArg");
3479     (void)InitialArgSize;
3480     // Since pointer argument are never emitted as LValue, it is safe to emit
3481     // non-null argument check for r-value only.
3482     if (!Args.back().hasLValue()) {
3483       RValue RVArg = Args.back().getKnownRValue();
3484       EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3485                           ParamsToSkip + Idx);
3486       // @llvm.objectsize should never have side-effects and shouldn't need
3487       // destruction/cleanups, so we can safely "emit" it after its arg,
3488       // regardless of right-to-leftness
3489       MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3490     }
3491   }
3492 
3493   if (!LeftToRight) {
3494     // Un-reverse the arguments we just evaluated so they match up with the LLVM
3495     // IR function.
3496     std::reverse(Args.begin() + CallArgsStart, Args.end());
3497   }
3498 }
3499 
3500 namespace {
3501 
3502 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3503   DestroyUnpassedArg(Address Addr, QualType Ty)
3504       : Addr(Addr), Ty(Ty) {}
3505 
3506   Address Addr;
3507   QualType Ty;
3508 
3509   void Emit(CodeGenFunction &CGF, Flags flags) override {
3510     QualType::DestructionKind DtorKind = Ty.isDestructedType();
3511     if (DtorKind == QualType::DK_cxx_destructor) {
3512       const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3513       assert(!Dtor->isTrivial());
3514       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3515                                 /*Delegating=*/false, Addr);
3516     } else {
3517       CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3518     }
3519   }
3520 };
3521 
3522 struct DisableDebugLocationUpdates {
3523   CodeGenFunction &CGF;
3524   bool disabledDebugInfo;
3525   DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3526     if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3527       CGF.disableDebugInfo();
3528   }
3529   ~DisableDebugLocationUpdates() {
3530     if (disabledDebugInfo)
3531       CGF.enableDebugInfo();
3532   }
3533 };
3534 
3535 } // end anonymous namespace
3536 
3537 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3538   if (!HasLV)
3539     return RV;
3540   LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3541   CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3542                         LV.isVolatile());
3543   IsUsed = true;
3544   return RValue::getAggregate(Copy.getAddress());
3545 }
3546 
3547 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3548   LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3549   if (!HasLV && RV.isScalar())
3550     CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*init=*/true);
3551   else if (!HasLV && RV.isComplex())
3552     CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3553   else {
3554     auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
3555     LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3556     // We assume that call args are never copied into subobjects.
3557     CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3558                           HasLV ? LV.isVolatileQualified()
3559                                 : RV.isVolatileQualified());
3560   }
3561   IsUsed = true;
3562 }
3563 
3564 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3565                                   QualType type) {
3566   DisableDebugLocationUpdates Dis(*this, E);
3567   if (const ObjCIndirectCopyRestoreExpr *CRE
3568         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3569     assert(getLangOpts().ObjCAutoRefCount);
3570     return emitWritebackArg(*this, args, CRE);
3571   }
3572 
3573   assert(type->isReferenceType() == E->isGLValue() &&
3574          "reference binding to unmaterialized r-value!");
3575 
3576   if (E->isGLValue()) {
3577     assert(E->getObjectKind() == OK_Ordinary);
3578     return args.add(EmitReferenceBindingToExpr(E), type);
3579   }
3580 
3581   bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3582 
3583   // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3584   // However, we still have to push an EH-only cleanup in case we unwind before
3585   // we make it to the call.
3586   if (HasAggregateEvalKind &&
3587       type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
3588     // If we're using inalloca, use the argument memory.  Otherwise, use a
3589     // temporary.
3590     AggValueSlot Slot;
3591     if (args.isUsingInAlloca())
3592       Slot = createPlaceholderSlot(*this, type);
3593     else
3594       Slot = CreateAggTemp(type, "agg.tmp");
3595 
3596     bool DestroyedInCallee = true, NeedsEHCleanup = true;
3597     if (const auto *RD = type->getAsCXXRecordDecl())
3598       DestroyedInCallee = RD->hasNonTrivialDestructor();
3599     else
3600       NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3601 
3602     if (DestroyedInCallee)
3603       Slot.setExternallyDestructed();
3604 
3605     EmitAggExpr(E, Slot);
3606     RValue RV = Slot.asRValue();
3607     args.add(RV, type);
3608 
3609     if (DestroyedInCallee && NeedsEHCleanup) {
3610       // Create a no-op GEP between the placeholder and the cleanup so we can
3611       // RAUW it successfully.  It also serves as a marker of the first
3612       // instruction where the cleanup is active.
3613       pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3614                                               type);
3615       // This unreachable is a temporary marker which will be removed later.
3616       llvm::Instruction *IsActive = Builder.CreateUnreachable();
3617       args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3618     }
3619     return;
3620   }
3621 
3622   if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3623       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3624     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3625     assert(L.isSimple());
3626     args.addUncopiedAggregate(L, type);
3627     return;
3628   }
3629 
3630   args.add(EmitAnyExprToTemp(E), type);
3631 }
3632 
3633 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3634   // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3635   // implicitly widens null pointer constants that are arguments to varargs
3636   // functions to pointer-sized ints.
3637   if (!getTarget().getTriple().isOSWindows())
3638     return Arg->getType();
3639 
3640   if (Arg->getType()->isIntegerType() &&
3641       getContext().getTypeSize(Arg->getType()) <
3642           getContext().getTargetInfo().getPointerWidth(0) &&
3643       Arg->isNullPointerConstant(getContext(),
3644                                  Expr::NPC_ValueDependentIsNotNull)) {
3645     return getContext().getIntPtrType();
3646   }
3647 
3648   return Arg->getType();
3649 }
3650 
3651 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3652 // optimizer it can aggressively ignore unwind edges.
3653 void
3654 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3655   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3656       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3657     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3658                       CGM.getNoObjCARCExceptionsMetadata());
3659 }
3660 
3661 /// Emits a call to the given no-arguments nounwind runtime function.
3662 llvm::CallInst *
3663 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3664                                          const llvm::Twine &name) {
3665   return EmitNounwindRuntimeCall(callee, None, name);
3666 }
3667 
3668 /// Emits a call to the given nounwind runtime function.
3669 llvm::CallInst *
3670 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3671                                          ArrayRef<llvm::Value *> args,
3672                                          const llvm::Twine &name) {
3673   llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3674   call->setDoesNotThrow();
3675   return call;
3676 }
3677 
3678 /// Emits a simple call (never an invoke) to the given no-arguments
3679 /// runtime function.
3680 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
3681                                                  const llvm::Twine &name) {
3682   return EmitRuntimeCall(callee, None, name);
3683 }
3684 
3685 // Calls which may throw must have operand bundles indicating which funclet
3686 // they are nested within.
3687 SmallVector<llvm::OperandBundleDef, 1>
3688 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3689   SmallVector<llvm::OperandBundleDef, 1> BundleList;
3690   // There is no need for a funclet operand bundle if we aren't inside a
3691   // funclet.
3692   if (!CurrentFuncletPad)
3693     return BundleList;
3694 
3695   // Skip intrinsics which cannot throw.
3696   auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3697   if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3698     return BundleList;
3699 
3700   BundleList.emplace_back("funclet", CurrentFuncletPad);
3701   return BundleList;
3702 }
3703 
3704 /// Emits a simple call (never an invoke) to the given runtime function.
3705 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
3706                                                  ArrayRef<llvm::Value *> args,
3707                                                  const llvm::Twine &name) {
3708   llvm::CallInst *call = Builder.CreateCall(
3709       callee, args, getBundlesForFunclet(callee.getCallee()), name);
3710   call->setCallingConv(getRuntimeCC());
3711   return call;
3712 }
3713 
3714 /// Emits a call or invoke to the given noreturn runtime function.
3715 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
3716     llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
3717   SmallVector<llvm::OperandBundleDef, 1> BundleList =
3718       getBundlesForFunclet(callee.getCallee());
3719 
3720   if (getInvokeDest()) {
3721     llvm::InvokeInst *invoke =
3722       Builder.CreateInvoke(callee,
3723                            getUnreachableBlock(),
3724                            getInvokeDest(),
3725                            args,
3726                            BundleList);
3727     invoke->setDoesNotReturn();
3728     invoke->setCallingConv(getRuntimeCC());
3729   } else {
3730     llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3731     call->setDoesNotReturn();
3732     call->setCallingConv(getRuntimeCC());
3733     Builder.CreateUnreachable();
3734   }
3735 }
3736 
3737 /// Emits a call or invoke instruction to the given nullary runtime function.
3738 llvm::CallBase *
3739 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3740                                          const Twine &name) {
3741   return EmitRuntimeCallOrInvoke(callee, None, name);
3742 }
3743 
3744 /// Emits a call or invoke instruction to the given runtime function.
3745 llvm::CallBase *
3746 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3747                                          ArrayRef<llvm::Value *> args,
3748                                          const Twine &name) {
3749   llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
3750   call->setCallingConv(getRuntimeCC());
3751   return call;
3752 }
3753 
3754 /// Emits a call or invoke instruction to the given function, depending
3755 /// on the current state of the EH stack.
3756 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
3757                                                   ArrayRef<llvm::Value *> Args,
3758                                                   const Twine &Name) {
3759   llvm::BasicBlock *InvokeDest = getInvokeDest();
3760   SmallVector<llvm::OperandBundleDef, 1> BundleList =
3761       getBundlesForFunclet(Callee.getCallee());
3762 
3763   llvm::CallBase *Inst;
3764   if (!InvokeDest)
3765     Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3766   else {
3767     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3768     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3769                                 Name);
3770     EmitBlock(ContBB);
3771   }
3772 
3773   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3774   // optimizer it can aggressively ignore unwind edges.
3775   if (CGM.getLangOpts().ObjCAutoRefCount)
3776     AddObjCARCExceptionMetadata(Inst);
3777 
3778   return Inst;
3779 }
3780 
3781 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3782                                                   llvm::Value *New) {
3783   DeferredReplacements.push_back(std::make_pair(Old, New));
3784 }
3785 
3786 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3787                                  const CGCallee &Callee,
3788                                  ReturnValueSlot ReturnValue,
3789                                  const CallArgList &CallArgs,
3790                                  llvm::CallBase **callOrInvoke,
3791                                  SourceLocation Loc) {
3792   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3793 
3794   assert(Callee.isOrdinary() || Callee.isVirtual());
3795 
3796   // Handle struct-return functions by passing a pointer to the
3797   // location that we would like to return into.
3798   QualType RetTy = CallInfo.getReturnType();
3799   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3800 
3801   llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
3802 
3803 #ifndef NDEBUG
3804   if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
3805     // For an inalloca varargs function, we don't expect CallInfo to match the
3806     // function pointer's type, because the inalloca struct a will have extra
3807     // fields in it for the varargs parameters.  Code later in this function
3808     // bitcasts the function pointer to the type derived from CallInfo.
3809     //
3810     // In other cases, we assert that the types match up (until pointers stop
3811     // having pointee types).
3812     llvm::Type *TypeFromVal;
3813     if (Callee.isVirtual())
3814       TypeFromVal = Callee.getVirtualFunctionType();
3815     else
3816       TypeFromVal =
3817           Callee.getFunctionPointer()->getType()->getPointerElementType();
3818     assert(IRFuncTy == TypeFromVal);
3819   }
3820 #endif
3821 
3822   // 1. Set up the arguments.
3823 
3824   // If we're using inalloca, insert the allocation after the stack save.
3825   // FIXME: Do this earlier rather than hacking it in here!
3826   Address ArgMemory = Address::invalid();
3827   if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3828     const llvm::DataLayout &DL = CGM.getDataLayout();
3829     llvm::Instruction *IP = CallArgs.getStackBase();
3830     llvm::AllocaInst *AI;
3831     if (IP) {
3832       IP = IP->getNextNode();
3833       AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3834                                 "argmem", IP);
3835     } else {
3836       AI = CreateTempAlloca(ArgStruct, "argmem");
3837     }
3838     auto Align = CallInfo.getArgStructAlignment();
3839     AI->setAlignment(Align.getQuantity());
3840     AI->setUsedWithInAlloca(true);
3841     assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3842     ArgMemory = Address(AI, Align);
3843   }
3844 
3845   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3846   SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3847 
3848   // If the call returns a temporary with struct return, create a temporary
3849   // alloca to hold the result, unless one is given to us.
3850   Address SRetPtr = Address::invalid();
3851   Address SRetAlloca = Address::invalid();
3852   llvm::Value *UnusedReturnSizePtr = nullptr;
3853   if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3854     if (!ReturnValue.isNull()) {
3855       SRetPtr = ReturnValue.getValue();
3856     } else {
3857       SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
3858       if (HaveInsertPoint() && ReturnValue.isUnused()) {
3859         uint64_t size =
3860             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3861         UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
3862       }
3863     }
3864     if (IRFunctionArgs.hasSRetArg()) {
3865       IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3866     } else if (RetAI.isInAlloca()) {
3867       Address Addr =
3868           Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
3869       Builder.CreateStore(SRetPtr.getPointer(), Addr);
3870     }
3871   }
3872 
3873   Address swiftErrorTemp = Address::invalid();
3874   Address swiftErrorArg = Address::invalid();
3875 
3876   // Translate all of the arguments as necessary to match the IR lowering.
3877   assert(CallInfo.arg_size() == CallArgs.size() &&
3878          "Mismatch between function signature & arguments.");
3879   unsigned ArgNo = 0;
3880   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3881   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3882        I != E; ++I, ++info_it, ++ArgNo) {
3883     const ABIArgInfo &ArgInfo = info_it->info;
3884 
3885     // Insert a padding argument to ensure proper alignment.
3886     if (IRFunctionArgs.hasPaddingArg(ArgNo))
3887       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3888           llvm::UndefValue::get(ArgInfo.getPaddingType());
3889 
3890     unsigned FirstIRArg, NumIRArgs;
3891     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3892 
3893     switch (ArgInfo.getKind()) {
3894     case ABIArgInfo::InAlloca: {
3895       assert(NumIRArgs == 0);
3896       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3897       if (I->isAggregate()) {
3898         // Replace the placeholder with the appropriate argument slot GEP.
3899         Address Addr = I->hasLValue()
3900                            ? I->getKnownLValue().getAddress()
3901                            : I->getKnownRValue().getAggregateAddress();
3902         llvm::Instruction *Placeholder =
3903             cast<llvm::Instruction>(Addr.getPointer());
3904         CGBuilderTy::InsertPoint IP = Builder.saveIP();
3905         Builder.SetInsertPoint(Placeholder);
3906         Addr =
3907             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
3908         Builder.restoreIP(IP);
3909         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3910       } else {
3911         // Store the RValue into the argument struct.
3912         Address Addr =
3913             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
3914         unsigned AS = Addr.getType()->getPointerAddressSpace();
3915         llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3916         // There are some cases where a trivial bitcast is not avoidable.  The
3917         // definition of a type later in a translation unit may change it's type
3918         // from {}* to (%struct.foo*)*.
3919         if (Addr.getType() != MemType)
3920           Addr = Builder.CreateBitCast(Addr, MemType);
3921         I->copyInto(*this, Addr);
3922       }
3923       break;
3924     }
3925 
3926     case ABIArgInfo::Indirect: {
3927       assert(NumIRArgs == 1);
3928       if (!I->isAggregate()) {
3929         // Make a temporary alloca to pass the argument.
3930         Address Addr = CreateMemTempWithoutCast(
3931             I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
3932         IRCallArgs[FirstIRArg] = Addr.getPointer();
3933 
3934         I->copyInto(*this, Addr);
3935       } else {
3936         // We want to avoid creating an unnecessary temporary+copy here;
3937         // however, we need one in three cases:
3938         // 1. If the argument is not byval, and we are required to copy the
3939         //    source.  (This case doesn't occur on any common architecture.)
3940         // 2. If the argument is byval, RV is not sufficiently aligned, and
3941         //    we cannot force it to be sufficiently aligned.
3942         // 3. If the argument is byval, but RV is not located in default
3943         //    or alloca address space.
3944         Address Addr = I->hasLValue()
3945                            ? I->getKnownLValue().getAddress()
3946                            : I->getKnownRValue().getAggregateAddress();
3947         llvm::Value *V = Addr.getPointer();
3948         CharUnits Align = ArgInfo.getIndirectAlign();
3949         const llvm::DataLayout *TD = &CGM.getDataLayout();
3950 
3951         assert((FirstIRArg >= IRFuncTy->getNumParams() ||
3952                 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
3953                     TD->getAllocaAddrSpace()) &&
3954                "indirect argument must be in alloca address space");
3955 
3956         bool NeedCopy = false;
3957 
3958         if (Addr.getAlignment() < Align &&
3959             llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
3960                 Align.getQuantity()) {
3961           NeedCopy = true;
3962         } else if (I->hasLValue()) {
3963           auto LV = I->getKnownLValue();
3964           auto AS = LV.getAddressSpace();
3965 
3966           if ((!ArgInfo.getIndirectByVal() &&
3967                (LV.getAlignment() >=
3968                 getContext().getTypeAlignInChars(I->Ty)))) {
3969             NeedCopy = true;
3970           }
3971           if (!getLangOpts().OpenCL) {
3972             if ((ArgInfo.getIndirectByVal() &&
3973                 (AS != LangAS::Default &&
3974                  AS != CGM.getASTAllocaAddressSpace()))) {
3975               NeedCopy = true;
3976             }
3977           }
3978           // For OpenCL even if RV is located in default or alloca address space
3979           // we don't want to perform address space cast for it.
3980           else if ((ArgInfo.getIndirectByVal() &&
3981                     Addr.getType()->getAddressSpace() != IRFuncTy->
3982                       getParamType(FirstIRArg)->getPointerAddressSpace())) {
3983             NeedCopy = true;
3984           }
3985         }
3986 
3987         if (NeedCopy) {
3988           // Create an aligned temporary, and copy to it.
3989           Address AI = CreateMemTempWithoutCast(
3990               I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
3991           IRCallArgs[FirstIRArg] = AI.getPointer();
3992           I->copyInto(*this, AI);
3993         } else {
3994           // Skip the extra memcpy call.
3995           auto *T = V->getType()->getPointerElementType()->getPointerTo(
3996               CGM.getDataLayout().getAllocaAddrSpace());
3997           IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
3998               *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
3999               true);
4000         }
4001       }
4002       break;
4003     }
4004 
4005     case ABIArgInfo::Ignore:
4006       assert(NumIRArgs == 0);
4007       break;
4008 
4009     case ABIArgInfo::Extend:
4010     case ABIArgInfo::Direct: {
4011       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4012           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4013           ArgInfo.getDirectOffset() == 0) {
4014         assert(NumIRArgs == 1);
4015         llvm::Value *V;
4016         if (!I->isAggregate())
4017           V = I->getKnownRValue().getScalarVal();
4018         else
4019           V = Builder.CreateLoad(
4020               I->hasLValue() ? I->getKnownLValue().getAddress()
4021                              : I->getKnownRValue().getAggregateAddress());
4022 
4023         // Implement swifterror by copying into a new swifterror argument.
4024         // We'll write back in the normal path out of the call.
4025         if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4026               == ParameterABI::SwiftErrorResult) {
4027           assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4028 
4029           QualType pointeeTy = I->Ty->getPointeeType();
4030           swiftErrorArg =
4031             Address(V, getContext().getTypeAlignInChars(pointeeTy));
4032 
4033           swiftErrorTemp =
4034             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4035           V = swiftErrorTemp.getPointer();
4036           cast<llvm::AllocaInst>(V)->setSwiftError(true);
4037 
4038           llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4039           Builder.CreateStore(errorValue, swiftErrorTemp);
4040         }
4041 
4042         // We might have to widen integers, but we should never truncate.
4043         if (ArgInfo.getCoerceToType() != V->getType() &&
4044             V->getType()->isIntegerTy())
4045           V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4046 
4047         // If the argument doesn't match, perform a bitcast to coerce it.  This
4048         // can happen due to trivial type mismatches.
4049         if (FirstIRArg < IRFuncTy->getNumParams() &&
4050             V->getType() != IRFuncTy->getParamType(FirstIRArg))
4051           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4052 
4053         IRCallArgs[FirstIRArg] = V;
4054         break;
4055       }
4056 
4057       // FIXME: Avoid the conversion through memory if possible.
4058       Address Src = Address::invalid();
4059       if (!I->isAggregate()) {
4060         Src = CreateMemTemp(I->Ty, "coerce");
4061         I->copyInto(*this, Src);
4062       } else {
4063         Src = I->hasLValue() ? I->getKnownLValue().getAddress()
4064                              : I->getKnownRValue().getAggregateAddress();
4065       }
4066 
4067       // If the value is offset in memory, apply the offset now.
4068       Src = emitAddressAtOffset(*this, Src, ArgInfo);
4069 
4070       // Fast-isel and the optimizer generally like scalar values better than
4071       // FCAs, so we flatten them if this is safe to do for this argument.
4072       llvm::StructType *STy =
4073             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4074       if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4075         llvm::Type *SrcTy = Src.getType()->getElementType();
4076         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4077         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4078 
4079         // If the source type is smaller than the destination type of the
4080         // coerce-to logic, copy the source value into a temp alloca the size
4081         // of the destination type to allow loading all of it. The bits past
4082         // the source value are left undef.
4083         if (SrcSize < DstSize) {
4084           Address TempAlloca
4085             = CreateTempAlloca(STy, Src.getAlignment(),
4086                                Src.getName() + ".coerce");
4087           Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4088           Src = TempAlloca;
4089         } else {
4090           Src = Builder.CreateBitCast(Src,
4091                                       STy->getPointerTo(Src.getAddressSpace()));
4092         }
4093 
4094         assert(NumIRArgs == STy->getNumElements());
4095         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4096           Address EltPtr = Builder.CreateStructGEP(Src, i);
4097           llvm::Value *LI = Builder.CreateLoad(EltPtr);
4098           IRCallArgs[FirstIRArg + i] = LI;
4099         }
4100       } else {
4101         // In the simple case, just pass the coerced loaded value.
4102         assert(NumIRArgs == 1);
4103         IRCallArgs[FirstIRArg] =
4104           CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4105       }
4106 
4107       break;
4108     }
4109 
4110     case ABIArgInfo::CoerceAndExpand: {
4111       auto coercionType = ArgInfo.getCoerceAndExpandType();
4112       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4113 
4114       llvm::Value *tempSize = nullptr;
4115       Address addr = Address::invalid();
4116       Address AllocaAddr = Address::invalid();
4117       if (I->isAggregate()) {
4118         addr = I->hasLValue() ? I->getKnownLValue().getAddress()
4119                               : I->getKnownRValue().getAggregateAddress();
4120 
4121       } else {
4122         RValue RV = I->getKnownRValue();
4123         assert(RV.isScalar()); // complex should always just be direct
4124 
4125         llvm::Type *scalarType = RV.getScalarVal()->getType();
4126         auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4127         auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4128 
4129         // Materialize to a temporary.
4130         addr = CreateTempAlloca(RV.getScalarVal()->getType(),
4131                                 CharUnits::fromQuantity(std::max(
4132                                     layout->getAlignment(), scalarAlign)),
4133                                 "tmp",
4134                                 /*ArraySize=*/nullptr, &AllocaAddr);
4135         tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4136 
4137         Builder.CreateStore(RV.getScalarVal(), addr);
4138       }
4139 
4140       addr = Builder.CreateElementBitCast(addr, coercionType);
4141 
4142       unsigned IRArgPos = FirstIRArg;
4143       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4144         llvm::Type *eltType = coercionType->getElementType(i);
4145         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4146         Address eltAddr = Builder.CreateStructGEP(addr, i);
4147         llvm::Value *elt = Builder.CreateLoad(eltAddr);
4148         IRCallArgs[IRArgPos++] = elt;
4149       }
4150       assert(IRArgPos == FirstIRArg + NumIRArgs);
4151 
4152       if (tempSize) {
4153         EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4154       }
4155 
4156       break;
4157     }
4158 
4159     case ABIArgInfo::Expand:
4160       unsigned IRArgPos = FirstIRArg;
4161       ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4162       assert(IRArgPos == FirstIRArg + NumIRArgs);
4163       break;
4164     }
4165   }
4166 
4167   const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4168   llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4169 
4170   // If we're using inalloca, set up that argument.
4171   if (ArgMemory.isValid()) {
4172     llvm::Value *Arg = ArgMemory.getPointer();
4173     if (CallInfo.isVariadic()) {
4174       // When passing non-POD arguments by value to variadic functions, we will
4175       // end up with a variadic prototype and an inalloca call site.  In such
4176       // cases, we can't do any parameter mismatch checks.  Give up and bitcast
4177       // the callee.
4178       unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4179       CalleePtr =
4180           Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
4181     } else {
4182       llvm::Type *LastParamTy =
4183           IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4184       if (Arg->getType() != LastParamTy) {
4185 #ifndef NDEBUG
4186         // Assert that these structs have equivalent element types.
4187         llvm::StructType *FullTy = CallInfo.getArgStruct();
4188         llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4189             cast<llvm::PointerType>(LastParamTy)->getElementType());
4190         assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4191         for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4192                                                 DE = DeclaredTy->element_end(),
4193                                                 FI = FullTy->element_begin();
4194              DI != DE; ++DI, ++FI)
4195           assert(*DI == *FI);
4196 #endif
4197         Arg = Builder.CreateBitCast(Arg, LastParamTy);
4198       }
4199     }
4200     assert(IRFunctionArgs.hasInallocaArg());
4201     IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4202   }
4203 
4204   // 2. Prepare the function pointer.
4205 
4206   // If the callee is a bitcast of a non-variadic function to have a
4207   // variadic function pointer type, check to see if we can remove the
4208   // bitcast.  This comes up with unprototyped functions.
4209   //
4210   // This makes the IR nicer, but more importantly it ensures that we
4211   // can inline the function at -O0 if it is marked always_inline.
4212   auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
4213                                    llvm::Value *Ptr) -> llvm::Function * {
4214     if (!CalleeFT->isVarArg())
4215       return nullptr;
4216 
4217     // Get underlying value if it's a bitcast
4218     if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
4219       if (CE->getOpcode() == llvm::Instruction::BitCast)
4220         Ptr = CE->getOperand(0);
4221     }
4222 
4223     llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
4224     if (!OrigFn)
4225       return nullptr;
4226 
4227     llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4228 
4229     // If the original type is variadic, or if any of the component types
4230     // disagree, we cannot remove the cast.
4231     if (OrigFT->isVarArg() ||
4232         OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4233         OrigFT->getReturnType() != CalleeFT->getReturnType())
4234       return nullptr;
4235 
4236     for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4237       if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4238         return nullptr;
4239 
4240     return OrigFn;
4241   };
4242 
4243   if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
4244     CalleePtr = OrigFn;
4245     IRFuncTy = OrigFn->getFunctionType();
4246   }
4247 
4248   // 3. Perform the actual call.
4249 
4250   // Deactivate any cleanups that we're supposed to do immediately before
4251   // the call.
4252   if (!CallArgs.getCleanupsToDeactivate().empty())
4253     deactivateArgCleanupsBeforeCall(*this, CallArgs);
4254 
4255   // Assert that the arguments we computed match up.  The IR verifier
4256   // will catch this, but this is a common enough source of problems
4257   // during IRGen changes that it's way better for debugging to catch
4258   // it ourselves here.
4259 #ifndef NDEBUG
4260   assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4261   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4262     // Inalloca argument can have different type.
4263     if (IRFunctionArgs.hasInallocaArg() &&
4264         i == IRFunctionArgs.getInallocaArgNo())
4265       continue;
4266     if (i < IRFuncTy->getNumParams())
4267       assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4268   }
4269 #endif
4270 
4271   // Update the largest vector width if any arguments have vector types.
4272   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4273     if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
4274       LargestVectorWidth = std::max(LargestVectorWidth,
4275                                     VT->getPrimitiveSizeInBits());
4276   }
4277 
4278   // Compute the calling convention and attributes.
4279   unsigned CallingConv;
4280   llvm::AttributeList Attrs;
4281   CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4282                              Callee.getAbstractInfo(), Attrs, CallingConv,
4283                              /*AttrOnCallSite=*/true);
4284 
4285   // Apply some call-site-specific attributes.
4286   // TODO: work this into building the attribute set.
4287 
4288   // Apply always_inline to all calls within flatten functions.
4289   // FIXME: should this really take priority over __try, below?
4290   if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4291       !(Callee.getAbstractInfo().getCalleeDecl().getDecl() &&
4292         Callee.getAbstractInfo()
4293             .getCalleeDecl()
4294             .getDecl()
4295             ->hasAttr<NoInlineAttr>())) {
4296     Attrs =
4297         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4298                            llvm::Attribute::AlwaysInline);
4299   }
4300 
4301   // Disable inlining inside SEH __try blocks.
4302   if (isSEHTryScope()) {
4303     Attrs =
4304         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4305                            llvm::Attribute::NoInline);
4306   }
4307 
4308   // Decide whether to use a call or an invoke.
4309   bool CannotThrow;
4310   if (currentFunctionUsesSEHTry()) {
4311     // SEH cares about asynchronous exceptions, so everything can "throw."
4312     CannotThrow = false;
4313   } else if (isCleanupPadScope() &&
4314              EHPersonality::get(*this).isMSVCXXPersonality()) {
4315     // The MSVC++ personality will implicitly terminate the program if an
4316     // exception is thrown during a cleanup outside of a try/catch.
4317     // We don't need to model anything in IR to get this behavior.
4318     CannotThrow = true;
4319   } else {
4320     // Otherwise, nounwind call sites will never throw.
4321     CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4322                                      llvm::Attribute::NoUnwind);
4323   }
4324 
4325   // If we made a temporary, be sure to clean up after ourselves. Note that we
4326   // can't depend on being inside of an ExprWithCleanups, so we need to manually
4327   // pop this cleanup later on. Being eager about this is OK, since this
4328   // temporary is 'invisible' outside of the callee.
4329   if (UnusedReturnSizePtr)
4330     pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4331                                          UnusedReturnSizePtr);
4332 
4333   llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4334 
4335   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4336       getBundlesForFunclet(CalleePtr);
4337 
4338   // Emit the actual call/invoke instruction.
4339   llvm::CallBase *CI;
4340   if (!InvokeDest) {
4341     CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
4342   } else {
4343     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4344     CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
4345                               BundleList);
4346     EmitBlock(Cont);
4347   }
4348   if (callOrInvoke)
4349     *callOrInvoke = CI;
4350 
4351   // Apply the attributes and calling convention.
4352   CI->setAttributes(Attrs);
4353   CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4354 
4355   // Apply various metadata.
4356 
4357   if (!CI->getType()->isVoidTy())
4358     CI->setName("call");
4359 
4360   // Update largest vector width from the return type.
4361   if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
4362     LargestVectorWidth = std::max(LargestVectorWidth,
4363                                   VT->getPrimitiveSizeInBits());
4364 
4365   // Insert instrumentation or attach profile metadata at indirect call sites.
4366   // For more details, see the comment before the definition of
4367   // IPVK_IndirectCallTarget in InstrProfData.inc.
4368   if (!CI->getCalledFunction())
4369     PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4370                      CI, CalleePtr);
4371 
4372   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4373   // optimizer it can aggressively ignore unwind edges.
4374   if (CGM.getLangOpts().ObjCAutoRefCount)
4375     AddObjCARCExceptionMetadata(CI);
4376 
4377   // Suppress tail calls if requested.
4378   if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4379     const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4380     if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4381       Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4382   }
4383 
4384   // 4. Finish the call.
4385 
4386   // If the call doesn't return, finish the basic block and clear the
4387   // insertion point; this allows the rest of IRGen to discard
4388   // unreachable code.
4389   if (CI->doesNotReturn()) {
4390     if (UnusedReturnSizePtr)
4391       PopCleanupBlock();
4392 
4393     // Strip away the noreturn attribute to better diagnose unreachable UB.
4394     if (SanOpts.has(SanitizerKind::Unreachable)) {
4395       // Also remove from function since CallBase::hasFnAttr additionally checks
4396       // attributes of the called function.
4397       if (auto *F = CI->getCalledFunction())
4398         F->removeFnAttr(llvm::Attribute::NoReturn);
4399       CI->removeAttribute(llvm::AttributeList::FunctionIndex,
4400                           llvm::Attribute::NoReturn);
4401 
4402       // Avoid incompatibility with ASan which relies on the `noreturn`
4403       // attribute to insert handler calls.
4404       if (SanOpts.hasOneOf(SanitizerKind::Address |
4405                            SanitizerKind::KernelAddress)) {
4406         SanitizerScope SanScope(this);
4407         llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
4408         Builder.SetInsertPoint(CI);
4409         auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4410         llvm::FunctionCallee Fn =
4411             CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
4412         EmitNounwindRuntimeCall(Fn);
4413       }
4414     }
4415 
4416     EmitUnreachable(Loc);
4417     Builder.ClearInsertionPoint();
4418 
4419     // FIXME: For now, emit a dummy basic block because expr emitters in
4420     // generally are not ready to handle emitting expressions at unreachable
4421     // points.
4422     EnsureInsertPoint();
4423 
4424     // Return a reasonable RValue.
4425     return GetUndefRValue(RetTy);
4426   }
4427 
4428   // Perform the swifterror writeback.
4429   if (swiftErrorTemp.isValid()) {
4430     llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4431     Builder.CreateStore(errorResult, swiftErrorArg);
4432   }
4433 
4434   // Emit any call-associated writebacks immediately.  Arguably this
4435   // should happen after any return-value munging.
4436   if (CallArgs.hasWritebacks())
4437     emitWritebacks(*this, CallArgs);
4438 
4439   // The stack cleanup for inalloca arguments has to run out of the normal
4440   // lexical order, so deactivate it and run it manually here.
4441   CallArgs.freeArgumentMemory(*this);
4442 
4443   // Extract the return value.
4444   RValue Ret = [&] {
4445     switch (RetAI.getKind()) {
4446     case ABIArgInfo::CoerceAndExpand: {
4447       auto coercionType = RetAI.getCoerceAndExpandType();
4448 
4449       Address addr = SRetPtr;
4450       addr = Builder.CreateElementBitCast(addr, coercionType);
4451 
4452       assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4453       bool requiresExtract = isa<llvm::StructType>(CI->getType());
4454 
4455       unsigned unpaddedIndex = 0;
4456       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4457         llvm::Type *eltType = coercionType->getElementType(i);
4458         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4459         Address eltAddr = Builder.CreateStructGEP(addr, i);
4460         llvm::Value *elt = CI;
4461         if (requiresExtract)
4462           elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4463         else
4464           assert(unpaddedIndex == 0);
4465         Builder.CreateStore(elt, eltAddr);
4466       }
4467       // FALLTHROUGH
4468       LLVM_FALLTHROUGH;
4469     }
4470 
4471     case ABIArgInfo::InAlloca:
4472     case ABIArgInfo::Indirect: {
4473       RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4474       if (UnusedReturnSizePtr)
4475         PopCleanupBlock();
4476       return ret;
4477     }
4478 
4479     case ABIArgInfo::Ignore:
4480       // If we are ignoring an argument that had a result, make sure to
4481       // construct the appropriate return value for our caller.
4482       return GetUndefRValue(RetTy);
4483 
4484     case ABIArgInfo::Extend:
4485     case ABIArgInfo::Direct: {
4486       llvm::Type *RetIRTy = ConvertType(RetTy);
4487       if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4488         switch (getEvaluationKind(RetTy)) {
4489         case TEK_Complex: {
4490           llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4491           llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4492           return RValue::getComplex(std::make_pair(Real, Imag));
4493         }
4494         case TEK_Aggregate: {
4495           Address DestPtr = ReturnValue.getValue();
4496           bool DestIsVolatile = ReturnValue.isVolatile();
4497 
4498           if (!DestPtr.isValid()) {
4499             DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4500             DestIsVolatile = false;
4501           }
4502           BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4503           return RValue::getAggregate(DestPtr);
4504         }
4505         case TEK_Scalar: {
4506           // If the argument doesn't match, perform a bitcast to coerce it.  This
4507           // can happen due to trivial type mismatches.
4508           llvm::Value *V = CI;
4509           if (V->getType() != RetIRTy)
4510             V = Builder.CreateBitCast(V, RetIRTy);
4511           return RValue::get(V);
4512         }
4513         }
4514         llvm_unreachable("bad evaluation kind");
4515       }
4516 
4517       Address DestPtr = ReturnValue.getValue();
4518       bool DestIsVolatile = ReturnValue.isVolatile();
4519 
4520       if (!DestPtr.isValid()) {
4521         DestPtr = CreateMemTemp(RetTy, "coerce");
4522         DestIsVolatile = false;
4523       }
4524 
4525       // If the value is offset in memory, apply the offset now.
4526       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4527       CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4528 
4529       return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4530     }
4531 
4532     case ABIArgInfo::Expand:
4533       llvm_unreachable("Invalid ABI kind for return argument");
4534     }
4535 
4536     llvm_unreachable("Unhandled ABIArgInfo::Kind");
4537   } ();
4538 
4539   // Emit the assume_aligned check on the return value.
4540   const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4541   if (Ret.isScalar() && TargetDecl) {
4542     if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4543       llvm::Value *OffsetValue = nullptr;
4544       if (const auto *Offset = AA->getOffset())
4545         OffsetValue = EmitScalarExpr(Offset);
4546 
4547       llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4548       llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4549       EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4550                               AlignmentCI->getZExtValue(), OffsetValue);
4551     } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4552       llvm::Value *AlignmentVal = CallArgs[AA->getParamIndex().getLLVMIndex()]
4553                                       .getRValue(*this)
4554                                       .getScalarVal();
4555       EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4556                               AlignmentVal);
4557     }
4558   }
4559 
4560   return Ret;
4561 }
4562 
4563 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4564   if (isVirtual()) {
4565     const CallExpr *CE = getVirtualCallExpr();
4566     return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4567         CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
4568         CE ? CE->getBeginLoc() : SourceLocation());
4569   }
4570 
4571   return *this;
4572 }
4573 
4574 /* VarArg handling */
4575 
4576 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4577   VAListAddr = VE->isMicrosoftABI()
4578                  ? EmitMSVAListRef(VE->getSubExpr())
4579                  : EmitVAListRef(VE->getSubExpr());
4580   QualType Ty = VE->getType();
4581   if (VE->isMicrosoftABI())
4582     return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4583   return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4584 }
4585