1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliancy.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "CGCall.h"
16 #include "ABIInfo.h"
17 #include "CGBlocks.h"
18 #include "CGCXXABI.h"
19 #include "CGCleanup.h"
20 #include "CodeGenFunction.h"
21 #include "CodeGenModule.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/Decl.h"
24 #include "clang/AST/DeclCXX.h"
25 #include "clang/AST/DeclObjC.h"
26 #include "clang/Basic/TargetBuiltins.h"
27 #include "clang/Basic/TargetInfo.h"
28 #include "clang/CodeGen/CGFunctionInfo.h"
29 #include "clang/CodeGen/SwiftCallingConv.h"
30 #include "clang/Frontend/CodeGenOptions.h"
31 #include "llvm/ADT/StringExtras.h"
32 #include "llvm/Transforms/Utils/Local.h"
33 #include "llvm/Analysis/ValueTracking.h"
34 #include "llvm/IR/Attributes.h"
35 #include "llvm/IR/CallSite.h"
36 #include "llvm/IR/CallingConv.h"
37 #include "llvm/IR/DataLayout.h"
38 #include "llvm/IR/InlineAsm.h"
39 #include "llvm/IR/IntrinsicInst.h"
40 #include "llvm/IR/Intrinsics.h"
41 using namespace clang;
42 using namespace CodeGen;
43 
44 /***/
45 
46 unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
47   switch (CC) {
48   default: return llvm::CallingConv::C;
49   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
50   case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
51   case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
52   case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
53   case CC_Win64: return llvm::CallingConv::Win64;
54   case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
55   case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
56   case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
57   case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
58   // TODO: Add support for __pascal to LLVM.
59   case CC_X86Pascal: return llvm::CallingConv::C;
60   // TODO: Add support for __vectorcall to LLVM.
61   case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
62   case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
63   case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
64   case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
65   case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
66   case CC_Swift: return llvm::CallingConv::Swift;
67   }
68 }
69 
70 /// Derives the 'this' type for codegen purposes, i.e. ignoring method
71 /// qualification.
72 /// FIXME: address space qualification?
73 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
74   QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
75   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
76 }
77 
78 /// Returns the canonical formal type of the given C++ method.
79 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
80   return MD->getType()->getCanonicalTypeUnqualified()
81            .getAs<FunctionProtoType>();
82 }
83 
84 /// Returns the "extra-canonicalized" return type, which discards
85 /// qualifiers on the return type.  Codegen doesn't care about them,
86 /// and it makes ABI code a little easier to be able to assume that
87 /// all parameter and return types are top-level unqualified.
88 static CanQualType GetReturnType(QualType RetTy) {
89   return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
90 }
91 
92 /// Arrange the argument and result information for a value of the given
93 /// unprototyped freestanding function type.
94 const CGFunctionInfo &
95 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
96   // When translating an unprototyped function type, always use a
97   // variadic type.
98   return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
99                                  /*instanceMethod=*/false,
100                                  /*chainCall=*/false, None,
101                                  FTNP->getExtInfo(), {}, RequiredArgs(0));
102 }
103 
104 static void addExtParameterInfosForCall(
105          llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
106                                         const FunctionProtoType *proto,
107                                         unsigned prefixArgs,
108                                         unsigned totalArgs) {
109   assert(proto->hasExtParameterInfos());
110   assert(paramInfos.size() <= prefixArgs);
111   assert(proto->getNumParams() + prefixArgs <= totalArgs);
112 
113   paramInfos.reserve(totalArgs);
114 
115   // Add default infos for any prefix args that don't already have infos.
116   paramInfos.resize(prefixArgs);
117 
118   // Add infos for the prototype.
119   for (const auto &ParamInfo : proto->getExtParameterInfos()) {
120     paramInfos.push_back(ParamInfo);
121     // pass_object_size params have no parameter info.
122     if (ParamInfo.hasPassObjectSize())
123       paramInfos.emplace_back();
124   }
125 
126   assert(paramInfos.size() <= totalArgs &&
127          "Did we forget to insert pass_object_size args?");
128   // Add default infos for the variadic and/or suffix arguments.
129   paramInfos.resize(totalArgs);
130 }
131 
132 /// Adds the formal parameters in FPT to the given prefix. If any parameter in
133 /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
134 static void appendParameterTypes(const CodeGenTypes &CGT,
135                                  SmallVectorImpl<CanQualType> &prefix,
136               SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
137                                  CanQual<FunctionProtoType> FPT) {
138   // Fast path: don't touch param info if we don't need to.
139   if (!FPT->hasExtParameterInfos()) {
140     assert(paramInfos.empty() &&
141            "We have paramInfos, but the prototype doesn't?");
142     prefix.append(FPT->param_type_begin(), FPT->param_type_end());
143     return;
144   }
145 
146   unsigned PrefixSize = prefix.size();
147   // In the vast majority of cases, we'll have precisely FPT->getNumParams()
148   // parameters; the only thing that can change this is the presence of
149   // pass_object_size. So, we preallocate for the common case.
150   prefix.reserve(prefix.size() + FPT->getNumParams());
151 
152   auto ExtInfos = FPT->getExtParameterInfos();
153   assert(ExtInfos.size() == FPT->getNumParams());
154   for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
155     prefix.push_back(FPT->getParamType(I));
156     if (ExtInfos[I].hasPassObjectSize())
157       prefix.push_back(CGT.getContext().getSizeType());
158   }
159 
160   addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
161                               prefix.size());
162 }
163 
164 /// Arrange the LLVM function layout for a value of the given function
165 /// type, on top of any implicit parameters already stored.
166 static const CGFunctionInfo &
167 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
168                         SmallVectorImpl<CanQualType> &prefix,
169                         CanQual<FunctionProtoType> FTP,
170                         const FunctionDecl *FD) {
171   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
172   RequiredArgs Required =
173       RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
174   // FIXME: Kill copy.
175   appendParameterTypes(CGT, prefix, paramInfos, FTP);
176   CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
177 
178   return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
179                                      /*chainCall=*/false, prefix,
180                                      FTP->getExtInfo(), paramInfos,
181                                      Required);
182 }
183 
184 /// Arrange the argument and result information for a value of the
185 /// given freestanding function type.
186 const CGFunctionInfo &
187 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
188                                       const FunctionDecl *FD) {
189   SmallVector<CanQualType, 16> argTypes;
190   return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
191                                    FTP, FD);
192 }
193 
194 static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
195   // Set the appropriate calling convention for the Function.
196   if (D->hasAttr<StdCallAttr>())
197     return CC_X86StdCall;
198 
199   if (D->hasAttr<FastCallAttr>())
200     return CC_X86FastCall;
201 
202   if (D->hasAttr<RegCallAttr>())
203     return CC_X86RegCall;
204 
205   if (D->hasAttr<ThisCallAttr>())
206     return CC_X86ThisCall;
207 
208   if (D->hasAttr<VectorCallAttr>())
209     return CC_X86VectorCall;
210 
211   if (D->hasAttr<PascalAttr>())
212     return CC_X86Pascal;
213 
214   if (PcsAttr *PCS = D->getAttr<PcsAttr>())
215     return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
216 
217   if (D->hasAttr<IntelOclBiccAttr>())
218     return CC_IntelOclBicc;
219 
220   if (D->hasAttr<MSABIAttr>())
221     return IsWindows ? CC_C : CC_Win64;
222 
223   if (D->hasAttr<SysVABIAttr>())
224     return IsWindows ? CC_X86_64SysV : CC_C;
225 
226   if (D->hasAttr<PreserveMostAttr>())
227     return CC_PreserveMost;
228 
229   if (D->hasAttr<PreserveAllAttr>())
230     return CC_PreserveAll;
231 
232   return CC_C;
233 }
234 
235 /// Arrange the argument and result information for a call to an
236 /// unknown C++ non-static member function of the given abstract type.
237 /// (Zero value of RD means we don't have any meaningful "this" argument type,
238 ///  so fall back to a generic pointer type).
239 /// The member function must be an ordinary function, i.e. not a
240 /// constructor or destructor.
241 const CGFunctionInfo &
242 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
243                                    const FunctionProtoType *FTP,
244                                    const CXXMethodDecl *MD) {
245   SmallVector<CanQualType, 16> argTypes;
246 
247   // Add the 'this' pointer.
248   if (RD)
249     argTypes.push_back(GetThisType(Context, RD));
250   else
251     argTypes.push_back(Context.VoidPtrTy);
252 
253   return ::arrangeLLVMFunctionInfo(
254       *this, true, argTypes,
255       FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
256 }
257 
258 /// Set calling convention for CUDA/HIP kernel.
259 static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
260                                            const FunctionDecl *FD) {
261   if (FD->hasAttr<CUDAGlobalAttr>()) {
262     const FunctionType *FT = FTy->getAs<FunctionType>();
263     CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
264     FTy = FT->getCanonicalTypeUnqualified();
265   }
266 }
267 
268 /// Arrange the argument and result information for a declaration or
269 /// definition of the given C++ non-static member function.  The
270 /// member function must be an ordinary function, i.e. not a
271 /// constructor or destructor.
272 const CGFunctionInfo &
273 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
274   assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
275   assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
276 
277   CanQualType FT = GetFormalType(MD).getAs<Type>();
278   setCUDAKernelCallingConvention(FT, CGM, MD);
279   auto prototype = FT.getAs<FunctionProtoType>();
280 
281   if (MD->isInstance()) {
282     // The abstract case is perfectly fine.
283     const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
284     return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
285   }
286 
287   return arrangeFreeFunctionType(prototype, MD);
288 }
289 
290 bool CodeGenTypes::inheritingCtorHasParams(
291     const InheritedConstructor &Inherited, CXXCtorType Type) {
292   // Parameters are unnecessary if we're constructing a base class subobject
293   // and the inherited constructor lives in a virtual base.
294   return Type == Ctor_Complete ||
295          !Inherited.getShadowDecl()->constructsVirtualBase() ||
296          !Target.getCXXABI().hasConstructorVariants();
297   }
298 
299 const CGFunctionInfo &
300 CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
301                                             StructorType Type) {
302 
303   SmallVector<CanQualType, 16> argTypes;
304   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
305   argTypes.push_back(GetThisType(Context, MD->getParent()));
306 
307   bool PassParams = true;
308 
309   GlobalDecl GD;
310   if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
311     GD = GlobalDecl(CD, toCXXCtorType(Type));
312 
313     // A base class inheriting constructor doesn't get forwarded arguments
314     // needed to construct a virtual base (or base class thereof).
315     if (auto Inherited = CD->getInheritedConstructor())
316       PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
317   } else {
318     auto *DD = dyn_cast<CXXDestructorDecl>(MD);
319     GD = GlobalDecl(DD, toCXXDtorType(Type));
320   }
321 
322   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
323 
324   // Add the formal parameters.
325   if (PassParams)
326     appendParameterTypes(*this, argTypes, paramInfos, FTP);
327 
328   CGCXXABI::AddedStructorArgs AddedArgs =
329       TheCXXABI.buildStructorSignature(MD, Type, argTypes);
330   if (!paramInfos.empty()) {
331     // Note: prefix implies after the first param.
332     if (AddedArgs.Prefix)
333       paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
334                         FunctionProtoType::ExtParameterInfo{});
335     if (AddedArgs.Suffix)
336       paramInfos.append(AddedArgs.Suffix,
337                         FunctionProtoType::ExtParameterInfo{});
338   }
339 
340   RequiredArgs required =
341       (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
342                                       : RequiredArgs::All);
343 
344   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
345   CanQualType resultType = TheCXXABI.HasThisReturn(GD)
346                                ? argTypes.front()
347                                : TheCXXABI.hasMostDerivedReturn(GD)
348                                      ? CGM.getContext().VoidPtrTy
349                                      : Context.VoidTy;
350   return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
351                                  /*chainCall=*/false, argTypes, extInfo,
352                                  paramInfos, required);
353 }
354 
355 static SmallVector<CanQualType, 16>
356 getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
357   SmallVector<CanQualType, 16> argTypes;
358   for (auto &arg : args)
359     argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
360   return argTypes;
361 }
362 
363 static SmallVector<CanQualType, 16>
364 getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
365   SmallVector<CanQualType, 16> argTypes;
366   for (auto &arg : args)
367     argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
368   return argTypes;
369 }
370 
371 static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
372 getExtParameterInfosForCall(const FunctionProtoType *proto,
373                             unsigned prefixArgs, unsigned totalArgs) {
374   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
375   if (proto->hasExtParameterInfos()) {
376     addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
377   }
378   return result;
379 }
380 
381 /// Arrange a call to a C++ method, passing the given arguments.
382 ///
383 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
384 /// parameter.
385 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
386 /// args.
387 /// PassProtoArgs indicates whether `args` has args for the parameters in the
388 /// given CXXConstructorDecl.
389 const CGFunctionInfo &
390 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
391                                         const CXXConstructorDecl *D,
392                                         CXXCtorType CtorKind,
393                                         unsigned ExtraPrefixArgs,
394                                         unsigned ExtraSuffixArgs,
395                                         bool PassProtoArgs) {
396   // FIXME: Kill copy.
397   SmallVector<CanQualType, 16> ArgTypes;
398   for (const auto &Arg : args)
399     ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
400 
401   // +1 for implicit this, which should always be args[0].
402   unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
403 
404   CanQual<FunctionProtoType> FPT = GetFormalType(D);
405   RequiredArgs Required =
406       RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
407   GlobalDecl GD(D, CtorKind);
408   CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
409                                ? ArgTypes.front()
410                                : TheCXXABI.hasMostDerivedReturn(GD)
411                                      ? CGM.getContext().VoidPtrTy
412                                      : Context.VoidTy;
413 
414   FunctionType::ExtInfo Info = FPT->getExtInfo();
415   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
416   // If the prototype args are elided, we should only have ABI-specific args,
417   // which never have param info.
418   if (PassProtoArgs && FPT->hasExtParameterInfos()) {
419     // ABI-specific suffix arguments are treated the same as variadic arguments.
420     addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
421                                 ArgTypes.size());
422   }
423   return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
424                                  /*chainCall=*/false, ArgTypes, Info,
425                                  ParamInfos, Required);
426 }
427 
428 /// Arrange the argument and result information for the declaration or
429 /// definition of the given function.
430 const CGFunctionInfo &
431 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
432   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
433     if (MD->isInstance())
434       return arrangeCXXMethodDeclaration(MD);
435 
436   CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
437 
438   assert(isa<FunctionType>(FTy));
439   setCUDAKernelCallingConvention(FTy, CGM, FD);
440 
441   // When declaring a function without a prototype, always use a
442   // non-variadic type.
443   if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
444     return arrangeLLVMFunctionInfo(
445         noProto->getReturnType(), /*instanceMethod=*/false,
446         /*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All);
447   }
448 
449   return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
450 }
451 
452 /// Arrange the argument and result information for the declaration or
453 /// definition of an Objective-C method.
454 const CGFunctionInfo &
455 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
456   // It happens that this is the same as a call with no optional
457   // arguments, except also using the formal 'self' type.
458   return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
459 }
460 
461 /// Arrange the argument and result information for the function type
462 /// through which to perform a send to the given Objective-C method,
463 /// using the given receiver type.  The receiver type is not always
464 /// the 'self' type of the method or even an Objective-C pointer type.
465 /// This is *not* the right method for actually performing such a
466 /// message send, due to the possibility of optional arguments.
467 const CGFunctionInfo &
468 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
469                                               QualType receiverType) {
470   SmallVector<CanQualType, 16> argTys;
471   SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
472   argTys.push_back(Context.getCanonicalParamType(receiverType));
473   argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
474   // FIXME: Kill copy?
475   for (const auto *I : MD->parameters()) {
476     argTys.push_back(Context.getCanonicalParamType(I->getType()));
477     auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
478         I->hasAttr<NoEscapeAttr>());
479     extParamInfos.push_back(extParamInfo);
480   }
481 
482   FunctionType::ExtInfo einfo;
483   bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
484   einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
485 
486   if (getContext().getLangOpts().ObjCAutoRefCount &&
487       MD->hasAttr<NSReturnsRetainedAttr>())
488     einfo = einfo.withProducesResult(true);
489 
490   RequiredArgs required =
491     (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
492 
493   return arrangeLLVMFunctionInfo(
494       GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
495       /*chainCall=*/false, argTys, einfo, extParamInfos, required);
496 }
497 
498 const CGFunctionInfo &
499 CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
500                                                  const CallArgList &args) {
501   auto argTypes = getArgTypesForCall(Context, args);
502   FunctionType::ExtInfo einfo;
503 
504   return arrangeLLVMFunctionInfo(
505       GetReturnType(returnType), /*instanceMethod=*/false,
506       /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
507 }
508 
509 const CGFunctionInfo &
510 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
511   // FIXME: Do we need to handle ObjCMethodDecl?
512   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
513 
514   if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
515     return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
516 
517   if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
518     return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
519 
520   return arrangeFunctionDeclaration(FD);
521 }
522 
523 /// Arrange a thunk that takes 'this' as the first parameter followed by
524 /// varargs.  Return a void pointer, regardless of the actual return type.
525 /// The body of the thunk will end in a musttail call to a function of the
526 /// correct type, and the caller will bitcast the function to the correct
527 /// prototype.
528 const CGFunctionInfo &
529 CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
530   assert(MD->isVirtual() && "only methods have thunks");
531   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
532   CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
533   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
534                                  /*chainCall=*/false, ArgTys,
535                                  FTP->getExtInfo(), {}, RequiredArgs(1));
536 }
537 
538 const CGFunctionInfo &
539 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
540                                    CXXCtorType CT) {
541   assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
542 
543   CanQual<FunctionProtoType> FTP = GetFormalType(CD);
544   SmallVector<CanQualType, 2> ArgTys;
545   const CXXRecordDecl *RD = CD->getParent();
546   ArgTys.push_back(GetThisType(Context, RD));
547   if (CT == Ctor_CopyingClosure)
548     ArgTys.push_back(*FTP->param_type_begin());
549   if (RD->getNumVBases() > 0)
550     ArgTys.push_back(Context.IntTy);
551   CallingConv CC = Context.getDefaultCallingConvention(
552       /*IsVariadic=*/false, /*IsCXXMethod=*/true);
553   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
554                                  /*chainCall=*/false, ArgTys,
555                                  FunctionType::ExtInfo(CC), {},
556                                  RequiredArgs::All);
557 }
558 
559 /// Arrange a call as unto a free function, except possibly with an
560 /// additional number of formal parameters considered required.
561 static const CGFunctionInfo &
562 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
563                             CodeGenModule &CGM,
564                             const CallArgList &args,
565                             const FunctionType *fnType,
566                             unsigned numExtraRequiredArgs,
567                             bool chainCall) {
568   assert(args.size() >= numExtraRequiredArgs);
569 
570   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
571 
572   // In most cases, there are no optional arguments.
573   RequiredArgs required = RequiredArgs::All;
574 
575   // If we have a variadic prototype, the required arguments are the
576   // extra prefix plus the arguments in the prototype.
577   if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
578     if (proto->isVariadic())
579       required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
580 
581     if (proto->hasExtParameterInfos())
582       addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
583                                   args.size());
584 
585   // If we don't have a prototype at all, but we're supposed to
586   // explicitly use the variadic convention for unprototyped calls,
587   // treat all of the arguments as required but preserve the nominal
588   // possibility of variadics.
589   } else if (CGM.getTargetCodeGenInfo()
590                 .isNoProtoCallVariadic(args,
591                                        cast<FunctionNoProtoType>(fnType))) {
592     required = RequiredArgs(args.size());
593   }
594 
595   // FIXME: Kill copy.
596   SmallVector<CanQualType, 16> argTypes;
597   for (const auto &arg : args)
598     argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
599   return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
600                                      /*instanceMethod=*/false, chainCall,
601                                      argTypes, fnType->getExtInfo(), paramInfos,
602                                      required);
603 }
604 
605 /// Figure out the rules for calling a function with the given formal
606 /// type using the given arguments.  The arguments are necessary
607 /// because the function might be unprototyped, in which case it's
608 /// target-dependent in crazy ways.
609 const CGFunctionInfo &
610 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
611                                       const FunctionType *fnType,
612                                       bool chainCall) {
613   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
614                                      chainCall ? 1 : 0, chainCall);
615 }
616 
617 /// A block function is essentially a free function with an
618 /// extra implicit argument.
619 const CGFunctionInfo &
620 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
621                                        const FunctionType *fnType) {
622   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
623                                      /*chainCall=*/false);
624 }
625 
626 const CGFunctionInfo &
627 CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
628                                               const FunctionArgList &params) {
629   auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
630   auto argTypes = getArgTypesForDeclaration(Context, params);
631 
632   return arrangeLLVMFunctionInfo(
633       GetReturnType(proto->getReturnType()),
634       /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
635       proto->getExtInfo(), paramInfos,
636       RequiredArgs::forPrototypePlus(proto, 1, nullptr));
637 }
638 
639 const CGFunctionInfo &
640 CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
641                                          const CallArgList &args) {
642   // FIXME: Kill copy.
643   SmallVector<CanQualType, 16> argTypes;
644   for (const auto &Arg : args)
645     argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
646   return arrangeLLVMFunctionInfo(
647       GetReturnType(resultType), /*instanceMethod=*/false,
648       /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
649       /*paramInfos=*/ {}, RequiredArgs::All);
650 }
651 
652 const CGFunctionInfo &
653 CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
654                                                 const FunctionArgList &args) {
655   auto argTypes = getArgTypesForDeclaration(Context, args);
656 
657   return arrangeLLVMFunctionInfo(
658       GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
659       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
660 }
661 
662 const CGFunctionInfo &
663 CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
664                                               ArrayRef<CanQualType> argTypes) {
665   return arrangeLLVMFunctionInfo(
666       resultType, /*instanceMethod=*/false, /*chainCall=*/false,
667       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
668 }
669 
670 /// Arrange a call to a C++ method, passing the given arguments.
671 ///
672 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
673 /// does not count `this`.
674 const CGFunctionInfo &
675 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
676                                    const FunctionProtoType *proto,
677                                    RequiredArgs required,
678                                    unsigned numPrefixArgs) {
679   assert(numPrefixArgs + 1 <= args.size() &&
680          "Emitting a call with less args than the required prefix?");
681   // Add one to account for `this`. It's a bit awkward here, but we don't count
682   // `this` in similar places elsewhere.
683   auto paramInfos =
684     getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
685 
686   // FIXME: Kill copy.
687   auto argTypes = getArgTypesForCall(Context, args);
688 
689   FunctionType::ExtInfo info = proto->getExtInfo();
690   return arrangeLLVMFunctionInfo(
691       GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
692       /*chainCall=*/false, argTypes, info, paramInfos, required);
693 }
694 
695 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
696   return arrangeLLVMFunctionInfo(
697       getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
698       None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
699 }
700 
701 const CGFunctionInfo &
702 CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
703                           const CallArgList &args) {
704   assert(signature.arg_size() <= args.size());
705   if (signature.arg_size() == args.size())
706     return signature;
707 
708   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
709   auto sigParamInfos = signature.getExtParameterInfos();
710   if (!sigParamInfos.empty()) {
711     paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
712     paramInfos.resize(args.size());
713   }
714 
715   auto argTypes = getArgTypesForCall(Context, args);
716 
717   assert(signature.getRequiredArgs().allowsOptionalArgs());
718   return arrangeLLVMFunctionInfo(signature.getReturnType(),
719                                  signature.isInstanceMethod(),
720                                  signature.isChainCall(),
721                                  argTypes,
722                                  signature.getExtInfo(),
723                                  paramInfos,
724                                  signature.getRequiredArgs());
725 }
726 
727 namespace clang {
728 namespace CodeGen {
729 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
730 }
731 }
732 
733 /// Arrange the argument and result information for an abstract value
734 /// of a given function type.  This is the method which all of the
735 /// above functions ultimately defer to.
736 const CGFunctionInfo &
737 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
738                                       bool instanceMethod,
739                                       bool chainCall,
740                                       ArrayRef<CanQualType> argTypes,
741                                       FunctionType::ExtInfo info,
742                      ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
743                                       RequiredArgs required) {
744   assert(std::all_of(argTypes.begin(), argTypes.end(),
745                      [](CanQualType T) { return T.isCanonicalAsParam(); }));
746 
747   // Lookup or create unique function info.
748   llvm::FoldingSetNodeID ID;
749   CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
750                           required, resultType, argTypes);
751 
752   void *insertPos = nullptr;
753   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
754   if (FI)
755     return *FI;
756 
757   unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
758 
759   // Construct the function info.  We co-allocate the ArgInfos.
760   FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
761                               paramInfos, resultType, argTypes, required);
762   FunctionInfos.InsertNode(FI, insertPos);
763 
764   bool inserted = FunctionsBeingProcessed.insert(FI).second;
765   (void)inserted;
766   assert(inserted && "Recursively being processed?");
767 
768   // Compute ABI information.
769   if (CC == llvm::CallingConv::SPIR_KERNEL) {
770     // Force target independent argument handling for the host visible
771     // kernel functions.
772     computeSPIRKernelABIInfo(CGM, *FI);
773   } else if (info.getCC() == CC_Swift) {
774     swiftcall::computeABIInfo(CGM, *FI);
775   } else {
776     getABIInfo().computeInfo(*FI);
777   }
778 
779   // Loop over all of the computed argument and return value info.  If any of
780   // them are direct or extend without a specified coerce type, specify the
781   // default now.
782   ABIArgInfo &retInfo = FI->getReturnInfo();
783   if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
784     retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
785 
786   for (auto &I : FI->arguments())
787     if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
788       I.info.setCoerceToType(ConvertType(I.type));
789 
790   bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
791   assert(erased && "Not in set?");
792 
793   return *FI;
794 }
795 
796 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
797                                        bool instanceMethod,
798                                        bool chainCall,
799                                        const FunctionType::ExtInfo &info,
800                                        ArrayRef<ExtParameterInfo> paramInfos,
801                                        CanQualType resultType,
802                                        ArrayRef<CanQualType> argTypes,
803                                        RequiredArgs required) {
804   assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
805 
806   void *buffer =
807     operator new(totalSizeToAlloc<ArgInfo,             ExtParameterInfo>(
808                                   argTypes.size() + 1, paramInfos.size()));
809 
810   CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
811   FI->CallingConvention = llvmCC;
812   FI->EffectiveCallingConvention = llvmCC;
813   FI->ASTCallingConvention = info.getCC();
814   FI->InstanceMethod = instanceMethod;
815   FI->ChainCall = chainCall;
816   FI->NoReturn = info.getNoReturn();
817   FI->ReturnsRetained = info.getProducesResult();
818   FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
819   FI->NoCfCheck = info.getNoCfCheck();
820   FI->Required = required;
821   FI->HasRegParm = info.getHasRegParm();
822   FI->RegParm = info.getRegParm();
823   FI->ArgStruct = nullptr;
824   FI->ArgStructAlign = 0;
825   FI->NumArgs = argTypes.size();
826   FI->HasExtParameterInfos = !paramInfos.empty();
827   FI->getArgsBuffer()[0].type = resultType;
828   for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
829     FI->getArgsBuffer()[i + 1].type = argTypes[i];
830   for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
831     FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
832   return FI;
833 }
834 
835 /***/
836 
837 namespace {
838 // ABIArgInfo::Expand implementation.
839 
840 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
841 struct TypeExpansion {
842   enum TypeExpansionKind {
843     // Elements of constant arrays are expanded recursively.
844     TEK_ConstantArray,
845     // Record fields are expanded recursively (but if record is a union, only
846     // the field with the largest size is expanded).
847     TEK_Record,
848     // For complex types, real and imaginary parts are expanded recursively.
849     TEK_Complex,
850     // All other types are not expandable.
851     TEK_None
852   };
853 
854   const TypeExpansionKind Kind;
855 
856   TypeExpansion(TypeExpansionKind K) : Kind(K) {}
857   virtual ~TypeExpansion() {}
858 };
859 
860 struct ConstantArrayExpansion : TypeExpansion {
861   QualType EltTy;
862   uint64_t NumElts;
863 
864   ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
865       : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
866   static bool classof(const TypeExpansion *TE) {
867     return TE->Kind == TEK_ConstantArray;
868   }
869 };
870 
871 struct RecordExpansion : TypeExpansion {
872   SmallVector<const CXXBaseSpecifier *, 1> Bases;
873 
874   SmallVector<const FieldDecl *, 1> Fields;
875 
876   RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
877                   SmallVector<const FieldDecl *, 1> &&Fields)
878       : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
879         Fields(std::move(Fields)) {}
880   static bool classof(const TypeExpansion *TE) {
881     return TE->Kind == TEK_Record;
882   }
883 };
884 
885 struct ComplexExpansion : TypeExpansion {
886   QualType EltTy;
887 
888   ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
889   static bool classof(const TypeExpansion *TE) {
890     return TE->Kind == TEK_Complex;
891   }
892 };
893 
894 struct NoExpansion : TypeExpansion {
895   NoExpansion() : TypeExpansion(TEK_None) {}
896   static bool classof(const TypeExpansion *TE) {
897     return TE->Kind == TEK_None;
898   }
899 };
900 }  // namespace
901 
902 static std::unique_ptr<TypeExpansion>
903 getTypeExpansion(QualType Ty, const ASTContext &Context) {
904   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
905     return llvm::make_unique<ConstantArrayExpansion>(
906         AT->getElementType(), AT->getSize().getZExtValue());
907   }
908   if (const RecordType *RT = Ty->getAs<RecordType>()) {
909     SmallVector<const CXXBaseSpecifier *, 1> Bases;
910     SmallVector<const FieldDecl *, 1> Fields;
911     const RecordDecl *RD = RT->getDecl();
912     assert(!RD->hasFlexibleArrayMember() &&
913            "Cannot expand structure with flexible array.");
914     if (RD->isUnion()) {
915       // Unions can be here only in degenerative cases - all the fields are same
916       // after flattening. Thus we have to use the "largest" field.
917       const FieldDecl *LargestFD = nullptr;
918       CharUnits UnionSize = CharUnits::Zero();
919 
920       for (const auto *FD : RD->fields()) {
921         if (FD->isZeroLengthBitField(Context))
922           continue;
923         assert(!FD->isBitField() &&
924                "Cannot expand structure with bit-field members.");
925         CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
926         if (UnionSize < FieldSize) {
927           UnionSize = FieldSize;
928           LargestFD = FD;
929         }
930       }
931       if (LargestFD)
932         Fields.push_back(LargestFD);
933     } else {
934       if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
935         assert(!CXXRD->isDynamicClass() &&
936                "cannot expand vtable pointers in dynamic classes");
937         for (const CXXBaseSpecifier &BS : CXXRD->bases())
938           Bases.push_back(&BS);
939       }
940 
941       for (const auto *FD : RD->fields()) {
942         if (FD->isZeroLengthBitField(Context))
943           continue;
944         assert(!FD->isBitField() &&
945                "Cannot expand structure with bit-field members.");
946         Fields.push_back(FD);
947       }
948     }
949     return llvm::make_unique<RecordExpansion>(std::move(Bases),
950                                               std::move(Fields));
951   }
952   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
953     return llvm::make_unique<ComplexExpansion>(CT->getElementType());
954   }
955   return llvm::make_unique<NoExpansion>();
956 }
957 
958 static int getExpansionSize(QualType Ty, const ASTContext &Context) {
959   auto Exp = getTypeExpansion(Ty, Context);
960   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
961     return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
962   }
963   if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
964     int Res = 0;
965     for (auto BS : RExp->Bases)
966       Res += getExpansionSize(BS->getType(), Context);
967     for (auto FD : RExp->Fields)
968       Res += getExpansionSize(FD->getType(), Context);
969     return Res;
970   }
971   if (isa<ComplexExpansion>(Exp.get()))
972     return 2;
973   assert(isa<NoExpansion>(Exp.get()));
974   return 1;
975 }
976 
977 void
978 CodeGenTypes::getExpandedTypes(QualType Ty,
979                                SmallVectorImpl<llvm::Type *>::iterator &TI) {
980   auto Exp = getTypeExpansion(Ty, Context);
981   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
982     for (int i = 0, n = CAExp->NumElts; i < n; i++) {
983       getExpandedTypes(CAExp->EltTy, TI);
984     }
985   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
986     for (auto BS : RExp->Bases)
987       getExpandedTypes(BS->getType(), TI);
988     for (auto FD : RExp->Fields)
989       getExpandedTypes(FD->getType(), TI);
990   } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
991     llvm::Type *EltTy = ConvertType(CExp->EltTy);
992     *TI++ = EltTy;
993     *TI++ = EltTy;
994   } else {
995     assert(isa<NoExpansion>(Exp.get()));
996     *TI++ = ConvertType(Ty);
997   }
998 }
999 
1000 static void forConstantArrayExpansion(CodeGenFunction &CGF,
1001                                       ConstantArrayExpansion *CAE,
1002                                       Address BaseAddr,
1003                                       llvm::function_ref<void(Address)> Fn) {
1004   CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1005   CharUnits EltAlign =
1006     BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1007 
1008   for (int i = 0, n = CAE->NumElts; i < n; i++) {
1009     llvm::Value *EltAddr =
1010       CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
1011     Fn(Address(EltAddr, EltAlign));
1012   }
1013 }
1014 
1015 void CodeGenFunction::ExpandTypeFromArgs(
1016     QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
1017   assert(LV.isSimple() &&
1018          "Unexpected non-simple lvalue during struct expansion.");
1019 
1020   auto Exp = getTypeExpansion(Ty, getContext());
1021   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1022     forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
1023                               [&](Address EltAddr) {
1024       LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1025       ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1026     });
1027   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1028     Address This = LV.getAddress();
1029     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1030       // Perform a single step derived-to-base conversion.
1031       Address Base =
1032           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1033                                 /*NullCheckValue=*/false, SourceLocation());
1034       LValue SubLV = MakeAddrLValue(Base, BS->getType());
1035 
1036       // Recurse onto bases.
1037       ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1038     }
1039     for (auto FD : RExp->Fields) {
1040       // FIXME: What are the right qualifiers here?
1041       LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1042       ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1043     }
1044   } else if (isa<ComplexExpansion>(Exp.get())) {
1045     auto realValue = *AI++;
1046     auto imagValue = *AI++;
1047     EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1048   } else {
1049     assert(isa<NoExpansion>(Exp.get()));
1050     EmitStoreThroughLValue(RValue::get(*AI++), LV);
1051   }
1052 }
1053 
1054 void CodeGenFunction::ExpandTypeToArgs(
1055     QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1056     SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1057   auto Exp = getTypeExpansion(Ty, getContext());
1058   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1059     Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1060                                    : Arg.getKnownRValue().getAggregateAddress();
1061     forConstantArrayExpansion(
1062         *this, CAExp, Addr, [&](Address EltAddr) {
1063           CallArg EltArg = CallArg(
1064               convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1065               CAExp->EltTy);
1066           ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1067                            IRCallArgPos);
1068         });
1069   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1070     Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1071                                    : Arg.getKnownRValue().getAggregateAddress();
1072     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1073       // Perform a single step derived-to-base conversion.
1074       Address Base =
1075           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1076                                 /*NullCheckValue=*/false, SourceLocation());
1077       CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1078 
1079       // Recurse onto bases.
1080       ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1081                        IRCallArgPos);
1082     }
1083 
1084     LValue LV = MakeAddrLValue(This, Ty);
1085     for (auto FD : RExp->Fields) {
1086       CallArg FldArg =
1087           CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1088       ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1089                        IRCallArgPos);
1090     }
1091   } else if (isa<ComplexExpansion>(Exp.get())) {
1092     ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1093     IRCallArgs[IRCallArgPos++] = CV.first;
1094     IRCallArgs[IRCallArgPos++] = CV.second;
1095   } else {
1096     assert(isa<NoExpansion>(Exp.get()));
1097     auto RV = Arg.getKnownRValue();
1098     assert(RV.isScalar() &&
1099            "Unexpected non-scalar rvalue during struct expansion.");
1100 
1101     // Insert a bitcast as needed.
1102     llvm::Value *V = RV.getScalarVal();
1103     if (IRCallArgPos < IRFuncTy->getNumParams() &&
1104         V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1105       V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1106 
1107     IRCallArgs[IRCallArgPos++] = V;
1108   }
1109 }
1110 
1111 /// Create a temporary allocation for the purposes of coercion.
1112 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1113                                            CharUnits MinAlign) {
1114   // Don't use an alignment that's worse than what LLVM would prefer.
1115   auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1116   CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1117 
1118   return CGF.CreateTempAlloca(Ty, Align);
1119 }
1120 
1121 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1122 /// accessing some number of bytes out of it, try to gep into the struct to get
1123 /// at its inner goodness.  Dive as deep as possible without entering an element
1124 /// with an in-memory size smaller than DstSize.
1125 static Address
1126 EnterStructPointerForCoercedAccess(Address SrcPtr,
1127                                    llvm::StructType *SrcSTy,
1128                                    uint64_t DstSize, CodeGenFunction &CGF) {
1129   // We can't dive into a zero-element struct.
1130   if (SrcSTy->getNumElements() == 0) return SrcPtr;
1131 
1132   llvm::Type *FirstElt = SrcSTy->getElementType(0);
1133 
1134   // If the first elt is at least as large as what we're looking for, or if the
1135   // first element is the same size as the whole struct, we can enter it. The
1136   // comparison must be made on the store size and not the alloca size. Using
1137   // the alloca size may overstate the size of the load.
1138   uint64_t FirstEltSize =
1139     CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1140   if (FirstEltSize < DstSize &&
1141       FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1142     return SrcPtr;
1143 
1144   // GEP into the first element.
1145   SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");
1146 
1147   // If the first element is a struct, recurse.
1148   llvm::Type *SrcTy = SrcPtr.getElementType();
1149   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1150     return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1151 
1152   return SrcPtr;
1153 }
1154 
1155 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1156 /// are either integers or pointers.  This does a truncation of the value if it
1157 /// is too large or a zero extension if it is too small.
1158 ///
1159 /// This behaves as if the value were coerced through memory, so on big-endian
1160 /// targets the high bits are preserved in a truncation, while little-endian
1161 /// targets preserve the low bits.
1162 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1163                                              llvm::Type *Ty,
1164                                              CodeGenFunction &CGF) {
1165   if (Val->getType() == Ty)
1166     return Val;
1167 
1168   if (isa<llvm::PointerType>(Val->getType())) {
1169     // If this is Pointer->Pointer avoid conversion to and from int.
1170     if (isa<llvm::PointerType>(Ty))
1171       return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1172 
1173     // Convert the pointer to an integer so we can play with its width.
1174     Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1175   }
1176 
1177   llvm::Type *DestIntTy = Ty;
1178   if (isa<llvm::PointerType>(DestIntTy))
1179     DestIntTy = CGF.IntPtrTy;
1180 
1181   if (Val->getType() != DestIntTy) {
1182     const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1183     if (DL.isBigEndian()) {
1184       // Preserve the high bits on big-endian targets.
1185       // That is what memory coercion does.
1186       uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1187       uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1188 
1189       if (SrcSize > DstSize) {
1190         Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1191         Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1192       } else {
1193         Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1194         Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1195       }
1196     } else {
1197       // Little-endian targets preserve the low bits. No shifts required.
1198       Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1199     }
1200   }
1201 
1202   if (isa<llvm::PointerType>(Ty))
1203     Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1204   return Val;
1205 }
1206 
1207 
1208 
1209 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1210 /// a pointer to an object of type \arg Ty, known to be aligned to
1211 /// \arg SrcAlign bytes.
1212 ///
1213 /// This safely handles the case when the src type is smaller than the
1214 /// destination type; in this situation the values of bits which not
1215 /// present in the src are undefined.
1216 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1217                                       CodeGenFunction &CGF) {
1218   llvm::Type *SrcTy = Src.getElementType();
1219 
1220   // If SrcTy and Ty are the same, just do a load.
1221   if (SrcTy == Ty)
1222     return CGF.Builder.CreateLoad(Src);
1223 
1224   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1225 
1226   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1227     Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
1228     SrcTy = Src.getType()->getElementType();
1229   }
1230 
1231   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1232 
1233   // If the source and destination are integer or pointer types, just do an
1234   // extension or truncation to the desired type.
1235   if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1236       (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1237     llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1238     return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1239   }
1240 
1241   // If load is legal, just bitcast the src pointer.
1242   if (SrcSize >= DstSize) {
1243     // Generally SrcSize is never greater than DstSize, since this means we are
1244     // losing bits. However, this can happen in cases where the structure has
1245     // additional padding, for example due to a user specified alignment.
1246     //
1247     // FIXME: Assert that we aren't truncating non-padding bits when have access
1248     // to that information.
1249     Src = CGF.Builder.CreateBitCast(Src,
1250                                     Ty->getPointerTo(Src.getAddressSpace()));
1251     return CGF.Builder.CreateLoad(Src);
1252   }
1253 
1254   // Otherwise do coercion through memory. This is stupid, but simple.
1255   Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
1256   Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
1257   Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.AllocaInt8PtrTy);
1258   CGF.Builder.CreateMemCpy(Casted, SrcCasted,
1259       llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
1260       false);
1261   return CGF.Builder.CreateLoad(Tmp);
1262 }
1263 
1264 // Function to store a first-class aggregate into memory.  We prefer to
1265 // store the elements rather than the aggregate to be more friendly to
1266 // fast-isel.
1267 // FIXME: Do we need to recurse here?
1268 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
1269                           Address Dest, bool DestIsVolatile) {
1270   // Prefer scalar stores to first-class aggregate stores.
1271   if (llvm::StructType *STy =
1272         dyn_cast<llvm::StructType>(Val->getType())) {
1273     const llvm::StructLayout *Layout =
1274       CGF.CGM.getDataLayout().getStructLayout(STy);
1275 
1276     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1277       auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
1278       Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
1279       llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
1280       CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1281     }
1282   } else {
1283     CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
1284   }
1285 }
1286 
1287 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1288 /// where the source and destination may have different types.  The
1289 /// destination is known to be aligned to \arg DstAlign bytes.
1290 ///
1291 /// This safely handles the case when the src type is larger than the
1292 /// destination type; the upper bits of the src will be lost.
1293 static void CreateCoercedStore(llvm::Value *Src,
1294                                Address Dst,
1295                                bool DstIsVolatile,
1296                                CodeGenFunction &CGF) {
1297   llvm::Type *SrcTy = Src->getType();
1298   llvm::Type *DstTy = Dst.getType()->getElementType();
1299   if (SrcTy == DstTy) {
1300     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1301     return;
1302   }
1303 
1304   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1305 
1306   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1307     Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
1308     DstTy = Dst.getType()->getElementType();
1309   }
1310 
1311   // If the source and destination are integer or pointer types, just do an
1312   // extension or truncation to the desired type.
1313   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1314       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1315     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1316     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1317     return;
1318   }
1319 
1320   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1321 
1322   // If store is legal, just bitcast the src pointer.
1323   if (SrcSize <= DstSize) {
1324     Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1325     BuildAggStore(CGF, Src, Dst, DstIsVolatile);
1326   } else {
1327     // Otherwise do coercion through memory. This is stupid, but
1328     // simple.
1329 
1330     // Generally SrcSize is never greater than DstSize, since this means we are
1331     // losing bits. However, this can happen in cases where the structure has
1332     // additional padding, for example due to a user specified alignment.
1333     //
1334     // FIXME: Assert that we aren't truncating non-padding bits when have access
1335     // to that information.
1336     Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1337     CGF.Builder.CreateStore(Src, Tmp);
1338     Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
1339     Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.AllocaInt8PtrTy);
1340     CGF.Builder.CreateMemCpy(DstCasted, Casted,
1341         llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
1342         false);
1343   }
1344 }
1345 
1346 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1347                                    const ABIArgInfo &info) {
1348   if (unsigned offset = info.getDirectOffset()) {
1349     addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1350     addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1351                                              CharUnits::fromQuantity(offset));
1352     addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1353   }
1354   return addr;
1355 }
1356 
1357 namespace {
1358 
1359 /// Encapsulates information about the way function arguments from
1360 /// CGFunctionInfo should be passed to actual LLVM IR function.
1361 class ClangToLLVMArgMapping {
1362   static const unsigned InvalidIndex = ~0U;
1363   unsigned InallocaArgNo;
1364   unsigned SRetArgNo;
1365   unsigned TotalIRArgs;
1366 
1367   /// Arguments of LLVM IR function corresponding to single Clang argument.
1368   struct IRArgs {
1369     unsigned PaddingArgIndex;
1370     // Argument is expanded to IR arguments at positions
1371     // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1372     unsigned FirstArgIndex;
1373     unsigned NumberOfArgs;
1374 
1375     IRArgs()
1376         : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1377           NumberOfArgs(0) {}
1378   };
1379 
1380   SmallVector<IRArgs, 8> ArgInfo;
1381 
1382 public:
1383   ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1384                         bool OnlyRequiredArgs = false)
1385       : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1386         ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1387     construct(Context, FI, OnlyRequiredArgs);
1388   }
1389 
1390   bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1391   unsigned getInallocaArgNo() const {
1392     assert(hasInallocaArg());
1393     return InallocaArgNo;
1394   }
1395 
1396   bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1397   unsigned getSRetArgNo() const {
1398     assert(hasSRetArg());
1399     return SRetArgNo;
1400   }
1401 
1402   unsigned totalIRArgs() const { return TotalIRArgs; }
1403 
1404   bool hasPaddingArg(unsigned ArgNo) const {
1405     assert(ArgNo < ArgInfo.size());
1406     return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1407   }
1408   unsigned getPaddingArgNo(unsigned ArgNo) const {
1409     assert(hasPaddingArg(ArgNo));
1410     return ArgInfo[ArgNo].PaddingArgIndex;
1411   }
1412 
1413   /// Returns index of first IR argument corresponding to ArgNo, and their
1414   /// quantity.
1415   std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1416     assert(ArgNo < ArgInfo.size());
1417     return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1418                           ArgInfo[ArgNo].NumberOfArgs);
1419   }
1420 
1421 private:
1422   void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1423                  bool OnlyRequiredArgs);
1424 };
1425 
1426 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1427                                       const CGFunctionInfo &FI,
1428                                       bool OnlyRequiredArgs) {
1429   unsigned IRArgNo = 0;
1430   bool SwapThisWithSRet = false;
1431   const ABIArgInfo &RetAI = FI.getReturnInfo();
1432 
1433   if (RetAI.getKind() == ABIArgInfo::Indirect) {
1434     SwapThisWithSRet = RetAI.isSRetAfterThis();
1435     SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1436   }
1437 
1438   unsigned ArgNo = 0;
1439   unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1440   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1441        ++I, ++ArgNo) {
1442     assert(I != FI.arg_end());
1443     QualType ArgType = I->type;
1444     const ABIArgInfo &AI = I->info;
1445     // Collect data about IR arguments corresponding to Clang argument ArgNo.
1446     auto &IRArgs = ArgInfo[ArgNo];
1447 
1448     if (AI.getPaddingType())
1449       IRArgs.PaddingArgIndex = IRArgNo++;
1450 
1451     switch (AI.getKind()) {
1452     case ABIArgInfo::Extend:
1453     case ABIArgInfo::Direct: {
1454       // FIXME: handle sseregparm someday...
1455       llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1456       if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1457         IRArgs.NumberOfArgs = STy->getNumElements();
1458       } else {
1459         IRArgs.NumberOfArgs = 1;
1460       }
1461       break;
1462     }
1463     case ABIArgInfo::Indirect:
1464       IRArgs.NumberOfArgs = 1;
1465       break;
1466     case ABIArgInfo::Ignore:
1467     case ABIArgInfo::InAlloca:
1468       // ignore and inalloca doesn't have matching LLVM parameters.
1469       IRArgs.NumberOfArgs = 0;
1470       break;
1471     case ABIArgInfo::CoerceAndExpand:
1472       IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1473       break;
1474     case ABIArgInfo::Expand:
1475       IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1476       break;
1477     }
1478 
1479     if (IRArgs.NumberOfArgs > 0) {
1480       IRArgs.FirstArgIndex = IRArgNo;
1481       IRArgNo += IRArgs.NumberOfArgs;
1482     }
1483 
1484     // Skip over the sret parameter when it comes second.  We already handled it
1485     // above.
1486     if (IRArgNo == 1 && SwapThisWithSRet)
1487       IRArgNo++;
1488   }
1489   assert(ArgNo == ArgInfo.size());
1490 
1491   if (FI.usesInAlloca())
1492     InallocaArgNo = IRArgNo++;
1493 
1494   TotalIRArgs = IRArgNo;
1495 }
1496 }  // namespace
1497 
1498 /***/
1499 
1500 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1501   const auto &RI = FI.getReturnInfo();
1502   return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1503 }
1504 
1505 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1506   return ReturnTypeUsesSRet(FI) &&
1507          getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1508 }
1509 
1510 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1511   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1512     switch (BT->getKind()) {
1513     default:
1514       return false;
1515     case BuiltinType::Float:
1516       return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1517     case BuiltinType::Double:
1518       return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1519     case BuiltinType::LongDouble:
1520       return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1521     }
1522   }
1523 
1524   return false;
1525 }
1526 
1527 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1528   if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1529     if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1530       if (BT->getKind() == BuiltinType::LongDouble)
1531         return getTarget().useObjCFP2RetForComplexLongDouble();
1532     }
1533   }
1534 
1535   return false;
1536 }
1537 
1538 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1539   const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1540   return GetFunctionType(FI);
1541 }
1542 
1543 llvm::FunctionType *
1544 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1545 
1546   bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1547   (void)Inserted;
1548   assert(Inserted && "Recursively being processed?");
1549 
1550   llvm::Type *resultType = nullptr;
1551   const ABIArgInfo &retAI = FI.getReturnInfo();
1552   switch (retAI.getKind()) {
1553   case ABIArgInfo::Expand:
1554     llvm_unreachable("Invalid ABI kind for return argument");
1555 
1556   case ABIArgInfo::Extend:
1557   case ABIArgInfo::Direct:
1558     resultType = retAI.getCoerceToType();
1559     break;
1560 
1561   case ABIArgInfo::InAlloca:
1562     if (retAI.getInAllocaSRet()) {
1563       // sret things on win32 aren't void, they return the sret pointer.
1564       QualType ret = FI.getReturnType();
1565       llvm::Type *ty = ConvertType(ret);
1566       unsigned addressSpace = Context.getTargetAddressSpace(ret);
1567       resultType = llvm::PointerType::get(ty, addressSpace);
1568     } else {
1569       resultType = llvm::Type::getVoidTy(getLLVMContext());
1570     }
1571     break;
1572 
1573   case ABIArgInfo::Indirect:
1574   case ABIArgInfo::Ignore:
1575     resultType = llvm::Type::getVoidTy(getLLVMContext());
1576     break;
1577 
1578   case ABIArgInfo::CoerceAndExpand:
1579     resultType = retAI.getUnpaddedCoerceAndExpandType();
1580     break;
1581   }
1582 
1583   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1584   SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1585 
1586   // Add type for sret argument.
1587   if (IRFunctionArgs.hasSRetArg()) {
1588     QualType Ret = FI.getReturnType();
1589     llvm::Type *Ty = ConvertType(Ret);
1590     unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1591     ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1592         llvm::PointerType::get(Ty, AddressSpace);
1593   }
1594 
1595   // Add type for inalloca argument.
1596   if (IRFunctionArgs.hasInallocaArg()) {
1597     auto ArgStruct = FI.getArgStruct();
1598     assert(ArgStruct);
1599     ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1600   }
1601 
1602   // Add in all of the required arguments.
1603   unsigned ArgNo = 0;
1604   CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1605                                      ie = it + FI.getNumRequiredArgs();
1606   for (; it != ie; ++it, ++ArgNo) {
1607     const ABIArgInfo &ArgInfo = it->info;
1608 
1609     // Insert a padding type to ensure proper alignment.
1610     if (IRFunctionArgs.hasPaddingArg(ArgNo))
1611       ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1612           ArgInfo.getPaddingType();
1613 
1614     unsigned FirstIRArg, NumIRArgs;
1615     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1616 
1617     switch (ArgInfo.getKind()) {
1618     case ABIArgInfo::Ignore:
1619     case ABIArgInfo::InAlloca:
1620       assert(NumIRArgs == 0);
1621       break;
1622 
1623     case ABIArgInfo::Indirect: {
1624       assert(NumIRArgs == 1);
1625       // indirect arguments are always on the stack, which is alloca addr space.
1626       llvm::Type *LTy = ConvertTypeForMem(it->type);
1627       ArgTypes[FirstIRArg] = LTy->getPointerTo(
1628           CGM.getDataLayout().getAllocaAddrSpace());
1629       break;
1630     }
1631 
1632     case ABIArgInfo::Extend:
1633     case ABIArgInfo::Direct: {
1634       // Fast-isel and the optimizer generally like scalar values better than
1635       // FCAs, so we flatten them if this is safe to do for this argument.
1636       llvm::Type *argType = ArgInfo.getCoerceToType();
1637       llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1638       if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1639         assert(NumIRArgs == st->getNumElements());
1640         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1641           ArgTypes[FirstIRArg + i] = st->getElementType(i);
1642       } else {
1643         assert(NumIRArgs == 1);
1644         ArgTypes[FirstIRArg] = argType;
1645       }
1646       break;
1647     }
1648 
1649     case ABIArgInfo::CoerceAndExpand: {
1650       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1651       for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1652         *ArgTypesIter++ = EltTy;
1653       }
1654       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1655       break;
1656     }
1657 
1658     case ABIArgInfo::Expand:
1659       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1660       getExpandedTypes(it->type, ArgTypesIter);
1661       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1662       break;
1663     }
1664   }
1665 
1666   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1667   assert(Erased && "Not in set?");
1668 
1669   return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1670 }
1671 
1672 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1673   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1674   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1675 
1676   if (!isFuncTypeConvertible(FPT))
1677     return llvm::StructType::get(getLLVMContext());
1678 
1679   const CGFunctionInfo *Info;
1680   if (isa<CXXDestructorDecl>(MD))
1681     Info =
1682         &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
1683   else
1684     Info = &arrangeCXXMethodDeclaration(MD);
1685   return GetFunctionType(*Info);
1686 }
1687 
1688 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1689                                                llvm::AttrBuilder &FuncAttrs,
1690                                                const FunctionProtoType *FPT) {
1691   if (!FPT)
1692     return;
1693 
1694   if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1695       FPT->isNothrow())
1696     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1697 }
1698 
1699 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1700                                                bool AttrOnCallSite,
1701                                                llvm::AttrBuilder &FuncAttrs) {
1702   // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1703   if (!HasOptnone) {
1704     if (CodeGenOpts.OptimizeSize)
1705       FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1706     if (CodeGenOpts.OptimizeSize == 2)
1707       FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1708   }
1709 
1710   if (CodeGenOpts.DisableRedZone)
1711     FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1712   if (CodeGenOpts.NoImplicitFloat)
1713     FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1714 
1715   if (AttrOnCallSite) {
1716     // Attributes that should go on the call site only.
1717     if (!CodeGenOpts.SimplifyLibCalls ||
1718         CodeGenOpts.isNoBuiltinFunc(Name.data()))
1719       FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1720     if (!CodeGenOpts.TrapFuncName.empty())
1721       FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1722   } else {
1723     // Attributes that should go on the function, but not the call site.
1724     if (!CodeGenOpts.DisableFPElim) {
1725       FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1726     } else if (CodeGenOpts.OmitLeafFramePointer) {
1727       FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1728       FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1729     } else {
1730       FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1731       FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1732     }
1733 
1734     FuncAttrs.addAttribute("less-precise-fpmad",
1735                            llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1736 
1737     if (CodeGenOpts.NullPointerIsValid)
1738       FuncAttrs.addAttribute("null-pointer-is-valid", "true");
1739     if (!CodeGenOpts.FPDenormalMode.empty())
1740       FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1741 
1742     FuncAttrs.addAttribute("no-trapping-math",
1743                            llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1744 
1745     // Strict (compliant) code is the default, so only add this attribute to
1746     // indicate that we are trying to workaround a problem case.
1747     if (!CodeGenOpts.StrictFloatCastOverflow)
1748       FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1749 
1750     // TODO: Are these all needed?
1751     // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1752     FuncAttrs.addAttribute("no-infs-fp-math",
1753                            llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1754     FuncAttrs.addAttribute("no-nans-fp-math",
1755                            llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1756     FuncAttrs.addAttribute("unsafe-fp-math",
1757                            llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1758     FuncAttrs.addAttribute("use-soft-float",
1759                            llvm::toStringRef(CodeGenOpts.SoftFloat));
1760     FuncAttrs.addAttribute("stack-protector-buffer-size",
1761                            llvm::utostr(CodeGenOpts.SSPBufferSize));
1762     FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1763                            llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1764     FuncAttrs.addAttribute(
1765         "correctly-rounded-divide-sqrt-fp-math",
1766         llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1767 
1768     if (getLangOpts().OpenCL)
1769       FuncAttrs.addAttribute("denorms-are-zero",
1770                              llvm::toStringRef(CodeGenOpts.FlushDenorm));
1771 
1772     // TODO: Reciprocal estimate codegen options should apply to instructions?
1773     const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1774     if (!Recips.empty())
1775       FuncAttrs.addAttribute("reciprocal-estimates",
1776                              llvm::join(Recips, ","));
1777 
1778     if (!CodeGenOpts.PreferVectorWidth.empty() &&
1779         CodeGenOpts.PreferVectorWidth != "none")
1780       FuncAttrs.addAttribute("prefer-vector-width",
1781                              CodeGenOpts.PreferVectorWidth);
1782 
1783     if (CodeGenOpts.StackRealignment)
1784       FuncAttrs.addAttribute("stackrealign");
1785     if (CodeGenOpts.Backchain)
1786       FuncAttrs.addAttribute("backchain");
1787 
1788     if (CodeGenOpts.SpeculativeLoadHardening)
1789       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1790   }
1791 
1792   if (getLangOpts().assumeFunctionsAreConvergent()) {
1793     // Conservatively, mark all functions and calls in CUDA and OpenCL as
1794     // convergent (meaning, they may call an intrinsically convergent op, such
1795     // as __syncthreads() / barrier(), and so can't have certain optimizations
1796     // applied around them).  LLVM will remove this attribute where it safely
1797     // can.
1798     FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1799   }
1800 
1801   if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1802     // Exceptions aren't supported in CUDA device code.
1803     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1804 
1805     // Respect -fcuda-flush-denormals-to-zero.
1806     if (CodeGenOpts.FlushDenorm)
1807       FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1808   }
1809 }
1810 
1811 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1812   llvm::AttrBuilder FuncAttrs;
1813   ConstructDefaultFnAttrList(F.getName(),
1814                              F.hasFnAttribute(llvm::Attribute::OptimizeNone),
1815                              /* AttrOnCallsite = */ false, FuncAttrs);
1816   F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1817 }
1818 
1819 void CodeGenModule::ConstructAttributeList(
1820     StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1821     llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1822   llvm::AttrBuilder FuncAttrs;
1823   llvm::AttrBuilder RetAttrs;
1824 
1825   CallingConv = FI.getEffectiveCallingConvention();
1826   if (FI.isNoReturn())
1827     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1828 
1829   // If we have information about the function prototype, we can learn
1830   // attributes from there.
1831   AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1832                                      CalleeInfo.getCalleeFunctionProtoType());
1833 
1834   const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
1835 
1836   bool HasOptnone = false;
1837   // FIXME: handle sseregparm someday...
1838   if (TargetDecl) {
1839     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1840       FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1841     if (TargetDecl->hasAttr<NoThrowAttr>())
1842       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1843     if (TargetDecl->hasAttr<NoReturnAttr>())
1844       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1845     if (TargetDecl->hasAttr<ColdAttr>())
1846       FuncAttrs.addAttribute(llvm::Attribute::Cold);
1847     if (TargetDecl->hasAttr<NoDuplicateAttr>())
1848       FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1849     if (TargetDecl->hasAttr<ConvergentAttr>())
1850       FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1851 
1852     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1853       AddAttributesFromFunctionProtoType(
1854           getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1855       // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1856       // These attributes are not inherited by overloads.
1857       const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1858       if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1859         FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1860     }
1861 
1862     // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1863     if (TargetDecl->hasAttr<ConstAttr>()) {
1864       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1865       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1866     } else if (TargetDecl->hasAttr<PureAttr>()) {
1867       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1868       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1869     } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1870       FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1871       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1872     }
1873     if (TargetDecl->hasAttr<RestrictAttr>())
1874       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1875     if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1876         !CodeGenOpts.NullPointerIsValid)
1877       RetAttrs.addAttribute(llvm::Attribute::NonNull);
1878     if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1879       FuncAttrs.addAttribute("no_caller_saved_registers");
1880     if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1881       FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1882 
1883     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1884     if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1885       Optional<unsigned> NumElemsParam;
1886       if (AllocSize->getNumElemsParam().isValid())
1887         NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1888       FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1889                                  NumElemsParam);
1890     }
1891   }
1892 
1893   ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1894 
1895   if (CodeGenOpts.EnableSegmentedStacks &&
1896       !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1897     FuncAttrs.addAttribute("split-stack");
1898 
1899   // Add NonLazyBind attribute to function declarations when -fno-plt
1900   // is used.
1901   if (TargetDecl && CodeGenOpts.NoPLT) {
1902     if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1903       if (!Fn->isDefined() && !AttrOnCallSite) {
1904         FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1905       }
1906     }
1907   }
1908 
1909   if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1910     if (getLangOpts().OpenCLVersion <= 120) {
1911       // OpenCL v1.2 Work groups are always uniform
1912       FuncAttrs.addAttribute("uniform-work-group-size", "true");
1913     } else {
1914       // OpenCL v2.0 Work groups may be whether uniform or not.
1915       // '-cl-uniform-work-group-size' compile option gets a hint
1916       // to the compiler that the global work-size be a multiple of
1917       // the work-group size specified to clEnqueueNDRangeKernel
1918       // (i.e. work groups are uniform).
1919       FuncAttrs.addAttribute("uniform-work-group-size",
1920                              llvm::toStringRef(CodeGenOpts.UniformWGSize));
1921     }
1922   }
1923 
1924   if (!AttrOnCallSite) {
1925     bool DisableTailCalls = false;
1926 
1927     if (CodeGenOpts.DisableTailCalls)
1928       DisableTailCalls = true;
1929     else if (TargetDecl) {
1930       if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1931           TargetDecl->hasAttr<AnyX86InterruptAttr>())
1932         DisableTailCalls = true;
1933       else if (CodeGenOpts.NoEscapingBlockTailCalls) {
1934         if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
1935           if (!BD->doesNotEscape())
1936             DisableTailCalls = true;
1937       }
1938     }
1939 
1940     FuncAttrs.addAttribute("disable-tail-calls",
1941                            llvm::toStringRef(DisableTailCalls));
1942     GetCPUAndFeaturesAttributes(TargetDecl, FuncAttrs);
1943   }
1944 
1945   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1946 
1947   QualType RetTy = FI.getReturnType();
1948   const ABIArgInfo &RetAI = FI.getReturnInfo();
1949   switch (RetAI.getKind()) {
1950   case ABIArgInfo::Extend:
1951     if (RetAI.isSignExt())
1952       RetAttrs.addAttribute(llvm::Attribute::SExt);
1953     else
1954       RetAttrs.addAttribute(llvm::Attribute::ZExt);
1955     LLVM_FALLTHROUGH;
1956   case ABIArgInfo::Direct:
1957     if (RetAI.getInReg())
1958       RetAttrs.addAttribute(llvm::Attribute::InReg);
1959     break;
1960   case ABIArgInfo::Ignore:
1961     break;
1962 
1963   case ABIArgInfo::InAlloca:
1964   case ABIArgInfo::Indirect: {
1965     // inalloca and sret disable readnone and readonly
1966     FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1967       .removeAttribute(llvm::Attribute::ReadNone);
1968     break;
1969   }
1970 
1971   case ABIArgInfo::CoerceAndExpand:
1972     break;
1973 
1974   case ABIArgInfo::Expand:
1975     llvm_unreachable("Invalid ABI kind for return argument");
1976   }
1977 
1978   if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1979     QualType PTy = RefTy->getPointeeType();
1980     if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1981       RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1982                                         .getQuantity());
1983     else if (getContext().getTargetAddressSpace(PTy) == 0 &&
1984              !CodeGenOpts.NullPointerIsValid)
1985       RetAttrs.addAttribute(llvm::Attribute::NonNull);
1986   }
1987 
1988   bool hasUsedSRet = false;
1989   SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
1990 
1991   // Attach attributes to sret.
1992   if (IRFunctionArgs.hasSRetArg()) {
1993     llvm::AttrBuilder SRETAttrs;
1994     if (!RetAI.getSuppressSRet())
1995       SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1996     hasUsedSRet = true;
1997     if (RetAI.getInReg())
1998       SRETAttrs.addAttribute(llvm::Attribute::InReg);
1999     ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2000         llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2001   }
2002 
2003   // Attach attributes to inalloca argument.
2004   if (IRFunctionArgs.hasInallocaArg()) {
2005     llvm::AttrBuilder Attrs;
2006     Attrs.addAttribute(llvm::Attribute::InAlloca);
2007     ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2008         llvm::AttributeSet::get(getLLVMContext(), Attrs);
2009   }
2010 
2011   unsigned ArgNo = 0;
2012   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2013                                           E = FI.arg_end();
2014        I != E; ++I, ++ArgNo) {
2015     QualType ParamType = I->type;
2016     const ABIArgInfo &AI = I->info;
2017     llvm::AttrBuilder Attrs;
2018 
2019     // Add attribute for padding argument, if necessary.
2020     if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2021       if (AI.getPaddingInReg()) {
2022         ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2023             llvm::AttributeSet::get(
2024                 getLLVMContext(),
2025                 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2026       }
2027     }
2028 
2029     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2030     // have the corresponding parameter variable.  It doesn't make
2031     // sense to do it here because parameters are so messed up.
2032     switch (AI.getKind()) {
2033     case ABIArgInfo::Extend:
2034       if (AI.isSignExt())
2035         Attrs.addAttribute(llvm::Attribute::SExt);
2036       else
2037         Attrs.addAttribute(llvm::Attribute::ZExt);
2038       LLVM_FALLTHROUGH;
2039     case ABIArgInfo::Direct:
2040       if (ArgNo == 0 && FI.isChainCall())
2041         Attrs.addAttribute(llvm::Attribute::Nest);
2042       else if (AI.getInReg())
2043         Attrs.addAttribute(llvm::Attribute::InReg);
2044       break;
2045 
2046     case ABIArgInfo::Indirect: {
2047       if (AI.getInReg())
2048         Attrs.addAttribute(llvm::Attribute::InReg);
2049 
2050       if (AI.getIndirectByVal())
2051         Attrs.addAttribute(llvm::Attribute::ByVal);
2052 
2053       CharUnits Align = AI.getIndirectAlign();
2054 
2055       // In a byval argument, it is important that the required
2056       // alignment of the type is honored, as LLVM might be creating a
2057       // *new* stack object, and needs to know what alignment to give
2058       // it. (Sometimes it can deduce a sensible alignment on its own,
2059       // but not if clang decides it must emit a packed struct, or the
2060       // user specifies increased alignment requirements.)
2061       //
2062       // This is different from indirect *not* byval, where the object
2063       // exists already, and the align attribute is purely
2064       // informative.
2065       assert(!Align.isZero());
2066 
2067       // For now, only add this when we have a byval argument.
2068       // TODO: be less lazy about updating test cases.
2069       if (AI.getIndirectByVal())
2070         Attrs.addAlignmentAttr(Align.getQuantity());
2071 
2072       // byval disables readnone and readonly.
2073       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2074         .removeAttribute(llvm::Attribute::ReadNone);
2075       break;
2076     }
2077     case ABIArgInfo::Ignore:
2078     case ABIArgInfo::Expand:
2079     case ABIArgInfo::CoerceAndExpand:
2080       break;
2081 
2082     case ABIArgInfo::InAlloca:
2083       // inalloca disables readnone and readonly.
2084       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2085           .removeAttribute(llvm::Attribute::ReadNone);
2086       continue;
2087     }
2088 
2089     if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2090       QualType PTy = RefTy->getPointeeType();
2091       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2092         Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2093                                        .getQuantity());
2094       else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2095                !CodeGenOpts.NullPointerIsValid)
2096         Attrs.addAttribute(llvm::Attribute::NonNull);
2097     }
2098 
2099     switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2100     case ParameterABI::Ordinary:
2101       break;
2102 
2103     case ParameterABI::SwiftIndirectResult: {
2104       // Add 'sret' if we haven't already used it for something, but
2105       // only if the result is void.
2106       if (!hasUsedSRet && RetTy->isVoidType()) {
2107         Attrs.addAttribute(llvm::Attribute::StructRet);
2108         hasUsedSRet = true;
2109       }
2110 
2111       // Add 'noalias' in either case.
2112       Attrs.addAttribute(llvm::Attribute::NoAlias);
2113 
2114       // Add 'dereferenceable' and 'alignment'.
2115       auto PTy = ParamType->getPointeeType();
2116       if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2117         auto info = getContext().getTypeInfoInChars(PTy);
2118         Attrs.addDereferenceableAttr(info.first.getQuantity());
2119         Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2120                                                  info.second.getQuantity()));
2121       }
2122       break;
2123     }
2124 
2125     case ParameterABI::SwiftErrorResult:
2126       Attrs.addAttribute(llvm::Attribute::SwiftError);
2127       break;
2128 
2129     case ParameterABI::SwiftContext:
2130       Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2131       break;
2132     }
2133 
2134     if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2135       Attrs.addAttribute(llvm::Attribute::NoCapture);
2136 
2137     if (Attrs.hasAttributes()) {
2138       unsigned FirstIRArg, NumIRArgs;
2139       std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2140       for (unsigned i = 0; i < NumIRArgs; i++)
2141         ArgAttrs[FirstIRArg + i] =
2142             llvm::AttributeSet::get(getLLVMContext(), Attrs);
2143     }
2144   }
2145   assert(ArgNo == FI.arg_size());
2146 
2147   AttrList = llvm::AttributeList::get(
2148       getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2149       llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2150 }
2151 
2152 /// An argument came in as a promoted argument; demote it back to its
2153 /// declared type.
2154 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2155                                          const VarDecl *var,
2156                                          llvm::Value *value) {
2157   llvm::Type *varType = CGF.ConvertType(var->getType());
2158 
2159   // This can happen with promotions that actually don't change the
2160   // underlying type, like the enum promotions.
2161   if (value->getType() == varType) return value;
2162 
2163   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2164          && "unexpected promotion type");
2165 
2166   if (isa<llvm::IntegerType>(varType))
2167     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2168 
2169   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2170 }
2171 
2172 /// Returns the attribute (either parameter attribute, or function
2173 /// attribute), which declares argument ArgNo to be non-null.
2174 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2175                                          QualType ArgType, unsigned ArgNo) {
2176   // FIXME: __attribute__((nonnull)) can also be applied to:
2177   //   - references to pointers, where the pointee is known to be
2178   //     nonnull (apparently a Clang extension)
2179   //   - transparent unions containing pointers
2180   // In the former case, LLVM IR cannot represent the constraint. In
2181   // the latter case, we have no guarantee that the transparent union
2182   // is in fact passed as a pointer.
2183   if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2184     return nullptr;
2185   // First, check attribute on parameter itself.
2186   if (PVD) {
2187     if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2188       return ParmNNAttr;
2189   }
2190   // Check function attributes.
2191   if (!FD)
2192     return nullptr;
2193   for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2194     if (NNAttr->isNonNull(ArgNo))
2195       return NNAttr;
2196   }
2197   return nullptr;
2198 }
2199 
2200 namespace {
2201   struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2202     Address Temp;
2203     Address Arg;
2204     CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2205     void Emit(CodeGenFunction &CGF, Flags flags) override {
2206       llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2207       CGF.Builder.CreateStore(errorValue, Arg);
2208     }
2209   };
2210 }
2211 
2212 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2213                                          llvm::Function *Fn,
2214                                          const FunctionArgList &Args) {
2215   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2216     // Naked functions don't have prologues.
2217     return;
2218 
2219   // If this is an implicit-return-zero function, go ahead and
2220   // initialize the return value.  TODO: it might be nice to have
2221   // a more general mechanism for this that didn't require synthesized
2222   // return statements.
2223   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2224     if (FD->hasImplicitReturnZero()) {
2225       QualType RetTy = FD->getReturnType().getUnqualifiedType();
2226       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2227       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2228       Builder.CreateStore(Zero, ReturnValue);
2229     }
2230   }
2231 
2232   // FIXME: We no longer need the types from FunctionArgList; lift up and
2233   // simplify.
2234 
2235   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2236   // Flattened function arguments.
2237   SmallVector<llvm::Value *, 16> FnArgs;
2238   FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2239   for (auto &Arg : Fn->args()) {
2240     FnArgs.push_back(&Arg);
2241   }
2242   assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2243 
2244   // If we're using inalloca, all the memory arguments are GEPs off of the last
2245   // parameter, which is a pointer to the complete memory area.
2246   Address ArgStruct = Address::invalid();
2247   const llvm::StructLayout *ArgStructLayout = nullptr;
2248   if (IRFunctionArgs.hasInallocaArg()) {
2249     ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2250     ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2251                         FI.getArgStructAlignment());
2252 
2253     assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2254   }
2255 
2256   // Name the struct return parameter.
2257   if (IRFunctionArgs.hasSRetArg()) {
2258     auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2259     AI->setName("agg.result");
2260     AI->addAttr(llvm::Attribute::NoAlias);
2261   }
2262 
2263   // Track if we received the parameter as a pointer (indirect, byval, or
2264   // inalloca).  If already have a pointer, EmitParmDecl doesn't need to copy it
2265   // into a local alloca for us.
2266   SmallVector<ParamValue, 16> ArgVals;
2267   ArgVals.reserve(Args.size());
2268 
2269   // Create a pointer value for every parameter declaration.  This usually
2270   // entails copying one or more LLVM IR arguments into an alloca.  Don't push
2271   // any cleanups or do anything that might unwind.  We do that separately, so
2272   // we can push the cleanups in the correct order for the ABI.
2273   assert(FI.arg_size() == Args.size() &&
2274          "Mismatch between function signature & arguments.");
2275   unsigned ArgNo = 0;
2276   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2277   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2278        i != e; ++i, ++info_it, ++ArgNo) {
2279     const VarDecl *Arg = *i;
2280     const ABIArgInfo &ArgI = info_it->info;
2281 
2282     bool isPromoted =
2283       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2284     // We are converting from ABIArgInfo type to VarDecl type directly, unless
2285     // the parameter is promoted. In this case we convert to
2286     // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2287     QualType Ty = isPromoted ? info_it->type : Arg->getType();
2288     assert(hasScalarEvaluationKind(Ty) ==
2289            hasScalarEvaluationKind(Arg->getType()));
2290 
2291     unsigned FirstIRArg, NumIRArgs;
2292     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2293 
2294     switch (ArgI.getKind()) {
2295     case ABIArgInfo::InAlloca: {
2296       assert(NumIRArgs == 0);
2297       auto FieldIndex = ArgI.getInAllocaFieldIndex();
2298       CharUnits FieldOffset =
2299         CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
2300       Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2301                                           Arg->getName());
2302       ArgVals.push_back(ParamValue::forIndirect(V));
2303       break;
2304     }
2305 
2306     case ABIArgInfo::Indirect: {
2307       assert(NumIRArgs == 1);
2308       Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2309 
2310       if (!hasScalarEvaluationKind(Ty)) {
2311         // Aggregates and complex variables are accessed by reference.  All we
2312         // need to do is realign the value, if requested.
2313         Address V = ParamAddr;
2314         if (ArgI.getIndirectRealign()) {
2315           Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2316 
2317           // Copy from the incoming argument pointer to the temporary with the
2318           // appropriate alignment.
2319           //
2320           // FIXME: We should have a common utility for generating an aggregate
2321           // copy.
2322           CharUnits Size = getContext().getTypeSizeInChars(Ty);
2323           auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2324           Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2325           Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2326           Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2327           V = AlignedTemp;
2328         }
2329         ArgVals.push_back(ParamValue::forIndirect(V));
2330       } else {
2331         // Load scalar value from indirect argument.
2332         llvm::Value *V =
2333             EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2334 
2335         if (isPromoted)
2336           V = emitArgumentDemotion(*this, Arg, V);
2337         ArgVals.push_back(ParamValue::forDirect(V));
2338       }
2339       break;
2340     }
2341 
2342     case ABIArgInfo::Extend:
2343     case ABIArgInfo::Direct: {
2344 
2345       // If we have the trivial case, handle it with no muss and fuss.
2346       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2347           ArgI.getCoerceToType() == ConvertType(Ty) &&
2348           ArgI.getDirectOffset() == 0) {
2349         assert(NumIRArgs == 1);
2350         llvm::Value *V = FnArgs[FirstIRArg];
2351         auto AI = cast<llvm::Argument>(V);
2352 
2353         if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2354           if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2355                              PVD->getFunctionScopeIndex()) &&
2356               !CGM.getCodeGenOpts().NullPointerIsValid)
2357             AI->addAttr(llvm::Attribute::NonNull);
2358 
2359           QualType OTy = PVD->getOriginalType();
2360           if (const auto *ArrTy =
2361               getContext().getAsConstantArrayType(OTy)) {
2362             // A C99 array parameter declaration with the static keyword also
2363             // indicates dereferenceability, and if the size is constant we can
2364             // use the dereferenceable attribute (which requires the size in
2365             // bytes).
2366             if (ArrTy->getSizeModifier() == ArrayType::Static) {
2367               QualType ETy = ArrTy->getElementType();
2368               uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2369               if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2370                   ArrSize) {
2371                 llvm::AttrBuilder Attrs;
2372                 Attrs.addDereferenceableAttr(
2373                   getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2374                 AI->addAttrs(Attrs);
2375               } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
2376                          !CGM.getCodeGenOpts().NullPointerIsValid) {
2377                 AI->addAttr(llvm::Attribute::NonNull);
2378               }
2379             }
2380           } else if (const auto *ArrTy =
2381                      getContext().getAsVariableArrayType(OTy)) {
2382             // For C99 VLAs with the static keyword, we don't know the size so
2383             // we can't use the dereferenceable attribute, but in addrspace(0)
2384             // we know that it must be nonnull.
2385             if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2386                 !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
2387                 !CGM.getCodeGenOpts().NullPointerIsValid)
2388               AI->addAttr(llvm::Attribute::NonNull);
2389           }
2390 
2391           const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2392           if (!AVAttr)
2393             if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2394               AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2395           if (AVAttr) {
2396             llvm::Value *AlignmentValue =
2397               EmitScalarExpr(AVAttr->getAlignment());
2398             llvm::ConstantInt *AlignmentCI =
2399               cast<llvm::ConstantInt>(AlignmentValue);
2400             unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2401                                           +llvm::Value::MaximumAlignment);
2402             AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2403           }
2404         }
2405 
2406         if (Arg->getType().isRestrictQualified())
2407           AI->addAttr(llvm::Attribute::NoAlias);
2408 
2409         // LLVM expects swifterror parameters to be used in very restricted
2410         // ways.  Copy the value into a less-restricted temporary.
2411         if (FI.getExtParameterInfo(ArgNo).getABI()
2412               == ParameterABI::SwiftErrorResult) {
2413           QualType pointeeTy = Ty->getPointeeType();
2414           assert(pointeeTy->isPointerType());
2415           Address temp =
2416             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2417           Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2418           llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2419           Builder.CreateStore(incomingErrorValue, temp);
2420           V = temp.getPointer();
2421 
2422           // Push a cleanup to copy the value back at the end of the function.
2423           // The convention does not guarantee that the value will be written
2424           // back if the function exits with an unwind exception.
2425           EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2426         }
2427 
2428         // Ensure the argument is the correct type.
2429         if (V->getType() != ArgI.getCoerceToType())
2430           V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2431 
2432         if (isPromoted)
2433           V = emitArgumentDemotion(*this, Arg, V);
2434 
2435         // Because of merging of function types from multiple decls it is
2436         // possible for the type of an argument to not match the corresponding
2437         // type in the function type. Since we are codegening the callee
2438         // in here, add a cast to the argument type.
2439         llvm::Type *LTy = ConvertType(Arg->getType());
2440         if (V->getType() != LTy)
2441           V = Builder.CreateBitCast(V, LTy);
2442 
2443         ArgVals.push_back(ParamValue::forDirect(V));
2444         break;
2445       }
2446 
2447       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2448                                      Arg->getName());
2449 
2450       // Pointer to store into.
2451       Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2452 
2453       // Fast-isel and the optimizer generally like scalar values better than
2454       // FCAs, so we flatten them if this is safe to do for this argument.
2455       llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2456       if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2457           STy->getNumElements() > 1) {
2458         auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2459         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2460         llvm::Type *DstTy = Ptr.getElementType();
2461         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2462 
2463         Address AddrToStoreInto = Address::invalid();
2464         if (SrcSize <= DstSize) {
2465           AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2466         } else {
2467           AddrToStoreInto =
2468             CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2469         }
2470 
2471         assert(STy->getNumElements() == NumIRArgs);
2472         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2473           auto AI = FnArgs[FirstIRArg + i];
2474           AI->setName(Arg->getName() + ".coerce" + Twine(i));
2475           auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2476           Address EltPtr =
2477             Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2478           Builder.CreateStore(AI, EltPtr);
2479         }
2480 
2481         if (SrcSize > DstSize) {
2482           Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2483         }
2484 
2485       } else {
2486         // Simple case, just do a coerced store of the argument into the alloca.
2487         assert(NumIRArgs == 1);
2488         auto AI = FnArgs[FirstIRArg];
2489         AI->setName(Arg->getName() + ".coerce");
2490         CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2491       }
2492 
2493       // Match to what EmitParmDecl is expecting for this type.
2494       if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2495         llvm::Value *V =
2496             EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2497         if (isPromoted)
2498           V = emitArgumentDemotion(*this, Arg, V);
2499         ArgVals.push_back(ParamValue::forDirect(V));
2500       } else {
2501         ArgVals.push_back(ParamValue::forIndirect(Alloca));
2502       }
2503       break;
2504     }
2505 
2506     case ABIArgInfo::CoerceAndExpand: {
2507       // Reconstruct into a temporary.
2508       Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2509       ArgVals.push_back(ParamValue::forIndirect(alloca));
2510 
2511       auto coercionType = ArgI.getCoerceAndExpandType();
2512       alloca = Builder.CreateElementBitCast(alloca, coercionType);
2513       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2514 
2515       unsigned argIndex = FirstIRArg;
2516       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2517         llvm::Type *eltType = coercionType->getElementType(i);
2518         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2519           continue;
2520 
2521         auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2522         auto elt = FnArgs[argIndex++];
2523         Builder.CreateStore(elt, eltAddr);
2524       }
2525       assert(argIndex == FirstIRArg + NumIRArgs);
2526       break;
2527     }
2528 
2529     case ABIArgInfo::Expand: {
2530       // If this structure was expanded into multiple arguments then
2531       // we need to create a temporary and reconstruct it from the
2532       // arguments.
2533       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2534       LValue LV = MakeAddrLValue(Alloca, Ty);
2535       ArgVals.push_back(ParamValue::forIndirect(Alloca));
2536 
2537       auto FnArgIter = FnArgs.begin() + FirstIRArg;
2538       ExpandTypeFromArgs(Ty, LV, FnArgIter);
2539       assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2540       for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2541         auto AI = FnArgs[FirstIRArg + i];
2542         AI->setName(Arg->getName() + "." + Twine(i));
2543       }
2544       break;
2545     }
2546 
2547     case ABIArgInfo::Ignore:
2548       assert(NumIRArgs == 0);
2549       // Initialize the local variable appropriately.
2550       if (!hasScalarEvaluationKind(Ty)) {
2551         ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2552       } else {
2553         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2554         ArgVals.push_back(ParamValue::forDirect(U));
2555       }
2556       break;
2557     }
2558   }
2559 
2560   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2561     for (int I = Args.size() - 1; I >= 0; --I)
2562       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2563   } else {
2564     for (unsigned I = 0, E = Args.size(); I != E; ++I)
2565       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2566   }
2567 }
2568 
2569 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2570   while (insn->use_empty()) {
2571     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2572     if (!bitcast) return;
2573 
2574     // This is "safe" because we would have used a ConstantExpr otherwise.
2575     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2576     bitcast->eraseFromParent();
2577   }
2578 }
2579 
2580 /// Try to emit a fused autorelease of a return result.
2581 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2582                                                     llvm::Value *result) {
2583   // We must be immediately followed the cast.
2584   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2585   if (BB->empty()) return nullptr;
2586   if (&BB->back() != result) return nullptr;
2587 
2588   llvm::Type *resultType = result->getType();
2589 
2590   // result is in a BasicBlock and is therefore an Instruction.
2591   llvm::Instruction *generator = cast<llvm::Instruction>(result);
2592 
2593   SmallVector<llvm::Instruction *, 4> InstsToKill;
2594 
2595   // Look for:
2596   //  %generator = bitcast %type1* %generator2 to %type2*
2597   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2598     // We would have emitted this as a constant if the operand weren't
2599     // an Instruction.
2600     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2601 
2602     // Require the generator to be immediately followed by the cast.
2603     if (generator->getNextNode() != bitcast)
2604       return nullptr;
2605 
2606     InstsToKill.push_back(bitcast);
2607   }
2608 
2609   // Look for:
2610   //   %generator = call i8* @objc_retain(i8* %originalResult)
2611   // or
2612   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2613   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2614   if (!call) return nullptr;
2615 
2616   bool doRetainAutorelease;
2617 
2618   if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2619     doRetainAutorelease = true;
2620   } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2621                                           .objc_retainAutoreleasedReturnValue) {
2622     doRetainAutorelease = false;
2623 
2624     // If we emitted an assembly marker for this call (and the
2625     // ARCEntrypoints field should have been set if so), go looking
2626     // for that call.  If we can't find it, we can't do this
2627     // optimization.  But it should always be the immediately previous
2628     // instruction, unless we needed bitcasts around the call.
2629     if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2630       llvm::Instruction *prev = call->getPrevNode();
2631       assert(prev);
2632       if (isa<llvm::BitCastInst>(prev)) {
2633         prev = prev->getPrevNode();
2634         assert(prev);
2635       }
2636       assert(isa<llvm::CallInst>(prev));
2637       assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2638                CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2639       InstsToKill.push_back(prev);
2640     }
2641   } else {
2642     return nullptr;
2643   }
2644 
2645   result = call->getArgOperand(0);
2646   InstsToKill.push_back(call);
2647 
2648   // Keep killing bitcasts, for sanity.  Note that we no longer care
2649   // about precise ordering as long as there's exactly one use.
2650   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2651     if (!bitcast->hasOneUse()) break;
2652     InstsToKill.push_back(bitcast);
2653     result = bitcast->getOperand(0);
2654   }
2655 
2656   // Delete all the unnecessary instructions, from latest to earliest.
2657   for (auto *I : InstsToKill)
2658     I->eraseFromParent();
2659 
2660   // Do the fused retain/autorelease if we were asked to.
2661   if (doRetainAutorelease)
2662     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2663 
2664   // Cast back to the result type.
2665   return CGF.Builder.CreateBitCast(result, resultType);
2666 }
2667 
2668 /// If this is a +1 of the value of an immutable 'self', remove it.
2669 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2670                                           llvm::Value *result) {
2671   // This is only applicable to a method with an immutable 'self'.
2672   const ObjCMethodDecl *method =
2673     dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2674   if (!method) return nullptr;
2675   const VarDecl *self = method->getSelfDecl();
2676   if (!self->getType().isConstQualified()) return nullptr;
2677 
2678   // Look for a retain call.
2679   llvm::CallInst *retainCall =
2680     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2681   if (!retainCall ||
2682       retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2683     return nullptr;
2684 
2685   // Look for an ordinary load of 'self'.
2686   llvm::Value *retainedValue = retainCall->getArgOperand(0);
2687   llvm::LoadInst *load =
2688     dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2689   if (!load || load->isAtomic() || load->isVolatile() ||
2690       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2691     return nullptr;
2692 
2693   // Okay!  Burn it all down.  This relies for correctness on the
2694   // assumption that the retain is emitted as part of the return and
2695   // that thereafter everything is used "linearly".
2696   llvm::Type *resultType = result->getType();
2697   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2698   assert(retainCall->use_empty());
2699   retainCall->eraseFromParent();
2700   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2701 
2702   return CGF.Builder.CreateBitCast(load, resultType);
2703 }
2704 
2705 /// Emit an ARC autorelease of the result of a function.
2706 ///
2707 /// \return the value to actually return from the function
2708 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2709                                             llvm::Value *result) {
2710   // If we're returning 'self', kill the initial retain.  This is a
2711   // heuristic attempt to "encourage correctness" in the really unfortunate
2712   // case where we have a return of self during a dealloc and we desperately
2713   // need to avoid the possible autorelease.
2714   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2715     return self;
2716 
2717   // At -O0, try to emit a fused retain/autorelease.
2718   if (CGF.shouldUseFusedARCCalls())
2719     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2720       return fused;
2721 
2722   return CGF.EmitARCAutoreleaseReturnValue(result);
2723 }
2724 
2725 /// Heuristically search for a dominating store to the return-value slot.
2726 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2727   // Check if a User is a store which pointerOperand is the ReturnValue.
2728   // We are looking for stores to the ReturnValue, not for stores of the
2729   // ReturnValue to some other location.
2730   auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2731     auto *SI = dyn_cast<llvm::StoreInst>(U);
2732     if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2733       return nullptr;
2734     // These aren't actually possible for non-coerced returns, and we
2735     // only care about non-coerced returns on this code path.
2736     assert(!SI->isAtomic() && !SI->isVolatile());
2737     return SI;
2738   };
2739   // If there are multiple uses of the return-value slot, just check
2740   // for something immediately preceding the IP.  Sometimes this can
2741   // happen with how we generate implicit-returns; it can also happen
2742   // with noreturn cleanups.
2743   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2744     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2745     if (IP->empty()) return nullptr;
2746     llvm::Instruction *I = &IP->back();
2747 
2748     // Skip lifetime markers
2749     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2750                                             IE = IP->rend();
2751          II != IE; ++II) {
2752       if (llvm::IntrinsicInst *Intrinsic =
2753               dyn_cast<llvm::IntrinsicInst>(&*II)) {
2754         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2755           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2756           ++II;
2757           if (II == IE)
2758             break;
2759           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2760             continue;
2761         }
2762       }
2763       I = &*II;
2764       break;
2765     }
2766 
2767     return GetStoreIfValid(I);
2768   }
2769 
2770   llvm::StoreInst *store =
2771       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2772   if (!store) return nullptr;
2773 
2774   // Now do a first-and-dirty dominance check: just walk up the
2775   // single-predecessors chain from the current insertion point.
2776   llvm::BasicBlock *StoreBB = store->getParent();
2777   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2778   while (IP != StoreBB) {
2779     if (!(IP = IP->getSinglePredecessor()))
2780       return nullptr;
2781   }
2782 
2783   // Okay, the store's basic block dominates the insertion point; we
2784   // can do our thing.
2785   return store;
2786 }
2787 
2788 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2789                                          bool EmitRetDbgLoc,
2790                                          SourceLocation EndLoc) {
2791   if (FI.isNoReturn()) {
2792     // Noreturn functions don't return.
2793     EmitUnreachable(EndLoc);
2794     return;
2795   }
2796 
2797   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2798     // Naked functions don't have epilogues.
2799     Builder.CreateUnreachable();
2800     return;
2801   }
2802 
2803   // Functions with no result always return void.
2804   if (!ReturnValue.isValid()) {
2805     Builder.CreateRetVoid();
2806     return;
2807   }
2808 
2809   llvm::DebugLoc RetDbgLoc;
2810   llvm::Value *RV = nullptr;
2811   QualType RetTy = FI.getReturnType();
2812   const ABIArgInfo &RetAI = FI.getReturnInfo();
2813 
2814   switch (RetAI.getKind()) {
2815   case ABIArgInfo::InAlloca:
2816     // Aggregrates get evaluated directly into the destination.  Sometimes we
2817     // need to return the sret value in a register, though.
2818     assert(hasAggregateEvaluationKind(RetTy));
2819     if (RetAI.getInAllocaSRet()) {
2820       llvm::Function::arg_iterator EI = CurFn->arg_end();
2821       --EI;
2822       llvm::Value *ArgStruct = &*EI;
2823       llvm::Value *SRet = Builder.CreateStructGEP(
2824           nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2825       RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2826     }
2827     break;
2828 
2829   case ABIArgInfo::Indirect: {
2830     auto AI = CurFn->arg_begin();
2831     if (RetAI.isSRetAfterThis())
2832       ++AI;
2833     switch (getEvaluationKind(RetTy)) {
2834     case TEK_Complex: {
2835       ComplexPairTy RT =
2836         EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2837       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2838                          /*isInit*/ true);
2839       break;
2840     }
2841     case TEK_Aggregate:
2842       // Do nothing; aggregrates get evaluated directly into the destination.
2843       break;
2844     case TEK_Scalar:
2845       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2846                         MakeNaturalAlignAddrLValue(&*AI, RetTy),
2847                         /*isInit*/ true);
2848       break;
2849     }
2850     break;
2851   }
2852 
2853   case ABIArgInfo::Extend:
2854   case ABIArgInfo::Direct:
2855     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2856         RetAI.getDirectOffset() == 0) {
2857       // The internal return value temp always will have pointer-to-return-type
2858       // type, just do a load.
2859 
2860       // If there is a dominating store to ReturnValue, we can elide
2861       // the load, zap the store, and usually zap the alloca.
2862       if (llvm::StoreInst *SI =
2863               findDominatingStoreToReturnValue(*this)) {
2864         // Reuse the debug location from the store unless there is
2865         // cleanup code to be emitted between the store and return
2866         // instruction.
2867         if (EmitRetDbgLoc && !AutoreleaseResult)
2868           RetDbgLoc = SI->getDebugLoc();
2869         // Get the stored value and nuke the now-dead store.
2870         RV = SI->getValueOperand();
2871         SI->eraseFromParent();
2872 
2873         // If that was the only use of the return value, nuke it as well now.
2874         auto returnValueInst = ReturnValue.getPointer();
2875         if (returnValueInst->use_empty()) {
2876           if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2877             alloca->eraseFromParent();
2878             ReturnValue = Address::invalid();
2879           }
2880         }
2881 
2882       // Otherwise, we have to do a simple load.
2883       } else {
2884         RV = Builder.CreateLoad(ReturnValue);
2885       }
2886     } else {
2887       // If the value is offset in memory, apply the offset now.
2888       Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2889 
2890       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2891     }
2892 
2893     // In ARC, end functions that return a retainable type with a call
2894     // to objc_autoreleaseReturnValue.
2895     if (AutoreleaseResult) {
2896 #ifndef NDEBUG
2897       // Type::isObjCRetainabletype has to be called on a QualType that hasn't
2898       // been stripped of the typedefs, so we cannot use RetTy here. Get the
2899       // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2900       // CurCodeDecl or BlockInfo.
2901       QualType RT;
2902 
2903       if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2904         RT = FD->getReturnType();
2905       else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2906         RT = MD->getReturnType();
2907       else if (isa<BlockDecl>(CurCodeDecl))
2908         RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2909       else
2910         llvm_unreachable("Unexpected function/method type");
2911 
2912       assert(getLangOpts().ObjCAutoRefCount &&
2913              !FI.isReturnsRetained() &&
2914              RT->isObjCRetainableType());
2915 #endif
2916       RV = emitAutoreleaseOfResult(*this, RV);
2917     }
2918 
2919     break;
2920 
2921   case ABIArgInfo::Ignore:
2922     break;
2923 
2924   case ABIArgInfo::CoerceAndExpand: {
2925     auto coercionType = RetAI.getCoerceAndExpandType();
2926     auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2927 
2928     // Load all of the coerced elements out into results.
2929     llvm::SmallVector<llvm::Value*, 4> results;
2930     Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2931     for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2932       auto coercedEltType = coercionType->getElementType(i);
2933       if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2934         continue;
2935 
2936       auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2937       auto elt = Builder.CreateLoad(eltAddr);
2938       results.push_back(elt);
2939     }
2940 
2941     // If we have one result, it's the single direct result type.
2942     if (results.size() == 1) {
2943       RV = results[0];
2944 
2945     // Otherwise, we need to make a first-class aggregate.
2946     } else {
2947       // Construct a return type that lacks padding elements.
2948       llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2949 
2950       RV = llvm::UndefValue::get(returnType);
2951       for (unsigned i = 0, e = results.size(); i != e; ++i) {
2952         RV = Builder.CreateInsertValue(RV, results[i], i);
2953       }
2954     }
2955     break;
2956   }
2957 
2958   case ABIArgInfo::Expand:
2959     llvm_unreachable("Invalid ABI kind for return argument");
2960   }
2961 
2962   llvm::Instruction *Ret;
2963   if (RV) {
2964     EmitReturnValueCheck(RV);
2965     Ret = Builder.CreateRet(RV);
2966   } else {
2967     Ret = Builder.CreateRetVoid();
2968   }
2969 
2970   if (RetDbgLoc)
2971     Ret->setDebugLoc(std::move(RetDbgLoc));
2972 }
2973 
2974 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
2975   // A current decl may not be available when emitting vtable thunks.
2976   if (!CurCodeDecl)
2977     return;
2978 
2979   ReturnsNonNullAttr *RetNNAttr = nullptr;
2980   if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
2981     RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
2982 
2983   if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
2984     return;
2985 
2986   // Prefer the returns_nonnull attribute if it's present.
2987   SourceLocation AttrLoc;
2988   SanitizerMask CheckKind;
2989   SanitizerHandler Handler;
2990   if (RetNNAttr) {
2991     assert(!requiresReturnValueNullabilityCheck() &&
2992            "Cannot check nullability and the nonnull attribute");
2993     AttrLoc = RetNNAttr->getLocation();
2994     CheckKind = SanitizerKind::ReturnsNonnullAttribute;
2995     Handler = SanitizerHandler::NonnullReturn;
2996   } else {
2997     if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
2998       if (auto *TSI = DD->getTypeSourceInfo())
2999         if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
3000           AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3001     CheckKind = SanitizerKind::NullabilityReturn;
3002     Handler = SanitizerHandler::NullabilityReturn;
3003   }
3004 
3005   SanitizerScope SanScope(this);
3006 
3007   // Make sure the "return" source location is valid. If we're checking a
3008   // nullability annotation, make sure the preconditions for the check are met.
3009   llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3010   llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3011   llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3012   llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3013   if (requiresReturnValueNullabilityCheck())
3014     CanNullCheck =
3015         Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3016   Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3017   EmitBlock(Check);
3018 
3019   // Now do the null check.
3020   llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3021   llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3022   llvm::Value *DynamicData[] = {SLocPtr};
3023   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3024 
3025   EmitBlock(NoCheck);
3026 
3027 #ifndef NDEBUG
3028   // The return location should not be used after the check has been emitted.
3029   ReturnLocation = Address::invalid();
3030 #endif
3031 }
3032 
3033 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3034   const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3035   return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3036 }
3037 
3038 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3039                                           QualType Ty) {
3040   // FIXME: Generate IR in one pass, rather than going back and fixing up these
3041   // placeholders.
3042   llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3043   llvm::Type *IRPtrTy = IRTy->getPointerTo();
3044   llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3045 
3046   // FIXME: When we generate this IR in one pass, we shouldn't need
3047   // this win32-specific alignment hack.
3048   CharUnits Align = CharUnits::fromQuantity(4);
3049   Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3050 
3051   return AggValueSlot::forAddr(Address(Placeholder, Align),
3052                                Ty.getQualifiers(),
3053                                AggValueSlot::IsNotDestructed,
3054                                AggValueSlot::DoesNotNeedGCBarriers,
3055                                AggValueSlot::IsNotAliased,
3056                                AggValueSlot::DoesNotOverlap);
3057 }
3058 
3059 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3060                                           const VarDecl *param,
3061                                           SourceLocation loc) {
3062   // StartFunction converted the ABI-lowered parameter(s) into a
3063   // local alloca.  We need to turn that into an r-value suitable
3064   // for EmitCall.
3065   Address local = GetAddrOfLocalVar(param);
3066 
3067   QualType type = param->getType();
3068 
3069   assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
3070          "cannot emit delegate call arguments for inalloca arguments!");
3071 
3072   // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3073   // but the argument needs to be the original pointer.
3074   if (type->isReferenceType()) {
3075     args.add(RValue::get(Builder.CreateLoad(local)), type);
3076 
3077   // In ARC, move out of consumed arguments so that the release cleanup
3078   // entered by StartFunction doesn't cause an over-release.  This isn't
3079   // optimal -O0 code generation, but it should get cleaned up when
3080   // optimization is enabled.  This also assumes that delegate calls are
3081   // performed exactly once for a set of arguments, but that should be safe.
3082   } else if (getLangOpts().ObjCAutoRefCount &&
3083              param->hasAttr<NSConsumedAttr>() &&
3084              type->isObjCRetainableType()) {
3085     llvm::Value *ptr = Builder.CreateLoad(local);
3086     auto null =
3087       llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3088     Builder.CreateStore(null, local);
3089     args.add(RValue::get(ptr), type);
3090 
3091   // For the most part, we just need to load the alloca, except that
3092   // aggregate r-values are actually pointers to temporaries.
3093   } else {
3094     args.add(convertTempToRValue(local, type, loc), type);
3095   }
3096 
3097   // Deactivate the cleanup for the callee-destructed param that was pushed.
3098   if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3099       type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3100       type.isDestructedType()) {
3101     EHScopeStack::stable_iterator cleanup =
3102         CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3103     assert(cleanup.isValid() &&
3104            "cleanup for callee-destructed param not recorded");
3105     // This unreachable is a temporary marker which will be removed later.
3106     llvm::Instruction *isActive = Builder.CreateUnreachable();
3107     args.addArgCleanupDeactivation(cleanup, isActive);
3108   }
3109 }
3110 
3111 static bool isProvablyNull(llvm::Value *addr) {
3112   return isa<llvm::ConstantPointerNull>(addr);
3113 }
3114 
3115 /// Emit the actual writing-back of a writeback.
3116 static void emitWriteback(CodeGenFunction &CGF,
3117                           const CallArgList::Writeback &writeback) {
3118   const LValue &srcLV = writeback.Source;
3119   Address srcAddr = srcLV.getAddress();
3120   assert(!isProvablyNull(srcAddr.getPointer()) &&
3121          "shouldn't have writeback for provably null argument");
3122 
3123   llvm::BasicBlock *contBB = nullptr;
3124 
3125   // If the argument wasn't provably non-null, we need to null check
3126   // before doing the store.
3127   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3128                                               CGF.CGM.getDataLayout());
3129   if (!provablyNonNull) {
3130     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3131     contBB = CGF.createBasicBlock("icr.done");
3132 
3133     llvm::Value *isNull =
3134       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3135     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3136     CGF.EmitBlock(writebackBB);
3137   }
3138 
3139   // Load the value to writeback.
3140   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3141 
3142   // Cast it back, in case we're writing an id to a Foo* or something.
3143   value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3144                                     "icr.writeback-cast");
3145 
3146   // Perform the writeback.
3147 
3148   // If we have a "to use" value, it's something we need to emit a use
3149   // of.  This has to be carefully threaded in: if it's done after the
3150   // release it's potentially undefined behavior (and the optimizer
3151   // will ignore it), and if it happens before the retain then the
3152   // optimizer could move the release there.
3153   if (writeback.ToUse) {
3154     assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3155 
3156     // Retain the new value.  No need to block-copy here:  the block's
3157     // being passed up the stack.
3158     value = CGF.EmitARCRetainNonBlock(value);
3159 
3160     // Emit the intrinsic use here.
3161     CGF.EmitARCIntrinsicUse(writeback.ToUse);
3162 
3163     // Load the old value (primitively).
3164     llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3165 
3166     // Put the new value in place (primitively).
3167     CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3168 
3169     // Release the old value.
3170     CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3171 
3172   // Otherwise, we can just do a normal lvalue store.
3173   } else {
3174     CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3175   }
3176 
3177   // Jump to the continuation block.
3178   if (!provablyNonNull)
3179     CGF.EmitBlock(contBB);
3180 }
3181 
3182 static void emitWritebacks(CodeGenFunction &CGF,
3183                            const CallArgList &args) {
3184   for (const auto &I : args.writebacks())
3185     emitWriteback(CGF, I);
3186 }
3187 
3188 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3189                                             const CallArgList &CallArgs) {
3190   ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3191     CallArgs.getCleanupsToDeactivate();
3192   // Iterate in reverse to increase the likelihood of popping the cleanup.
3193   for (const auto &I : llvm::reverse(Cleanups)) {
3194     CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3195     I.IsActiveIP->eraseFromParent();
3196   }
3197 }
3198 
3199 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3200   if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3201     if (uop->getOpcode() == UO_AddrOf)
3202       return uop->getSubExpr();
3203   return nullptr;
3204 }
3205 
3206 /// Emit an argument that's being passed call-by-writeback.  That is,
3207 /// we are passing the address of an __autoreleased temporary; it
3208 /// might be copy-initialized with the current value of the given
3209 /// address, but it will definitely be copied out of after the call.
3210 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3211                              const ObjCIndirectCopyRestoreExpr *CRE) {
3212   LValue srcLV;
3213 
3214   // Make an optimistic effort to emit the address as an l-value.
3215   // This can fail if the argument expression is more complicated.
3216   if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3217     srcLV = CGF.EmitLValue(lvExpr);
3218 
3219   // Otherwise, just emit it as a scalar.
3220   } else {
3221     Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3222 
3223     QualType srcAddrType =
3224       CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3225     srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3226   }
3227   Address srcAddr = srcLV.getAddress();
3228 
3229   // The dest and src types don't necessarily match in LLVM terms
3230   // because of the crazy ObjC compatibility rules.
3231 
3232   llvm::PointerType *destType =
3233     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3234 
3235   // If the address is a constant null, just pass the appropriate null.
3236   if (isProvablyNull(srcAddr.getPointer())) {
3237     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3238              CRE->getType());
3239     return;
3240   }
3241 
3242   // Create the temporary.
3243   Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3244                                       CGF.getPointerAlign(),
3245                                       "icr.temp");
3246   // Loading an l-value can introduce a cleanup if the l-value is __weak,
3247   // and that cleanup will be conditional if we can't prove that the l-value
3248   // isn't null, so we need to register a dominating point so that the cleanups
3249   // system will make valid IR.
3250   CodeGenFunction::ConditionalEvaluation condEval(CGF);
3251 
3252   // Zero-initialize it if we're not doing a copy-initialization.
3253   bool shouldCopy = CRE->shouldCopy();
3254   if (!shouldCopy) {
3255     llvm::Value *null =
3256       llvm::ConstantPointerNull::get(
3257         cast<llvm::PointerType>(destType->getElementType()));
3258     CGF.Builder.CreateStore(null, temp);
3259   }
3260 
3261   llvm::BasicBlock *contBB = nullptr;
3262   llvm::BasicBlock *originBB = nullptr;
3263 
3264   // If the address is *not* known to be non-null, we need to switch.
3265   llvm::Value *finalArgument;
3266 
3267   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3268                                               CGF.CGM.getDataLayout());
3269   if (provablyNonNull) {
3270     finalArgument = temp.getPointer();
3271   } else {
3272     llvm::Value *isNull =
3273       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3274 
3275     finalArgument = CGF.Builder.CreateSelect(isNull,
3276                                    llvm::ConstantPointerNull::get(destType),
3277                                              temp.getPointer(), "icr.argument");
3278 
3279     // If we need to copy, then the load has to be conditional, which
3280     // means we need control flow.
3281     if (shouldCopy) {
3282       originBB = CGF.Builder.GetInsertBlock();
3283       contBB = CGF.createBasicBlock("icr.cont");
3284       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3285       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3286       CGF.EmitBlock(copyBB);
3287       condEval.begin(CGF);
3288     }
3289   }
3290 
3291   llvm::Value *valueToUse = nullptr;
3292 
3293   // Perform a copy if necessary.
3294   if (shouldCopy) {
3295     RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3296     assert(srcRV.isScalar());
3297 
3298     llvm::Value *src = srcRV.getScalarVal();
3299     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3300                                     "icr.cast");
3301 
3302     // Use an ordinary store, not a store-to-lvalue.
3303     CGF.Builder.CreateStore(src, temp);
3304 
3305     // If optimization is enabled, and the value was held in a
3306     // __strong variable, we need to tell the optimizer that this
3307     // value has to stay alive until we're doing the store back.
3308     // This is because the temporary is effectively unretained,
3309     // and so otherwise we can violate the high-level semantics.
3310     if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3311         srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3312       valueToUse = src;
3313     }
3314   }
3315 
3316   // Finish the control flow if we needed it.
3317   if (shouldCopy && !provablyNonNull) {
3318     llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3319     CGF.EmitBlock(contBB);
3320 
3321     // Make a phi for the value to intrinsically use.
3322     if (valueToUse) {
3323       llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3324                                                       "icr.to-use");
3325       phiToUse->addIncoming(valueToUse, copyBB);
3326       phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3327                             originBB);
3328       valueToUse = phiToUse;
3329     }
3330 
3331     condEval.end(CGF);
3332   }
3333 
3334   args.addWriteback(srcLV, temp, valueToUse);
3335   args.add(RValue::get(finalArgument), CRE->getType());
3336 }
3337 
3338 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3339   assert(!StackBase);
3340 
3341   // Save the stack.
3342   llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3343   StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3344 }
3345 
3346 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3347   if (StackBase) {
3348     // Restore the stack after the call.
3349     llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3350     CGF.Builder.CreateCall(F, StackBase);
3351   }
3352 }
3353 
3354 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3355                                           SourceLocation ArgLoc,
3356                                           AbstractCallee AC,
3357                                           unsigned ParmNum) {
3358   if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3359                          SanOpts.has(SanitizerKind::NullabilityArg)))
3360     return;
3361 
3362   // The param decl may be missing in a variadic function.
3363   auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3364   unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3365 
3366   // Prefer the nonnull attribute if it's present.
3367   const NonNullAttr *NNAttr = nullptr;
3368   if (SanOpts.has(SanitizerKind::NonnullAttribute))
3369     NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3370 
3371   bool CanCheckNullability = false;
3372   if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3373     auto Nullability = PVD->getType()->getNullability(getContext());
3374     CanCheckNullability = Nullability &&
3375                           *Nullability == NullabilityKind::NonNull &&
3376                           PVD->getTypeSourceInfo();
3377   }
3378 
3379   if (!NNAttr && !CanCheckNullability)
3380     return;
3381 
3382   SourceLocation AttrLoc;
3383   SanitizerMask CheckKind;
3384   SanitizerHandler Handler;
3385   if (NNAttr) {
3386     AttrLoc = NNAttr->getLocation();
3387     CheckKind = SanitizerKind::NonnullAttribute;
3388     Handler = SanitizerHandler::NonnullArg;
3389   } else {
3390     AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3391     CheckKind = SanitizerKind::NullabilityArg;
3392     Handler = SanitizerHandler::NullabilityArg;
3393   }
3394 
3395   SanitizerScope SanScope(this);
3396   assert(RV.isScalar());
3397   llvm::Value *V = RV.getScalarVal();
3398   llvm::Value *Cond =
3399       Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3400   llvm::Constant *StaticData[] = {
3401       EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3402       llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3403   };
3404   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3405 }
3406 
3407 void CodeGenFunction::EmitCallArgs(
3408     CallArgList &Args, ArrayRef<QualType> ArgTypes,
3409     llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3410     AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3411   assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3412 
3413   // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3414   // because arguments are destroyed left to right in the callee. As a special
3415   // case, there are certain language constructs that require left-to-right
3416   // evaluation, and in those cases we consider the evaluation order requirement
3417   // to trump the "destruction order is reverse construction order" guarantee.
3418   bool LeftToRight =
3419       CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3420           ? Order == EvaluationOrder::ForceLeftToRight
3421           : Order != EvaluationOrder::ForceRightToLeft;
3422 
3423   auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3424                                          RValue EmittedArg) {
3425     if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3426       return;
3427     auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3428     if (PS == nullptr)
3429       return;
3430 
3431     const auto &Context = getContext();
3432     auto SizeTy = Context.getSizeType();
3433     auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3434     assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3435     llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3436                                                      EmittedArg.getScalarVal());
3437     Args.add(RValue::get(V), SizeTy);
3438     // If we're emitting args in reverse, be sure to do so with
3439     // pass_object_size, as well.
3440     if (!LeftToRight)
3441       std::swap(Args.back(), *(&Args.back() - 1));
3442   };
3443 
3444   // Insert a stack save if we're going to need any inalloca args.
3445   bool HasInAllocaArgs = false;
3446   if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3447     for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3448          I != E && !HasInAllocaArgs; ++I)
3449       HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3450     if (HasInAllocaArgs) {
3451       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3452       Args.allocateArgumentMemory(*this);
3453     }
3454   }
3455 
3456   // Evaluate each argument in the appropriate order.
3457   size_t CallArgsStart = Args.size();
3458   for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3459     unsigned Idx = LeftToRight ? I : E - I - 1;
3460     CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3461     unsigned InitialArgSize = Args.size();
3462     // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3463     // the argument and parameter match or the objc method is parameterized.
3464     assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3465             getContext().hasSameUnqualifiedType((*Arg)->getType(),
3466                                                 ArgTypes[Idx]) ||
3467             (isa<ObjCMethodDecl>(AC.getDecl()) &&
3468              isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3469            "Argument and parameter types don't match");
3470     EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3471     // In particular, we depend on it being the last arg in Args, and the
3472     // objectsize bits depend on there only being one arg if !LeftToRight.
3473     assert(InitialArgSize + 1 == Args.size() &&
3474            "The code below depends on only adding one arg per EmitCallArg");
3475     (void)InitialArgSize;
3476     // Since pointer argument are never emitted as LValue, it is safe to emit
3477     // non-null argument check for r-value only.
3478     if (!Args.back().hasLValue()) {
3479       RValue RVArg = Args.back().getKnownRValue();
3480       EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3481                           ParamsToSkip + Idx);
3482       // @llvm.objectsize should never have side-effects and shouldn't need
3483       // destruction/cleanups, so we can safely "emit" it after its arg,
3484       // regardless of right-to-leftness
3485       MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3486     }
3487   }
3488 
3489   if (!LeftToRight) {
3490     // Un-reverse the arguments we just evaluated so they match up with the LLVM
3491     // IR function.
3492     std::reverse(Args.begin() + CallArgsStart, Args.end());
3493   }
3494 }
3495 
3496 namespace {
3497 
3498 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3499   DestroyUnpassedArg(Address Addr, QualType Ty)
3500       : Addr(Addr), Ty(Ty) {}
3501 
3502   Address Addr;
3503   QualType Ty;
3504 
3505   void Emit(CodeGenFunction &CGF, Flags flags) override {
3506     QualType::DestructionKind DtorKind = Ty.isDestructedType();
3507     if (DtorKind == QualType::DK_cxx_destructor) {
3508       const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3509       assert(!Dtor->isTrivial());
3510       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3511                                 /*Delegating=*/false, Addr);
3512     } else {
3513       CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3514     }
3515   }
3516 };
3517 
3518 struct DisableDebugLocationUpdates {
3519   CodeGenFunction &CGF;
3520   bool disabledDebugInfo;
3521   DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3522     if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3523       CGF.disableDebugInfo();
3524   }
3525   ~DisableDebugLocationUpdates() {
3526     if (disabledDebugInfo)
3527       CGF.enableDebugInfo();
3528   }
3529 };
3530 
3531 } // end anonymous namespace
3532 
3533 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3534   if (!HasLV)
3535     return RV;
3536   LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3537   CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3538                         LV.isVolatile());
3539   IsUsed = true;
3540   return RValue::getAggregate(Copy.getAddress());
3541 }
3542 
3543 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3544   LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3545   if (!HasLV && RV.isScalar())
3546     CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*init=*/true);
3547   else if (!HasLV && RV.isComplex())
3548     CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3549   else {
3550     auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
3551     LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3552     // We assume that call args are never copied into subobjects.
3553     CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3554                           HasLV ? LV.isVolatileQualified()
3555                                 : RV.isVolatileQualified());
3556   }
3557   IsUsed = true;
3558 }
3559 
3560 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3561                                   QualType type) {
3562   DisableDebugLocationUpdates Dis(*this, E);
3563   if (const ObjCIndirectCopyRestoreExpr *CRE
3564         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3565     assert(getLangOpts().ObjCAutoRefCount);
3566     return emitWritebackArg(*this, args, CRE);
3567   }
3568 
3569   assert(type->isReferenceType() == E->isGLValue() &&
3570          "reference binding to unmaterialized r-value!");
3571 
3572   if (E->isGLValue()) {
3573     assert(E->getObjectKind() == OK_Ordinary);
3574     return args.add(EmitReferenceBindingToExpr(E), type);
3575   }
3576 
3577   bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3578 
3579   // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3580   // However, we still have to push an EH-only cleanup in case we unwind before
3581   // we make it to the call.
3582   if (HasAggregateEvalKind &&
3583       type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
3584     // If we're using inalloca, use the argument memory.  Otherwise, use a
3585     // temporary.
3586     AggValueSlot Slot;
3587     if (args.isUsingInAlloca())
3588       Slot = createPlaceholderSlot(*this, type);
3589     else
3590       Slot = CreateAggTemp(type, "agg.tmp");
3591 
3592     bool DestroyedInCallee = true, NeedsEHCleanup = true;
3593     if (const auto *RD = type->getAsCXXRecordDecl())
3594       DestroyedInCallee = RD->hasNonTrivialDestructor();
3595     else
3596       NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3597 
3598     if (DestroyedInCallee)
3599       Slot.setExternallyDestructed();
3600 
3601     EmitAggExpr(E, Slot);
3602     RValue RV = Slot.asRValue();
3603     args.add(RV, type);
3604 
3605     if (DestroyedInCallee && NeedsEHCleanup) {
3606       // Create a no-op GEP between the placeholder and the cleanup so we can
3607       // RAUW it successfully.  It also serves as a marker of the first
3608       // instruction where the cleanup is active.
3609       pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3610                                               type);
3611       // This unreachable is a temporary marker which will be removed later.
3612       llvm::Instruction *IsActive = Builder.CreateUnreachable();
3613       args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3614     }
3615     return;
3616   }
3617 
3618   if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3619       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3620     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3621     assert(L.isSimple());
3622     args.addUncopiedAggregate(L, type);
3623     return;
3624   }
3625 
3626   args.add(EmitAnyExprToTemp(E), type);
3627 }
3628 
3629 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3630   // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3631   // implicitly widens null pointer constants that are arguments to varargs
3632   // functions to pointer-sized ints.
3633   if (!getTarget().getTriple().isOSWindows())
3634     return Arg->getType();
3635 
3636   if (Arg->getType()->isIntegerType() &&
3637       getContext().getTypeSize(Arg->getType()) <
3638           getContext().getTargetInfo().getPointerWidth(0) &&
3639       Arg->isNullPointerConstant(getContext(),
3640                                  Expr::NPC_ValueDependentIsNotNull)) {
3641     return getContext().getIntPtrType();
3642   }
3643 
3644   return Arg->getType();
3645 }
3646 
3647 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3648 // optimizer it can aggressively ignore unwind edges.
3649 void
3650 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3651   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3652       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3653     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3654                       CGM.getNoObjCARCExceptionsMetadata());
3655 }
3656 
3657 /// Emits a call to the given no-arguments nounwind runtime function.
3658 llvm::CallInst *
3659 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3660                                          const llvm::Twine &name) {
3661   return EmitNounwindRuntimeCall(callee, None, name);
3662 }
3663 
3664 /// Emits a call to the given nounwind runtime function.
3665 llvm::CallInst *
3666 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3667                                          ArrayRef<llvm::Value*> args,
3668                                          const llvm::Twine &name) {
3669   llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3670   call->setDoesNotThrow();
3671   return call;
3672 }
3673 
3674 /// Emits a simple call (never an invoke) to the given no-arguments
3675 /// runtime function.
3676 llvm::CallInst *
3677 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3678                                  const llvm::Twine &name) {
3679   return EmitRuntimeCall(callee, None, name);
3680 }
3681 
3682 // Calls which may throw must have operand bundles indicating which funclet
3683 // they are nested within.
3684 SmallVector<llvm::OperandBundleDef, 1>
3685 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3686   SmallVector<llvm::OperandBundleDef, 1> BundleList;
3687   // There is no need for a funclet operand bundle if we aren't inside a
3688   // funclet.
3689   if (!CurrentFuncletPad)
3690     return BundleList;
3691 
3692   // Skip intrinsics which cannot throw.
3693   auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3694   if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3695     return BundleList;
3696 
3697   BundleList.emplace_back("funclet", CurrentFuncletPad);
3698   return BundleList;
3699 }
3700 
3701 /// Emits a simple call (never an invoke) to the given runtime function.
3702 llvm::CallInst *
3703 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3704                                  ArrayRef<llvm::Value*> args,
3705                                  const llvm::Twine &name) {
3706   llvm::CallInst *call =
3707       Builder.CreateCall(callee, args, getBundlesForFunclet(callee), name);
3708   call->setCallingConv(getRuntimeCC());
3709   return call;
3710 }
3711 
3712 /// Emits a call or invoke to the given noreturn runtime function.
3713 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3714                                                ArrayRef<llvm::Value*> args) {
3715   SmallVector<llvm::OperandBundleDef, 1> BundleList =
3716       getBundlesForFunclet(callee);
3717 
3718   if (getInvokeDest()) {
3719     llvm::InvokeInst *invoke =
3720       Builder.CreateInvoke(callee,
3721                            getUnreachableBlock(),
3722                            getInvokeDest(),
3723                            args,
3724                            BundleList);
3725     invoke->setDoesNotReturn();
3726     invoke->setCallingConv(getRuntimeCC());
3727   } else {
3728     llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3729     call->setDoesNotReturn();
3730     call->setCallingConv(getRuntimeCC());
3731     Builder.CreateUnreachable();
3732   }
3733 }
3734 
3735 /// Emits a call or invoke instruction to the given nullary runtime function.
3736 llvm::CallSite
3737 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3738                                          const Twine &name) {
3739   return EmitRuntimeCallOrInvoke(callee, None, name);
3740 }
3741 
3742 /// Emits a call or invoke instruction to the given runtime function.
3743 llvm::CallSite
3744 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3745                                          ArrayRef<llvm::Value*> args,
3746                                          const Twine &name) {
3747   llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3748   callSite.setCallingConv(getRuntimeCC());
3749   return callSite;
3750 }
3751 
3752 /// Emits a call or invoke instruction to the given function, depending
3753 /// on the current state of the EH stack.
3754 llvm::CallSite
3755 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3756                                   ArrayRef<llvm::Value *> Args,
3757                                   const Twine &Name) {
3758   llvm::BasicBlock *InvokeDest = getInvokeDest();
3759   SmallVector<llvm::OperandBundleDef, 1> BundleList =
3760       getBundlesForFunclet(Callee);
3761 
3762   llvm::Instruction *Inst;
3763   if (!InvokeDest)
3764     Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3765   else {
3766     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3767     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3768                                 Name);
3769     EmitBlock(ContBB);
3770   }
3771 
3772   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3773   // optimizer it can aggressively ignore unwind edges.
3774   if (CGM.getLangOpts().ObjCAutoRefCount)
3775     AddObjCARCExceptionMetadata(Inst);
3776 
3777   return llvm::CallSite(Inst);
3778 }
3779 
3780 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3781                                                   llvm::Value *New) {
3782   DeferredReplacements.push_back(std::make_pair(Old, New));
3783 }
3784 
3785 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3786                                  const CGCallee &Callee,
3787                                  ReturnValueSlot ReturnValue,
3788                                  const CallArgList &CallArgs,
3789                                  llvm::Instruction **callOrInvoke,
3790                                  SourceLocation Loc) {
3791   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3792 
3793   assert(Callee.isOrdinary() || Callee.isVirtual());
3794 
3795   // Handle struct-return functions by passing a pointer to the
3796   // location that we would like to return into.
3797   QualType RetTy = CallInfo.getReturnType();
3798   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3799 
3800   llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
3801 
3802   // 1. Set up the arguments.
3803 
3804   // If we're using inalloca, insert the allocation after the stack save.
3805   // FIXME: Do this earlier rather than hacking it in here!
3806   Address ArgMemory = Address::invalid();
3807   const llvm::StructLayout *ArgMemoryLayout = nullptr;
3808   if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3809     const llvm::DataLayout &DL = CGM.getDataLayout();
3810     ArgMemoryLayout = DL.getStructLayout(ArgStruct);
3811     llvm::Instruction *IP = CallArgs.getStackBase();
3812     llvm::AllocaInst *AI;
3813     if (IP) {
3814       IP = IP->getNextNode();
3815       AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3816                                 "argmem", IP);
3817     } else {
3818       AI = CreateTempAlloca(ArgStruct, "argmem");
3819     }
3820     auto Align = CallInfo.getArgStructAlignment();
3821     AI->setAlignment(Align.getQuantity());
3822     AI->setUsedWithInAlloca(true);
3823     assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3824     ArgMemory = Address(AI, Align);
3825   }
3826 
3827   // Helper function to drill into the inalloca allocation.
3828   auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3829     auto FieldOffset =
3830       CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3831     return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3832   };
3833 
3834   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3835   SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3836 
3837   // If the call returns a temporary with struct return, create a temporary
3838   // alloca to hold the result, unless one is given to us.
3839   Address SRetPtr = Address::invalid();
3840   Address SRetAlloca = Address::invalid();
3841   llvm::Value *UnusedReturnSizePtr = nullptr;
3842   if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3843     if (!ReturnValue.isNull()) {
3844       SRetPtr = ReturnValue.getValue();
3845     } else {
3846       SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
3847       if (HaveInsertPoint() && ReturnValue.isUnused()) {
3848         uint64_t size =
3849             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3850         UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
3851       }
3852     }
3853     if (IRFunctionArgs.hasSRetArg()) {
3854       IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3855     } else if (RetAI.isInAlloca()) {
3856       Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3857       Builder.CreateStore(SRetPtr.getPointer(), Addr);
3858     }
3859   }
3860 
3861   Address swiftErrorTemp = Address::invalid();
3862   Address swiftErrorArg = Address::invalid();
3863 
3864   // Translate all of the arguments as necessary to match the IR lowering.
3865   assert(CallInfo.arg_size() == CallArgs.size() &&
3866          "Mismatch between function signature & arguments.");
3867   unsigned ArgNo = 0;
3868   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3869   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3870        I != E; ++I, ++info_it, ++ArgNo) {
3871     const ABIArgInfo &ArgInfo = info_it->info;
3872 
3873     // Insert a padding argument to ensure proper alignment.
3874     if (IRFunctionArgs.hasPaddingArg(ArgNo))
3875       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3876           llvm::UndefValue::get(ArgInfo.getPaddingType());
3877 
3878     unsigned FirstIRArg, NumIRArgs;
3879     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3880 
3881     switch (ArgInfo.getKind()) {
3882     case ABIArgInfo::InAlloca: {
3883       assert(NumIRArgs == 0);
3884       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3885       if (I->isAggregate()) {
3886         // Replace the placeholder with the appropriate argument slot GEP.
3887         Address Addr = I->hasLValue()
3888                            ? I->getKnownLValue().getAddress()
3889                            : I->getKnownRValue().getAggregateAddress();
3890         llvm::Instruction *Placeholder =
3891             cast<llvm::Instruction>(Addr.getPointer());
3892         CGBuilderTy::InsertPoint IP = Builder.saveIP();
3893         Builder.SetInsertPoint(Placeholder);
3894         Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3895         Builder.restoreIP(IP);
3896         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3897       } else {
3898         // Store the RValue into the argument struct.
3899         Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3900         unsigned AS = Addr.getType()->getPointerAddressSpace();
3901         llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3902         // There are some cases where a trivial bitcast is not avoidable.  The
3903         // definition of a type later in a translation unit may change it's type
3904         // from {}* to (%struct.foo*)*.
3905         if (Addr.getType() != MemType)
3906           Addr = Builder.CreateBitCast(Addr, MemType);
3907         I->copyInto(*this, Addr);
3908       }
3909       break;
3910     }
3911 
3912     case ABIArgInfo::Indirect: {
3913       assert(NumIRArgs == 1);
3914       if (!I->isAggregate()) {
3915         // Make a temporary alloca to pass the argument.
3916         Address Addr = CreateMemTempWithoutCast(
3917             I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
3918         IRCallArgs[FirstIRArg] = Addr.getPointer();
3919 
3920         I->copyInto(*this, Addr);
3921       } else {
3922         // We want to avoid creating an unnecessary temporary+copy here;
3923         // however, we need one in three cases:
3924         // 1. If the argument is not byval, and we are required to copy the
3925         //    source.  (This case doesn't occur on any common architecture.)
3926         // 2. If the argument is byval, RV is not sufficiently aligned, and
3927         //    we cannot force it to be sufficiently aligned.
3928         // 3. If the argument is byval, but RV is not located in default
3929         //    or alloca address space.
3930         Address Addr = I->hasLValue()
3931                            ? I->getKnownLValue().getAddress()
3932                            : I->getKnownRValue().getAggregateAddress();
3933         llvm::Value *V = Addr.getPointer();
3934         CharUnits Align = ArgInfo.getIndirectAlign();
3935         const llvm::DataLayout *TD = &CGM.getDataLayout();
3936 
3937         assert((FirstIRArg >= IRFuncTy->getNumParams() ||
3938                 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
3939                     TD->getAllocaAddrSpace()) &&
3940                "indirect argument must be in alloca address space");
3941 
3942         bool NeedCopy = false;
3943 
3944         if (Addr.getAlignment() < Align &&
3945             llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
3946                 Align.getQuantity()) {
3947           NeedCopy = true;
3948         } else if (I->hasLValue()) {
3949           auto LV = I->getKnownLValue();
3950           auto AS = LV.getAddressSpace();
3951           if ((!ArgInfo.getIndirectByVal() &&
3952                (LV.getAlignment() >=
3953                 getContext().getTypeAlignInChars(I->Ty))) ||
3954               (ArgInfo.getIndirectByVal() &&
3955                ((AS != LangAS::Default && AS != LangAS::opencl_private &&
3956                  AS != CGM.getASTAllocaAddressSpace())))) {
3957             NeedCopy = true;
3958           }
3959         }
3960         if (NeedCopy) {
3961           // Create an aligned temporary, and copy to it.
3962           Address AI = CreateMemTempWithoutCast(
3963               I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
3964           IRCallArgs[FirstIRArg] = AI.getPointer();
3965           I->copyInto(*this, AI);
3966         } else {
3967           // Skip the extra memcpy call.
3968           auto *T = V->getType()->getPointerElementType()->getPointerTo(
3969               CGM.getDataLayout().getAllocaAddrSpace());
3970           IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
3971               *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
3972               true);
3973         }
3974       }
3975       break;
3976     }
3977 
3978     case ABIArgInfo::Ignore:
3979       assert(NumIRArgs == 0);
3980       break;
3981 
3982     case ABIArgInfo::Extend:
3983     case ABIArgInfo::Direct: {
3984       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
3985           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
3986           ArgInfo.getDirectOffset() == 0) {
3987         assert(NumIRArgs == 1);
3988         llvm::Value *V;
3989         if (!I->isAggregate())
3990           V = I->getKnownRValue().getScalarVal();
3991         else
3992           V = Builder.CreateLoad(
3993               I->hasLValue() ? I->getKnownLValue().getAddress()
3994                              : I->getKnownRValue().getAggregateAddress());
3995 
3996         // Implement swifterror by copying into a new swifterror argument.
3997         // We'll write back in the normal path out of the call.
3998         if (CallInfo.getExtParameterInfo(ArgNo).getABI()
3999               == ParameterABI::SwiftErrorResult) {
4000           assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4001 
4002           QualType pointeeTy = I->Ty->getPointeeType();
4003           swiftErrorArg =
4004             Address(V, getContext().getTypeAlignInChars(pointeeTy));
4005 
4006           swiftErrorTemp =
4007             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4008           V = swiftErrorTemp.getPointer();
4009           cast<llvm::AllocaInst>(V)->setSwiftError(true);
4010 
4011           llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4012           Builder.CreateStore(errorValue, swiftErrorTemp);
4013         }
4014 
4015         // We might have to widen integers, but we should never truncate.
4016         if (ArgInfo.getCoerceToType() != V->getType() &&
4017             V->getType()->isIntegerTy())
4018           V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4019 
4020         // If the argument doesn't match, perform a bitcast to coerce it.  This
4021         // can happen due to trivial type mismatches.
4022         if (FirstIRArg < IRFuncTy->getNumParams() &&
4023             V->getType() != IRFuncTy->getParamType(FirstIRArg))
4024           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4025 
4026         IRCallArgs[FirstIRArg] = V;
4027         break;
4028       }
4029 
4030       // FIXME: Avoid the conversion through memory if possible.
4031       Address Src = Address::invalid();
4032       if (!I->isAggregate()) {
4033         Src = CreateMemTemp(I->Ty, "coerce");
4034         I->copyInto(*this, Src);
4035       } else {
4036         Src = I->hasLValue() ? I->getKnownLValue().getAddress()
4037                              : I->getKnownRValue().getAggregateAddress();
4038       }
4039 
4040       // If the value is offset in memory, apply the offset now.
4041       Src = emitAddressAtOffset(*this, Src, ArgInfo);
4042 
4043       // Fast-isel and the optimizer generally like scalar values better than
4044       // FCAs, so we flatten them if this is safe to do for this argument.
4045       llvm::StructType *STy =
4046             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4047       if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4048         llvm::Type *SrcTy = Src.getType()->getElementType();
4049         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4050         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4051 
4052         // If the source type is smaller than the destination type of the
4053         // coerce-to logic, copy the source value into a temp alloca the size
4054         // of the destination type to allow loading all of it. The bits past
4055         // the source value are left undef.
4056         if (SrcSize < DstSize) {
4057           Address TempAlloca
4058             = CreateTempAlloca(STy, Src.getAlignment(),
4059                                Src.getName() + ".coerce");
4060           Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4061           Src = TempAlloca;
4062         } else {
4063           Src = Builder.CreateBitCast(Src,
4064                                       STy->getPointerTo(Src.getAddressSpace()));
4065         }
4066 
4067         auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
4068         assert(NumIRArgs == STy->getNumElements());
4069         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4070           auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
4071           Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
4072           llvm::Value *LI = Builder.CreateLoad(EltPtr);
4073           IRCallArgs[FirstIRArg + i] = LI;
4074         }
4075       } else {
4076         // In the simple case, just pass the coerced loaded value.
4077         assert(NumIRArgs == 1);
4078         IRCallArgs[FirstIRArg] =
4079           CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4080       }
4081 
4082       break;
4083     }
4084 
4085     case ABIArgInfo::CoerceAndExpand: {
4086       auto coercionType = ArgInfo.getCoerceAndExpandType();
4087       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4088 
4089       llvm::Value *tempSize = nullptr;
4090       Address addr = Address::invalid();
4091       Address AllocaAddr = Address::invalid();
4092       if (I->isAggregate()) {
4093         addr = I->hasLValue() ? I->getKnownLValue().getAddress()
4094                               : I->getKnownRValue().getAggregateAddress();
4095 
4096       } else {
4097         RValue RV = I->getKnownRValue();
4098         assert(RV.isScalar()); // complex should always just be direct
4099 
4100         llvm::Type *scalarType = RV.getScalarVal()->getType();
4101         auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4102         auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4103 
4104         // Materialize to a temporary.
4105         addr = CreateTempAlloca(RV.getScalarVal()->getType(),
4106                                 CharUnits::fromQuantity(std::max(
4107                                     layout->getAlignment(), scalarAlign)),
4108                                 "tmp",
4109                                 /*ArraySize=*/nullptr, &AllocaAddr);
4110         tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4111 
4112         Builder.CreateStore(RV.getScalarVal(), addr);
4113       }
4114 
4115       addr = Builder.CreateElementBitCast(addr, coercionType);
4116 
4117       unsigned IRArgPos = FirstIRArg;
4118       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4119         llvm::Type *eltType = coercionType->getElementType(i);
4120         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4121         Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4122         llvm::Value *elt = Builder.CreateLoad(eltAddr);
4123         IRCallArgs[IRArgPos++] = elt;
4124       }
4125       assert(IRArgPos == FirstIRArg + NumIRArgs);
4126 
4127       if (tempSize) {
4128         EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4129       }
4130 
4131       break;
4132     }
4133 
4134     case ABIArgInfo::Expand:
4135       unsigned IRArgPos = FirstIRArg;
4136       ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4137       assert(IRArgPos == FirstIRArg + NumIRArgs);
4138       break;
4139     }
4140   }
4141 
4142   const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4143   llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4144 
4145   // If we're using inalloca, set up that argument.
4146   if (ArgMemory.isValid()) {
4147     llvm::Value *Arg = ArgMemory.getPointer();
4148     if (CallInfo.isVariadic()) {
4149       // When passing non-POD arguments by value to variadic functions, we will
4150       // end up with a variadic prototype and an inalloca call site.  In such
4151       // cases, we can't do any parameter mismatch checks.  Give up and bitcast
4152       // the callee.
4153       unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4154       auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
4155       CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
4156     } else {
4157       llvm::Type *LastParamTy =
4158           IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4159       if (Arg->getType() != LastParamTy) {
4160 #ifndef NDEBUG
4161         // Assert that these structs have equivalent element types.
4162         llvm::StructType *FullTy = CallInfo.getArgStruct();
4163         llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4164             cast<llvm::PointerType>(LastParamTy)->getElementType());
4165         assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4166         for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4167                                                 DE = DeclaredTy->element_end(),
4168                                                 FI = FullTy->element_begin();
4169              DI != DE; ++DI, ++FI)
4170           assert(*DI == *FI);
4171 #endif
4172         Arg = Builder.CreateBitCast(Arg, LastParamTy);
4173       }
4174     }
4175     assert(IRFunctionArgs.hasInallocaArg());
4176     IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4177   }
4178 
4179   // 2. Prepare the function pointer.
4180 
4181   // If the callee is a bitcast of a non-variadic function to have a
4182   // variadic function pointer type, check to see if we can remove the
4183   // bitcast.  This comes up with unprototyped functions.
4184   //
4185   // This makes the IR nicer, but more importantly it ensures that we
4186   // can inline the function at -O0 if it is marked always_inline.
4187   auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
4188     llvm::FunctionType *CalleeFT =
4189       cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
4190     if (!CalleeFT->isVarArg())
4191       return Ptr;
4192 
4193     llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
4194     if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
4195       return Ptr;
4196 
4197     llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
4198     if (!OrigFn)
4199       return Ptr;
4200 
4201     llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4202 
4203     // If the original type is variadic, or if any of the component types
4204     // disagree, we cannot remove the cast.
4205     if (OrigFT->isVarArg() ||
4206         OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4207         OrigFT->getReturnType() != CalleeFT->getReturnType())
4208       return Ptr;
4209 
4210     for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4211       if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4212         return Ptr;
4213 
4214     return OrigFn;
4215   };
4216   CalleePtr = simplifyVariadicCallee(CalleePtr);
4217 
4218   // 3. Perform the actual call.
4219 
4220   // Deactivate any cleanups that we're supposed to do immediately before
4221   // the call.
4222   if (!CallArgs.getCleanupsToDeactivate().empty())
4223     deactivateArgCleanupsBeforeCall(*this, CallArgs);
4224 
4225   // Assert that the arguments we computed match up.  The IR verifier
4226   // will catch this, but this is a common enough source of problems
4227   // during IRGen changes that it's way better for debugging to catch
4228   // it ourselves here.
4229 #ifndef NDEBUG
4230   assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4231   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4232     // Inalloca argument can have different type.
4233     if (IRFunctionArgs.hasInallocaArg() &&
4234         i == IRFunctionArgs.getInallocaArgNo())
4235       continue;
4236     if (i < IRFuncTy->getNumParams())
4237       assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4238   }
4239 #endif
4240 
4241   // Compute the calling convention and attributes.
4242   unsigned CallingConv;
4243   llvm::AttributeList Attrs;
4244   CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4245                              Callee.getAbstractInfo(), Attrs, CallingConv,
4246                              /*AttrOnCallSite=*/true);
4247 
4248   // Apply some call-site-specific attributes.
4249   // TODO: work this into building the attribute set.
4250 
4251   // Apply always_inline to all calls within flatten functions.
4252   // FIXME: should this really take priority over __try, below?
4253   if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4254       !(Callee.getAbstractInfo().getCalleeDecl() &&
4255         Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) {
4256     Attrs =
4257         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4258                            llvm::Attribute::AlwaysInline);
4259   }
4260 
4261   // Disable inlining inside SEH __try blocks.
4262   if (isSEHTryScope()) {
4263     Attrs =
4264         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4265                            llvm::Attribute::NoInline);
4266   }
4267 
4268   // Decide whether to use a call or an invoke.
4269   bool CannotThrow;
4270   if (currentFunctionUsesSEHTry()) {
4271     // SEH cares about asynchronous exceptions, so everything can "throw."
4272     CannotThrow = false;
4273   } else if (isCleanupPadScope() &&
4274              EHPersonality::get(*this).isMSVCXXPersonality()) {
4275     // The MSVC++ personality will implicitly terminate the program if an
4276     // exception is thrown during a cleanup outside of a try/catch.
4277     // We don't need to model anything in IR to get this behavior.
4278     CannotThrow = true;
4279   } else {
4280     // Otherwise, nounwind call sites will never throw.
4281     CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4282                                      llvm::Attribute::NoUnwind);
4283   }
4284 
4285   // If we made a temporary, be sure to clean up after ourselves. Note that we
4286   // can't depend on being inside of an ExprWithCleanups, so we need to manually
4287   // pop this cleanup later on. Being eager about this is OK, since this
4288   // temporary is 'invisible' outside of the callee.
4289   if (UnusedReturnSizePtr)
4290     pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4291                                          UnusedReturnSizePtr);
4292 
4293   llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4294 
4295   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4296       getBundlesForFunclet(CalleePtr);
4297 
4298   // Emit the actual call/invoke instruction.
4299   llvm::CallSite CS;
4300   if (!InvokeDest) {
4301     CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
4302   } else {
4303     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4304     CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
4305                               BundleList);
4306     EmitBlock(Cont);
4307   }
4308   llvm::Instruction *CI = CS.getInstruction();
4309   if (callOrInvoke)
4310     *callOrInvoke = CI;
4311 
4312   // Apply the attributes and calling convention.
4313   CS.setAttributes(Attrs);
4314   CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4315 
4316   // Apply various metadata.
4317 
4318   if (!CI->getType()->isVoidTy())
4319     CI->setName("call");
4320 
4321   // Insert instrumentation or attach profile metadata at indirect call sites.
4322   // For more details, see the comment before the definition of
4323   // IPVK_IndirectCallTarget in InstrProfData.inc.
4324   if (!CS.getCalledFunction())
4325     PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4326                      CI, CalleePtr);
4327 
4328   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4329   // optimizer it can aggressively ignore unwind edges.
4330   if (CGM.getLangOpts().ObjCAutoRefCount)
4331     AddObjCARCExceptionMetadata(CI);
4332 
4333   // Suppress tail calls if requested.
4334   if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4335     const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4336     if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4337       Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4338   }
4339 
4340   // 4. Finish the call.
4341 
4342   // If the call doesn't return, finish the basic block and clear the
4343   // insertion point; this allows the rest of IRGen to discard
4344   // unreachable code.
4345   if (CS.doesNotReturn()) {
4346     if (UnusedReturnSizePtr)
4347       PopCleanupBlock();
4348 
4349     // Strip away the noreturn attribute to better diagnose unreachable UB.
4350     if (SanOpts.has(SanitizerKind::Unreachable)) {
4351       if (auto *F = CS.getCalledFunction())
4352         F->removeFnAttr(llvm::Attribute::NoReturn);
4353       CS.removeAttribute(llvm::AttributeList::FunctionIndex,
4354                          llvm::Attribute::NoReturn);
4355     }
4356 
4357     EmitUnreachable(Loc);
4358     Builder.ClearInsertionPoint();
4359 
4360     // FIXME: For now, emit a dummy basic block because expr emitters in
4361     // generally are not ready to handle emitting expressions at unreachable
4362     // points.
4363     EnsureInsertPoint();
4364 
4365     // Return a reasonable RValue.
4366     return GetUndefRValue(RetTy);
4367   }
4368 
4369   // Perform the swifterror writeback.
4370   if (swiftErrorTemp.isValid()) {
4371     llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4372     Builder.CreateStore(errorResult, swiftErrorArg);
4373   }
4374 
4375   // Emit any call-associated writebacks immediately.  Arguably this
4376   // should happen after any return-value munging.
4377   if (CallArgs.hasWritebacks())
4378     emitWritebacks(*this, CallArgs);
4379 
4380   // The stack cleanup for inalloca arguments has to run out of the normal
4381   // lexical order, so deactivate it and run it manually here.
4382   CallArgs.freeArgumentMemory(*this);
4383 
4384   // Extract the return value.
4385   RValue Ret = [&] {
4386     switch (RetAI.getKind()) {
4387     case ABIArgInfo::CoerceAndExpand: {
4388       auto coercionType = RetAI.getCoerceAndExpandType();
4389       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4390 
4391       Address addr = SRetPtr;
4392       addr = Builder.CreateElementBitCast(addr, coercionType);
4393 
4394       assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4395       bool requiresExtract = isa<llvm::StructType>(CI->getType());
4396 
4397       unsigned unpaddedIndex = 0;
4398       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4399         llvm::Type *eltType = coercionType->getElementType(i);
4400         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4401         Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4402         llvm::Value *elt = CI;
4403         if (requiresExtract)
4404           elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4405         else
4406           assert(unpaddedIndex == 0);
4407         Builder.CreateStore(elt, eltAddr);
4408       }
4409       // FALLTHROUGH
4410       LLVM_FALLTHROUGH;
4411     }
4412 
4413     case ABIArgInfo::InAlloca:
4414     case ABIArgInfo::Indirect: {
4415       RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4416       if (UnusedReturnSizePtr)
4417         PopCleanupBlock();
4418       return ret;
4419     }
4420 
4421     case ABIArgInfo::Ignore:
4422       // If we are ignoring an argument that had a result, make sure to
4423       // construct the appropriate return value for our caller.
4424       return GetUndefRValue(RetTy);
4425 
4426     case ABIArgInfo::Extend:
4427     case ABIArgInfo::Direct: {
4428       llvm::Type *RetIRTy = ConvertType(RetTy);
4429       if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4430         switch (getEvaluationKind(RetTy)) {
4431         case TEK_Complex: {
4432           llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4433           llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4434           return RValue::getComplex(std::make_pair(Real, Imag));
4435         }
4436         case TEK_Aggregate: {
4437           Address DestPtr = ReturnValue.getValue();
4438           bool DestIsVolatile = ReturnValue.isVolatile();
4439 
4440           if (!DestPtr.isValid()) {
4441             DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4442             DestIsVolatile = false;
4443           }
4444           BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4445           return RValue::getAggregate(DestPtr);
4446         }
4447         case TEK_Scalar: {
4448           // If the argument doesn't match, perform a bitcast to coerce it.  This
4449           // can happen due to trivial type mismatches.
4450           llvm::Value *V = CI;
4451           if (V->getType() != RetIRTy)
4452             V = Builder.CreateBitCast(V, RetIRTy);
4453           return RValue::get(V);
4454         }
4455         }
4456         llvm_unreachable("bad evaluation kind");
4457       }
4458 
4459       Address DestPtr = ReturnValue.getValue();
4460       bool DestIsVolatile = ReturnValue.isVolatile();
4461 
4462       if (!DestPtr.isValid()) {
4463         DestPtr = CreateMemTemp(RetTy, "coerce");
4464         DestIsVolatile = false;
4465       }
4466 
4467       // If the value is offset in memory, apply the offset now.
4468       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4469       CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4470 
4471       return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4472     }
4473 
4474     case ABIArgInfo::Expand:
4475       llvm_unreachable("Invalid ABI kind for return argument");
4476     }
4477 
4478     llvm_unreachable("Unhandled ABIArgInfo::Kind");
4479   } ();
4480 
4481   // Emit the assume_aligned check on the return value.
4482   const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4483   if (Ret.isScalar() && TargetDecl) {
4484     if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4485       llvm::Value *OffsetValue = nullptr;
4486       if (const auto *Offset = AA->getOffset())
4487         OffsetValue = EmitScalarExpr(Offset);
4488 
4489       llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4490       llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4491       EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
4492                               OffsetValue);
4493     } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4494       llvm::Value *ParamVal =
4495           CallArgs[AA->getParamIndex().getLLVMIndex()].getRValue(
4496               *this).getScalarVal();
4497       EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
4498     }
4499   }
4500 
4501   return Ret;
4502 }
4503 
4504 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4505   if (isVirtual()) {
4506     const CallExpr *CE = getVirtualCallExpr();
4507     return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4508         CGF, getVirtualMethodDecl(), getThisAddress(), getFunctionType(),
4509         CE ? CE->getBeginLoc() : SourceLocation());
4510   }
4511 
4512   return *this;
4513 }
4514 
4515 /* VarArg handling */
4516 
4517 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4518   VAListAddr = VE->isMicrosoftABI()
4519                  ? EmitMSVAListRef(VE->getSubExpr())
4520                  : EmitVAListRef(VE->getSubExpr());
4521   QualType Ty = VE->getType();
4522   if (VE->isMicrosoftABI())
4523     return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4524   return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4525 }
4526