1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // These classes wrap the information about a call or function
10 // definition used to handle ABI compliancy.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGCall.h"
15 #include "ABIInfo.h"
16 #include "CGBlocks.h"
17 #include "CGCXXABI.h"
18 #include "CGCleanup.h"
19 #include "CodeGenFunction.h"
20 #include "CodeGenModule.h"
21 #include "TargetInfo.h"
22 #include "clang/AST/Attr.h"
23 #include "clang/AST/Decl.h"
24 #include "clang/AST/DeclCXX.h"
25 #include "clang/AST/DeclObjC.h"
26 #include "clang/Basic/CodeGenOptions.h"
27 #include "clang/Basic/TargetBuiltins.h"
28 #include "clang/Basic/TargetInfo.h"
29 #include "clang/CodeGen/CGFunctionInfo.h"
30 #include "clang/CodeGen/SwiftCallingConv.h"
31 #include "llvm/ADT/StringExtras.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/IR/Attributes.h"
34 #include "llvm/IR/CallingConv.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/InlineAsm.h"
37 #include "llvm/IR/IntrinsicInst.h"
38 #include "llvm/IR/Intrinsics.h"
39 #include "llvm/Transforms/Utils/Local.h"
40 using namespace clang;
41 using namespace CodeGen;
42 
43 /***/
44 
45 unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
46   switch (CC) {
47   default: return llvm::CallingConv::C;
48   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
49   case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
50   case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
51   case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
52   case CC_Win64: return llvm::CallingConv::Win64;
53   case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
54   case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
55   case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
56   case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
57   // TODO: Add support for __pascal to LLVM.
58   case CC_X86Pascal: return llvm::CallingConv::C;
59   // TODO: Add support for __vectorcall to LLVM.
60   case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
61   case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
62   case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
63   case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
64   case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
65   case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
66   case CC_Swift: return llvm::CallingConv::Swift;
67   }
68 }
69 
70 /// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
71 /// qualification. Either or both of RD and MD may be null. A null RD indicates
72 /// that there is no meaningful 'this' type, and a null MD can occur when
73 /// calling a method pointer.
74 CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
75                                          const CXXMethodDecl *MD) {
76   QualType RecTy;
77   if (RD)
78     RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
79   else
80     RecTy = Context.VoidTy;
81 
82   if (MD)
83     RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace());
84   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
85 }
86 
87 /// Returns the canonical formal type of the given C++ method.
88 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
89   return MD->getType()->getCanonicalTypeUnqualified()
90            .getAs<FunctionProtoType>();
91 }
92 
93 /// Returns the "extra-canonicalized" return type, which discards
94 /// qualifiers on the return type.  Codegen doesn't care about them,
95 /// and it makes ABI code a little easier to be able to assume that
96 /// all parameter and return types are top-level unqualified.
97 static CanQualType GetReturnType(QualType RetTy) {
98   return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
99 }
100 
101 /// Arrange the argument and result information for a value of the given
102 /// unprototyped freestanding function type.
103 const CGFunctionInfo &
104 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
105   // When translating an unprototyped function type, always use a
106   // variadic type.
107   return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
108                                  /*instanceMethod=*/false,
109                                  /*chainCall=*/false, None,
110                                  FTNP->getExtInfo(), {}, RequiredArgs(0));
111 }
112 
113 static void addExtParameterInfosForCall(
114          llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
115                                         const FunctionProtoType *proto,
116                                         unsigned prefixArgs,
117                                         unsigned totalArgs) {
118   assert(proto->hasExtParameterInfos());
119   assert(paramInfos.size() <= prefixArgs);
120   assert(proto->getNumParams() + prefixArgs <= totalArgs);
121 
122   paramInfos.reserve(totalArgs);
123 
124   // Add default infos for any prefix args that don't already have infos.
125   paramInfos.resize(prefixArgs);
126 
127   // Add infos for the prototype.
128   for (const auto &ParamInfo : proto->getExtParameterInfos()) {
129     paramInfos.push_back(ParamInfo);
130     // pass_object_size params have no parameter info.
131     if (ParamInfo.hasPassObjectSize())
132       paramInfos.emplace_back();
133   }
134 
135   assert(paramInfos.size() <= totalArgs &&
136          "Did we forget to insert pass_object_size args?");
137   // Add default infos for the variadic and/or suffix arguments.
138   paramInfos.resize(totalArgs);
139 }
140 
141 /// Adds the formal parameters in FPT to the given prefix. If any parameter in
142 /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
143 static void appendParameterTypes(const CodeGenTypes &CGT,
144                                  SmallVectorImpl<CanQualType> &prefix,
145               SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
146                                  CanQual<FunctionProtoType> FPT) {
147   // Fast path: don't touch param info if we don't need to.
148   if (!FPT->hasExtParameterInfos()) {
149     assert(paramInfos.empty() &&
150            "We have paramInfos, but the prototype doesn't?");
151     prefix.append(FPT->param_type_begin(), FPT->param_type_end());
152     return;
153   }
154 
155   unsigned PrefixSize = prefix.size();
156   // In the vast majority of cases, we'll have precisely FPT->getNumParams()
157   // parameters; the only thing that can change this is the presence of
158   // pass_object_size. So, we preallocate for the common case.
159   prefix.reserve(prefix.size() + FPT->getNumParams());
160 
161   auto ExtInfos = FPT->getExtParameterInfos();
162   assert(ExtInfos.size() == FPT->getNumParams());
163   for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
164     prefix.push_back(FPT->getParamType(I));
165     if (ExtInfos[I].hasPassObjectSize())
166       prefix.push_back(CGT.getContext().getSizeType());
167   }
168 
169   addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
170                               prefix.size());
171 }
172 
173 /// Arrange the LLVM function layout for a value of the given function
174 /// type, on top of any implicit parameters already stored.
175 static const CGFunctionInfo &
176 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
177                         SmallVectorImpl<CanQualType> &prefix,
178                         CanQual<FunctionProtoType> FTP) {
179   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
180   RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
181   // FIXME: Kill copy.
182   appendParameterTypes(CGT, prefix, paramInfos, FTP);
183   CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
184 
185   return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
186                                      /*chainCall=*/false, prefix,
187                                      FTP->getExtInfo(), paramInfos,
188                                      Required);
189 }
190 
191 /// Arrange the argument and result information for a value of the
192 /// given freestanding function type.
193 const CGFunctionInfo &
194 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
195   SmallVector<CanQualType, 16> argTypes;
196   return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
197                                    FTP);
198 }
199 
200 static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
201   // Set the appropriate calling convention for the Function.
202   if (D->hasAttr<StdCallAttr>())
203     return CC_X86StdCall;
204 
205   if (D->hasAttr<FastCallAttr>())
206     return CC_X86FastCall;
207 
208   if (D->hasAttr<RegCallAttr>())
209     return CC_X86RegCall;
210 
211   if (D->hasAttr<ThisCallAttr>())
212     return CC_X86ThisCall;
213 
214   if (D->hasAttr<VectorCallAttr>())
215     return CC_X86VectorCall;
216 
217   if (D->hasAttr<PascalAttr>())
218     return CC_X86Pascal;
219 
220   if (PcsAttr *PCS = D->getAttr<PcsAttr>())
221     return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
222 
223   if (D->hasAttr<AArch64VectorPcsAttr>())
224     return CC_AArch64VectorCall;
225 
226   if (D->hasAttr<IntelOclBiccAttr>())
227     return CC_IntelOclBicc;
228 
229   if (D->hasAttr<MSABIAttr>())
230     return IsWindows ? CC_C : CC_Win64;
231 
232   if (D->hasAttr<SysVABIAttr>())
233     return IsWindows ? CC_X86_64SysV : CC_C;
234 
235   if (D->hasAttr<PreserveMostAttr>())
236     return CC_PreserveMost;
237 
238   if (D->hasAttr<PreserveAllAttr>())
239     return CC_PreserveAll;
240 
241   return CC_C;
242 }
243 
244 /// Arrange the argument and result information for a call to an
245 /// unknown C++ non-static member function of the given abstract type.
246 /// (A null RD means we don't have any meaningful "this" argument type,
247 ///  so fall back to a generic pointer type).
248 /// The member function must be an ordinary function, i.e. not a
249 /// constructor or destructor.
250 const CGFunctionInfo &
251 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
252                                    const FunctionProtoType *FTP,
253                                    const CXXMethodDecl *MD) {
254   SmallVector<CanQualType, 16> argTypes;
255 
256   // Add the 'this' pointer.
257   argTypes.push_back(DeriveThisType(RD, MD));
258 
259   return ::arrangeLLVMFunctionInfo(
260       *this, true, argTypes,
261       FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
262 }
263 
264 /// Set calling convention for CUDA/HIP kernel.
265 static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
266                                            const FunctionDecl *FD) {
267   if (FD->hasAttr<CUDAGlobalAttr>()) {
268     const FunctionType *FT = FTy->getAs<FunctionType>();
269     CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
270     FTy = FT->getCanonicalTypeUnqualified();
271   }
272 }
273 
274 /// Arrange the argument and result information for a declaration or
275 /// definition of the given C++ non-static member function.  The
276 /// member function must be an ordinary function, i.e. not a
277 /// constructor or destructor.
278 const CGFunctionInfo &
279 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
280   assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
281   assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
282 
283   CanQualType FT = GetFormalType(MD).getAs<Type>();
284   setCUDAKernelCallingConvention(FT, CGM, MD);
285   auto prototype = FT.getAs<FunctionProtoType>();
286 
287   if (MD->isInstance()) {
288     // The abstract case is perfectly fine.
289     const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
290     return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
291   }
292 
293   return arrangeFreeFunctionType(prototype);
294 }
295 
296 bool CodeGenTypes::inheritingCtorHasParams(
297     const InheritedConstructor &Inherited, CXXCtorType Type) {
298   // Parameters are unnecessary if we're constructing a base class subobject
299   // and the inherited constructor lives in a virtual base.
300   return Type == Ctor_Complete ||
301          !Inherited.getShadowDecl()->constructsVirtualBase() ||
302          !Target.getCXXABI().hasConstructorVariants();
303 }
304 
305 const CGFunctionInfo &
306 CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
307   auto *MD = cast<CXXMethodDecl>(GD.getDecl());
308 
309   SmallVector<CanQualType, 16> argTypes;
310   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
311   argTypes.push_back(DeriveThisType(MD->getParent(), MD));
312 
313   bool PassParams = true;
314 
315   if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
316     // A base class inheriting constructor doesn't get forwarded arguments
317     // needed to construct a virtual base (or base class thereof).
318     if (auto Inherited = CD->getInheritedConstructor())
319       PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
320   }
321 
322   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
323 
324   // Add the formal parameters.
325   if (PassParams)
326     appendParameterTypes(*this, argTypes, paramInfos, FTP);
327 
328   CGCXXABI::AddedStructorArgs AddedArgs =
329       TheCXXABI.buildStructorSignature(GD, argTypes);
330   if (!paramInfos.empty()) {
331     // Note: prefix implies after the first param.
332     if (AddedArgs.Prefix)
333       paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
334                         FunctionProtoType::ExtParameterInfo{});
335     if (AddedArgs.Suffix)
336       paramInfos.append(AddedArgs.Suffix,
337                         FunctionProtoType::ExtParameterInfo{});
338   }
339 
340   RequiredArgs required =
341       (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
342                                       : RequiredArgs::All);
343 
344   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
345   CanQualType resultType = TheCXXABI.HasThisReturn(GD)
346                                ? argTypes.front()
347                                : TheCXXABI.hasMostDerivedReturn(GD)
348                                      ? CGM.getContext().VoidPtrTy
349                                      : Context.VoidTy;
350   return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
351                                  /*chainCall=*/false, argTypes, extInfo,
352                                  paramInfos, required);
353 }
354 
355 static SmallVector<CanQualType, 16>
356 getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
357   SmallVector<CanQualType, 16> argTypes;
358   for (auto &arg : args)
359     argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
360   return argTypes;
361 }
362 
363 static SmallVector<CanQualType, 16>
364 getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
365   SmallVector<CanQualType, 16> argTypes;
366   for (auto &arg : args)
367     argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
368   return argTypes;
369 }
370 
371 static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
372 getExtParameterInfosForCall(const FunctionProtoType *proto,
373                             unsigned prefixArgs, unsigned totalArgs) {
374   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
375   if (proto->hasExtParameterInfos()) {
376     addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
377   }
378   return result;
379 }
380 
381 /// Arrange a call to a C++ method, passing the given arguments.
382 ///
383 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
384 /// parameter.
385 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
386 /// args.
387 /// PassProtoArgs indicates whether `args` has args for the parameters in the
388 /// given CXXConstructorDecl.
389 const CGFunctionInfo &
390 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
391                                         const CXXConstructorDecl *D,
392                                         CXXCtorType CtorKind,
393                                         unsigned ExtraPrefixArgs,
394                                         unsigned ExtraSuffixArgs,
395                                         bool PassProtoArgs) {
396   // FIXME: Kill copy.
397   SmallVector<CanQualType, 16> ArgTypes;
398   for (const auto &Arg : args)
399     ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
400 
401   // +1 for implicit this, which should always be args[0].
402   unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
403 
404   CanQual<FunctionProtoType> FPT = GetFormalType(D);
405   RequiredArgs Required = PassProtoArgs
406                               ? RequiredArgs::forPrototypePlus(
407                                     FPT, TotalPrefixArgs + ExtraSuffixArgs)
408                               : RequiredArgs::All;
409 
410   GlobalDecl GD(D, CtorKind);
411   CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
412                                ? ArgTypes.front()
413                                : TheCXXABI.hasMostDerivedReturn(GD)
414                                      ? CGM.getContext().VoidPtrTy
415                                      : Context.VoidTy;
416 
417   FunctionType::ExtInfo Info = FPT->getExtInfo();
418   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
419   // If the prototype args are elided, we should only have ABI-specific args,
420   // which never have param info.
421   if (PassProtoArgs && FPT->hasExtParameterInfos()) {
422     // ABI-specific suffix arguments are treated the same as variadic arguments.
423     addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
424                                 ArgTypes.size());
425   }
426   return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
427                                  /*chainCall=*/false, ArgTypes, Info,
428                                  ParamInfos, Required);
429 }
430 
431 /// Arrange the argument and result information for the declaration or
432 /// definition of the given function.
433 const CGFunctionInfo &
434 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
435   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
436     if (MD->isInstance())
437       return arrangeCXXMethodDeclaration(MD);
438 
439   CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
440 
441   assert(isa<FunctionType>(FTy));
442   setCUDAKernelCallingConvention(FTy, CGM, FD);
443 
444   // When declaring a function without a prototype, always use a
445   // non-variadic type.
446   if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
447     return arrangeLLVMFunctionInfo(
448         noProto->getReturnType(), /*instanceMethod=*/false,
449         /*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All);
450   }
451 
452   return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
453 }
454 
455 /// Arrange the argument and result information for the declaration or
456 /// definition of an Objective-C method.
457 const CGFunctionInfo &
458 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
459   // It happens that this is the same as a call with no optional
460   // arguments, except also using the formal 'self' type.
461   return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
462 }
463 
464 /// Arrange the argument and result information for the function type
465 /// through which to perform a send to the given Objective-C method,
466 /// using the given receiver type.  The receiver type is not always
467 /// the 'self' type of the method or even an Objective-C pointer type.
468 /// This is *not* the right method for actually performing such a
469 /// message send, due to the possibility of optional arguments.
470 const CGFunctionInfo &
471 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
472                                               QualType receiverType) {
473   SmallVector<CanQualType, 16> argTys;
474   SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
475   argTys.push_back(Context.getCanonicalParamType(receiverType));
476   argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
477   // FIXME: Kill copy?
478   for (const auto *I : MD->parameters()) {
479     argTys.push_back(Context.getCanonicalParamType(I->getType()));
480     auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
481         I->hasAttr<NoEscapeAttr>());
482     extParamInfos.push_back(extParamInfo);
483   }
484 
485   FunctionType::ExtInfo einfo;
486   bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
487   einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
488 
489   if (getContext().getLangOpts().ObjCAutoRefCount &&
490       MD->hasAttr<NSReturnsRetainedAttr>())
491     einfo = einfo.withProducesResult(true);
492 
493   RequiredArgs required =
494     (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
495 
496   return arrangeLLVMFunctionInfo(
497       GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
498       /*chainCall=*/false, argTys, einfo, extParamInfos, required);
499 }
500 
501 const CGFunctionInfo &
502 CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
503                                                  const CallArgList &args) {
504   auto argTypes = getArgTypesForCall(Context, args);
505   FunctionType::ExtInfo einfo;
506 
507   return arrangeLLVMFunctionInfo(
508       GetReturnType(returnType), /*instanceMethod=*/false,
509       /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
510 }
511 
512 const CGFunctionInfo &
513 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
514   // FIXME: Do we need to handle ObjCMethodDecl?
515   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
516 
517   if (isa<CXXConstructorDecl>(GD.getDecl()) ||
518       isa<CXXDestructorDecl>(GD.getDecl()))
519     return arrangeCXXStructorDeclaration(GD);
520 
521   return arrangeFunctionDeclaration(FD);
522 }
523 
524 /// Arrange a thunk that takes 'this' as the first parameter followed by
525 /// varargs.  Return a void pointer, regardless of the actual return type.
526 /// The body of the thunk will end in a musttail call to a function of the
527 /// correct type, and the caller will bitcast the function to the correct
528 /// prototype.
529 const CGFunctionInfo &
530 CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
531   assert(MD->isVirtual() && "only methods have thunks");
532   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
533   CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
534   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
535                                  /*chainCall=*/false, ArgTys,
536                                  FTP->getExtInfo(), {}, RequiredArgs(1));
537 }
538 
539 const CGFunctionInfo &
540 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
541                                    CXXCtorType CT) {
542   assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
543 
544   CanQual<FunctionProtoType> FTP = GetFormalType(CD);
545   SmallVector<CanQualType, 2> ArgTys;
546   const CXXRecordDecl *RD = CD->getParent();
547   ArgTys.push_back(DeriveThisType(RD, CD));
548   if (CT == Ctor_CopyingClosure)
549     ArgTys.push_back(*FTP->param_type_begin());
550   if (RD->getNumVBases() > 0)
551     ArgTys.push_back(Context.IntTy);
552   CallingConv CC = Context.getDefaultCallingConvention(
553       /*IsVariadic=*/false, /*IsCXXMethod=*/true);
554   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
555                                  /*chainCall=*/false, ArgTys,
556                                  FunctionType::ExtInfo(CC), {},
557                                  RequiredArgs::All);
558 }
559 
560 /// Arrange a call as unto a free function, except possibly with an
561 /// additional number of formal parameters considered required.
562 static const CGFunctionInfo &
563 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
564                             CodeGenModule &CGM,
565                             const CallArgList &args,
566                             const FunctionType *fnType,
567                             unsigned numExtraRequiredArgs,
568                             bool chainCall) {
569   assert(args.size() >= numExtraRequiredArgs);
570 
571   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
572 
573   // In most cases, there are no optional arguments.
574   RequiredArgs required = RequiredArgs::All;
575 
576   // If we have a variadic prototype, the required arguments are the
577   // extra prefix plus the arguments in the prototype.
578   if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
579     if (proto->isVariadic())
580       required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);
581 
582     if (proto->hasExtParameterInfos())
583       addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
584                                   args.size());
585 
586   // If we don't have a prototype at all, but we're supposed to
587   // explicitly use the variadic convention for unprototyped calls,
588   // treat all of the arguments as required but preserve the nominal
589   // possibility of variadics.
590   } else if (CGM.getTargetCodeGenInfo()
591                 .isNoProtoCallVariadic(args,
592                                        cast<FunctionNoProtoType>(fnType))) {
593     required = RequiredArgs(args.size());
594   }
595 
596   // FIXME: Kill copy.
597   SmallVector<CanQualType, 16> argTypes;
598   for (const auto &arg : args)
599     argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
600   return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
601                                      /*instanceMethod=*/false, chainCall,
602                                      argTypes, fnType->getExtInfo(), paramInfos,
603                                      required);
604 }
605 
606 /// Figure out the rules for calling a function with the given formal
607 /// type using the given arguments.  The arguments are necessary
608 /// because the function might be unprototyped, in which case it's
609 /// target-dependent in crazy ways.
610 const CGFunctionInfo &
611 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
612                                       const FunctionType *fnType,
613                                       bool chainCall) {
614   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
615                                      chainCall ? 1 : 0, chainCall);
616 }
617 
618 /// A block function is essentially a free function with an
619 /// extra implicit argument.
620 const CGFunctionInfo &
621 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
622                                        const FunctionType *fnType) {
623   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
624                                      /*chainCall=*/false);
625 }
626 
627 const CGFunctionInfo &
628 CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
629                                               const FunctionArgList &params) {
630   auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
631   auto argTypes = getArgTypesForDeclaration(Context, params);
632 
633   return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
634                                  /*instanceMethod*/ false, /*chainCall*/ false,
635                                  argTypes, proto->getExtInfo(), paramInfos,
636                                  RequiredArgs::forPrototypePlus(proto, 1));
637 }
638 
639 const CGFunctionInfo &
640 CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
641                                          const CallArgList &args) {
642   // FIXME: Kill copy.
643   SmallVector<CanQualType, 16> argTypes;
644   for (const auto &Arg : args)
645     argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
646   return arrangeLLVMFunctionInfo(
647       GetReturnType(resultType), /*instanceMethod=*/false,
648       /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
649       /*paramInfos=*/ {}, RequiredArgs::All);
650 }
651 
652 const CGFunctionInfo &
653 CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
654                                                 const FunctionArgList &args) {
655   auto argTypes = getArgTypesForDeclaration(Context, args);
656 
657   return arrangeLLVMFunctionInfo(
658       GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
659       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
660 }
661 
662 const CGFunctionInfo &
663 CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
664                                               ArrayRef<CanQualType> argTypes) {
665   return arrangeLLVMFunctionInfo(
666       resultType, /*instanceMethod=*/false, /*chainCall=*/false,
667       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
668 }
669 
670 /// Arrange a call to a C++ method, passing the given arguments.
671 ///
672 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
673 /// does not count `this`.
674 const CGFunctionInfo &
675 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
676                                    const FunctionProtoType *proto,
677                                    RequiredArgs required,
678                                    unsigned numPrefixArgs) {
679   assert(numPrefixArgs + 1 <= args.size() &&
680          "Emitting a call with less args than the required prefix?");
681   // Add one to account for `this`. It's a bit awkward here, but we don't count
682   // `this` in similar places elsewhere.
683   auto paramInfos =
684     getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
685 
686   // FIXME: Kill copy.
687   auto argTypes = getArgTypesForCall(Context, args);
688 
689   FunctionType::ExtInfo info = proto->getExtInfo();
690   return arrangeLLVMFunctionInfo(
691       GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
692       /*chainCall=*/false, argTypes, info, paramInfos, required);
693 }
694 
695 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
696   return arrangeLLVMFunctionInfo(
697       getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
698       None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
699 }
700 
701 const CGFunctionInfo &
702 CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
703                           const CallArgList &args) {
704   assert(signature.arg_size() <= args.size());
705   if (signature.arg_size() == args.size())
706     return signature;
707 
708   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
709   auto sigParamInfos = signature.getExtParameterInfos();
710   if (!sigParamInfos.empty()) {
711     paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
712     paramInfos.resize(args.size());
713   }
714 
715   auto argTypes = getArgTypesForCall(Context, args);
716 
717   assert(signature.getRequiredArgs().allowsOptionalArgs());
718   return arrangeLLVMFunctionInfo(signature.getReturnType(),
719                                  signature.isInstanceMethod(),
720                                  signature.isChainCall(),
721                                  argTypes,
722                                  signature.getExtInfo(),
723                                  paramInfos,
724                                  signature.getRequiredArgs());
725 }
726 
727 namespace clang {
728 namespace CodeGen {
729 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
730 }
731 }
732 
733 /// Arrange the argument and result information for an abstract value
734 /// of a given function type.  This is the method which all of the
735 /// above functions ultimately defer to.
736 const CGFunctionInfo &
737 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
738                                       bool instanceMethod,
739                                       bool chainCall,
740                                       ArrayRef<CanQualType> argTypes,
741                                       FunctionType::ExtInfo info,
742                      ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
743                                       RequiredArgs required) {
744   assert(llvm::all_of(argTypes,
745                       [](CanQualType T) { return T.isCanonicalAsParam(); }));
746 
747   // Lookup or create unique function info.
748   llvm::FoldingSetNodeID ID;
749   CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
750                           required, resultType, argTypes);
751 
752   void *insertPos = nullptr;
753   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
754   if (FI)
755     return *FI;
756 
757   unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
758 
759   // Construct the function info.  We co-allocate the ArgInfos.
760   FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
761                               paramInfos, resultType, argTypes, required);
762   FunctionInfos.InsertNode(FI, insertPos);
763 
764   bool inserted = FunctionsBeingProcessed.insert(FI).second;
765   (void)inserted;
766   assert(inserted && "Recursively being processed?");
767 
768   // Compute ABI information.
769   if (CC == llvm::CallingConv::SPIR_KERNEL) {
770     // Force target independent argument handling for the host visible
771     // kernel functions.
772     computeSPIRKernelABIInfo(CGM, *FI);
773   } else if (info.getCC() == CC_Swift) {
774     swiftcall::computeABIInfo(CGM, *FI);
775   } else {
776     getABIInfo().computeInfo(*FI);
777   }
778 
779   // Loop over all of the computed argument and return value info.  If any of
780   // them are direct or extend without a specified coerce type, specify the
781   // default now.
782   ABIArgInfo &retInfo = FI->getReturnInfo();
783   if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
784     retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
785 
786   for (auto &I : FI->arguments())
787     if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
788       I.info.setCoerceToType(ConvertType(I.type));
789 
790   bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
791   assert(erased && "Not in set?");
792 
793   return *FI;
794 }
795 
796 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
797                                        bool instanceMethod,
798                                        bool chainCall,
799                                        const FunctionType::ExtInfo &info,
800                                        ArrayRef<ExtParameterInfo> paramInfos,
801                                        CanQualType resultType,
802                                        ArrayRef<CanQualType> argTypes,
803                                        RequiredArgs required) {
804   assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
805   assert(!required.allowsOptionalArgs() ||
806          required.getNumRequiredArgs() <= argTypes.size());
807 
808   void *buffer =
809     operator new(totalSizeToAlloc<ArgInfo,             ExtParameterInfo>(
810                                   argTypes.size() + 1, paramInfos.size()));
811 
812   CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
813   FI->CallingConvention = llvmCC;
814   FI->EffectiveCallingConvention = llvmCC;
815   FI->ASTCallingConvention = info.getCC();
816   FI->InstanceMethod = instanceMethod;
817   FI->ChainCall = chainCall;
818   FI->CmseNSCall = info.getCmseNSCall();
819   FI->NoReturn = info.getNoReturn();
820   FI->ReturnsRetained = info.getProducesResult();
821   FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
822   FI->NoCfCheck = info.getNoCfCheck();
823   FI->Required = required;
824   FI->HasRegParm = info.getHasRegParm();
825   FI->RegParm = info.getRegParm();
826   FI->ArgStruct = nullptr;
827   FI->ArgStructAlign = 0;
828   FI->NumArgs = argTypes.size();
829   FI->HasExtParameterInfos = !paramInfos.empty();
830   FI->getArgsBuffer()[0].type = resultType;
831   for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
832     FI->getArgsBuffer()[i + 1].type = argTypes[i];
833   for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
834     FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
835   return FI;
836 }
837 
838 /***/
839 
840 namespace {
841 // ABIArgInfo::Expand implementation.
842 
843 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
844 struct TypeExpansion {
845   enum TypeExpansionKind {
846     // Elements of constant arrays are expanded recursively.
847     TEK_ConstantArray,
848     // Record fields are expanded recursively (but if record is a union, only
849     // the field with the largest size is expanded).
850     TEK_Record,
851     // For complex types, real and imaginary parts are expanded recursively.
852     TEK_Complex,
853     // All other types are not expandable.
854     TEK_None
855   };
856 
857   const TypeExpansionKind Kind;
858 
859   TypeExpansion(TypeExpansionKind K) : Kind(K) {}
860   virtual ~TypeExpansion() {}
861 };
862 
863 struct ConstantArrayExpansion : TypeExpansion {
864   QualType EltTy;
865   uint64_t NumElts;
866 
867   ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
868       : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
869   static bool classof(const TypeExpansion *TE) {
870     return TE->Kind == TEK_ConstantArray;
871   }
872 };
873 
874 struct RecordExpansion : TypeExpansion {
875   SmallVector<const CXXBaseSpecifier *, 1> Bases;
876 
877   SmallVector<const FieldDecl *, 1> Fields;
878 
879   RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
880                   SmallVector<const FieldDecl *, 1> &&Fields)
881       : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
882         Fields(std::move(Fields)) {}
883   static bool classof(const TypeExpansion *TE) {
884     return TE->Kind == TEK_Record;
885   }
886 };
887 
888 struct ComplexExpansion : TypeExpansion {
889   QualType EltTy;
890 
891   ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
892   static bool classof(const TypeExpansion *TE) {
893     return TE->Kind == TEK_Complex;
894   }
895 };
896 
897 struct NoExpansion : TypeExpansion {
898   NoExpansion() : TypeExpansion(TEK_None) {}
899   static bool classof(const TypeExpansion *TE) {
900     return TE->Kind == TEK_None;
901   }
902 };
903 }  // namespace
904 
905 static std::unique_ptr<TypeExpansion>
906 getTypeExpansion(QualType Ty, const ASTContext &Context) {
907   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
908     return std::make_unique<ConstantArrayExpansion>(
909         AT->getElementType(), AT->getSize().getZExtValue());
910   }
911   if (const RecordType *RT = Ty->getAs<RecordType>()) {
912     SmallVector<const CXXBaseSpecifier *, 1> Bases;
913     SmallVector<const FieldDecl *, 1> Fields;
914     const RecordDecl *RD = RT->getDecl();
915     assert(!RD->hasFlexibleArrayMember() &&
916            "Cannot expand structure with flexible array.");
917     if (RD->isUnion()) {
918       // Unions can be here only in degenerative cases - all the fields are same
919       // after flattening. Thus we have to use the "largest" field.
920       const FieldDecl *LargestFD = nullptr;
921       CharUnits UnionSize = CharUnits::Zero();
922 
923       for (const auto *FD : RD->fields()) {
924         if (FD->isZeroLengthBitField(Context))
925           continue;
926         assert(!FD->isBitField() &&
927                "Cannot expand structure with bit-field members.");
928         CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
929         if (UnionSize < FieldSize) {
930           UnionSize = FieldSize;
931           LargestFD = FD;
932         }
933       }
934       if (LargestFD)
935         Fields.push_back(LargestFD);
936     } else {
937       if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
938         assert(!CXXRD->isDynamicClass() &&
939                "cannot expand vtable pointers in dynamic classes");
940         for (const CXXBaseSpecifier &BS : CXXRD->bases())
941           Bases.push_back(&BS);
942       }
943 
944       for (const auto *FD : RD->fields()) {
945         if (FD->isZeroLengthBitField(Context))
946           continue;
947         assert(!FD->isBitField() &&
948                "Cannot expand structure with bit-field members.");
949         Fields.push_back(FD);
950       }
951     }
952     return std::make_unique<RecordExpansion>(std::move(Bases),
953                                               std::move(Fields));
954   }
955   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
956     return std::make_unique<ComplexExpansion>(CT->getElementType());
957   }
958   return std::make_unique<NoExpansion>();
959 }
960 
961 static int getExpansionSize(QualType Ty, const ASTContext &Context) {
962   auto Exp = getTypeExpansion(Ty, Context);
963   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
964     return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
965   }
966   if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
967     int Res = 0;
968     for (auto BS : RExp->Bases)
969       Res += getExpansionSize(BS->getType(), Context);
970     for (auto FD : RExp->Fields)
971       Res += getExpansionSize(FD->getType(), Context);
972     return Res;
973   }
974   if (isa<ComplexExpansion>(Exp.get()))
975     return 2;
976   assert(isa<NoExpansion>(Exp.get()));
977   return 1;
978 }
979 
980 void
981 CodeGenTypes::getExpandedTypes(QualType Ty,
982                                SmallVectorImpl<llvm::Type *>::iterator &TI) {
983   auto Exp = getTypeExpansion(Ty, Context);
984   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
985     for (int i = 0, n = CAExp->NumElts; i < n; i++) {
986       getExpandedTypes(CAExp->EltTy, TI);
987     }
988   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
989     for (auto BS : RExp->Bases)
990       getExpandedTypes(BS->getType(), TI);
991     for (auto FD : RExp->Fields)
992       getExpandedTypes(FD->getType(), TI);
993   } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
994     llvm::Type *EltTy = ConvertType(CExp->EltTy);
995     *TI++ = EltTy;
996     *TI++ = EltTy;
997   } else {
998     assert(isa<NoExpansion>(Exp.get()));
999     *TI++ = ConvertType(Ty);
1000   }
1001 }
1002 
1003 static void forConstantArrayExpansion(CodeGenFunction &CGF,
1004                                       ConstantArrayExpansion *CAE,
1005                                       Address BaseAddr,
1006                                       llvm::function_ref<void(Address)> Fn) {
1007   CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1008   CharUnits EltAlign =
1009     BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1010 
1011   for (int i = 0, n = CAE->NumElts; i < n; i++) {
1012     llvm::Value *EltAddr =
1013       CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
1014     Fn(Address(EltAddr, EltAlign));
1015   }
1016 }
1017 
1018 void CodeGenFunction::ExpandTypeFromArgs(
1019     QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
1020   assert(LV.isSimple() &&
1021          "Unexpected non-simple lvalue during struct expansion.");
1022 
1023   auto Exp = getTypeExpansion(Ty, getContext());
1024   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1025     forConstantArrayExpansion(
1026         *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
1027           LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1028           ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1029         });
1030   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1031     Address This = LV.getAddress(*this);
1032     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1033       // Perform a single step derived-to-base conversion.
1034       Address Base =
1035           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1036                                 /*NullCheckValue=*/false, SourceLocation());
1037       LValue SubLV = MakeAddrLValue(Base, BS->getType());
1038 
1039       // Recurse onto bases.
1040       ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1041     }
1042     for (auto FD : RExp->Fields) {
1043       // FIXME: What are the right qualifiers here?
1044       LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1045       ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1046     }
1047   } else if (isa<ComplexExpansion>(Exp.get())) {
1048     auto realValue = *AI++;
1049     auto imagValue = *AI++;
1050     EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1051   } else {
1052     // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
1053     // primitive store.
1054     assert(isa<NoExpansion>(Exp.get()));
1055     if (LV.isBitField())
1056       EmitStoreThroughLValue(RValue::get(*AI++), LV);
1057     else
1058       EmitStoreOfScalar(*AI++, LV);
1059   }
1060 }
1061 
1062 void CodeGenFunction::ExpandTypeToArgs(
1063     QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1064     SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1065   auto Exp = getTypeExpansion(Ty, getContext());
1066   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1067     Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
1068                                    : Arg.getKnownRValue().getAggregateAddress();
1069     forConstantArrayExpansion(
1070         *this, CAExp, Addr, [&](Address EltAddr) {
1071           CallArg EltArg = CallArg(
1072               convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1073               CAExp->EltTy);
1074           ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1075                            IRCallArgPos);
1076         });
1077   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1078     Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
1079                                    : Arg.getKnownRValue().getAggregateAddress();
1080     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1081       // Perform a single step derived-to-base conversion.
1082       Address Base =
1083           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1084                                 /*NullCheckValue=*/false, SourceLocation());
1085       CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1086 
1087       // Recurse onto bases.
1088       ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1089                        IRCallArgPos);
1090     }
1091 
1092     LValue LV = MakeAddrLValue(This, Ty);
1093     for (auto FD : RExp->Fields) {
1094       CallArg FldArg =
1095           CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1096       ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1097                        IRCallArgPos);
1098     }
1099   } else if (isa<ComplexExpansion>(Exp.get())) {
1100     ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1101     IRCallArgs[IRCallArgPos++] = CV.first;
1102     IRCallArgs[IRCallArgPos++] = CV.second;
1103   } else {
1104     assert(isa<NoExpansion>(Exp.get()));
1105     auto RV = Arg.getKnownRValue();
1106     assert(RV.isScalar() &&
1107            "Unexpected non-scalar rvalue during struct expansion.");
1108 
1109     // Insert a bitcast as needed.
1110     llvm::Value *V = RV.getScalarVal();
1111     if (IRCallArgPos < IRFuncTy->getNumParams() &&
1112         V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1113       V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1114 
1115     IRCallArgs[IRCallArgPos++] = V;
1116   }
1117 }
1118 
1119 /// Create a temporary allocation for the purposes of coercion.
1120 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1121                                            CharUnits MinAlign) {
1122   // Don't use an alignment that's worse than what LLVM would prefer.
1123   auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1124   CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1125 
1126   return CGF.CreateTempAlloca(Ty, Align);
1127 }
1128 
1129 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1130 /// accessing some number of bytes out of it, try to gep into the struct to get
1131 /// at its inner goodness.  Dive as deep as possible without entering an element
1132 /// with an in-memory size smaller than DstSize.
1133 static Address
1134 EnterStructPointerForCoercedAccess(Address SrcPtr,
1135                                    llvm::StructType *SrcSTy,
1136                                    uint64_t DstSize, CodeGenFunction &CGF) {
1137   // We can't dive into a zero-element struct.
1138   if (SrcSTy->getNumElements() == 0) return SrcPtr;
1139 
1140   llvm::Type *FirstElt = SrcSTy->getElementType(0);
1141 
1142   // If the first elt is at least as large as what we're looking for, or if the
1143   // first element is the same size as the whole struct, we can enter it. The
1144   // comparison must be made on the store size and not the alloca size. Using
1145   // the alloca size may overstate the size of the load.
1146   uint64_t FirstEltSize =
1147     CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1148   if (FirstEltSize < DstSize &&
1149       FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1150     return SrcPtr;
1151 
1152   // GEP into the first element.
1153   SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");
1154 
1155   // If the first element is a struct, recurse.
1156   llvm::Type *SrcTy = SrcPtr.getElementType();
1157   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1158     return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1159 
1160   return SrcPtr;
1161 }
1162 
1163 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1164 /// are either integers or pointers.  This does a truncation of the value if it
1165 /// is too large or a zero extension if it is too small.
1166 ///
1167 /// This behaves as if the value were coerced through memory, so on big-endian
1168 /// targets the high bits are preserved in a truncation, while little-endian
1169 /// targets preserve the low bits.
1170 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1171                                              llvm::Type *Ty,
1172                                              CodeGenFunction &CGF) {
1173   if (Val->getType() == Ty)
1174     return Val;
1175 
1176   if (isa<llvm::PointerType>(Val->getType())) {
1177     // If this is Pointer->Pointer avoid conversion to and from int.
1178     if (isa<llvm::PointerType>(Ty))
1179       return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1180 
1181     // Convert the pointer to an integer so we can play with its width.
1182     Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1183   }
1184 
1185   llvm::Type *DestIntTy = Ty;
1186   if (isa<llvm::PointerType>(DestIntTy))
1187     DestIntTy = CGF.IntPtrTy;
1188 
1189   if (Val->getType() != DestIntTy) {
1190     const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1191     if (DL.isBigEndian()) {
1192       // Preserve the high bits on big-endian targets.
1193       // That is what memory coercion does.
1194       uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1195       uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1196 
1197       if (SrcSize > DstSize) {
1198         Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1199         Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1200       } else {
1201         Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1202         Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1203       }
1204     } else {
1205       // Little-endian targets preserve the low bits. No shifts required.
1206       Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1207     }
1208   }
1209 
1210   if (isa<llvm::PointerType>(Ty))
1211     Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1212   return Val;
1213 }
1214 
1215 
1216 
1217 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1218 /// a pointer to an object of type \arg Ty, known to be aligned to
1219 /// \arg SrcAlign bytes.
1220 ///
1221 /// This safely handles the case when the src type is smaller than the
1222 /// destination type; in this situation the values of bits which not
1223 /// present in the src are undefined.
1224 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1225                                       CodeGenFunction &CGF) {
1226   llvm::Type *SrcTy = Src.getElementType();
1227 
1228   // If SrcTy and Ty are the same, just do a load.
1229   if (SrcTy == Ty)
1230     return CGF.Builder.CreateLoad(Src);
1231 
1232   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1233 
1234   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1235     Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
1236     SrcTy = Src.getElementType();
1237   }
1238 
1239   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1240 
1241   // If the source and destination are integer or pointer types, just do an
1242   // extension or truncation to the desired type.
1243   if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1244       (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1245     llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1246     return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1247   }
1248 
1249   // If load is legal, just bitcast the src pointer.
1250   if (SrcSize >= DstSize) {
1251     // Generally SrcSize is never greater than DstSize, since this means we are
1252     // losing bits. However, this can happen in cases where the structure has
1253     // additional padding, for example due to a user specified alignment.
1254     //
1255     // FIXME: Assert that we aren't truncating non-padding bits when have access
1256     // to that information.
1257     Src = CGF.Builder.CreateBitCast(Src,
1258                                     Ty->getPointerTo(Src.getAddressSpace()));
1259     return CGF.Builder.CreateLoad(Src);
1260   }
1261 
1262   // Otherwise do coercion through memory. This is stupid, but simple.
1263   Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
1264   Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
1265   Address SrcCasted = CGF.Builder.CreateElementBitCast(Src,CGF.Int8Ty);
1266   CGF.Builder.CreateMemCpy(Casted, SrcCasted,
1267       llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
1268       false);
1269   return CGF.Builder.CreateLoad(Tmp);
1270 }
1271 
1272 // Function to store a first-class aggregate into memory.  We prefer to
1273 // store the elements rather than the aggregate to be more friendly to
1274 // fast-isel.
1275 // FIXME: Do we need to recurse here?
1276 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
1277                           Address Dest, bool DestIsVolatile) {
1278   // Prefer scalar stores to first-class aggregate stores.
1279   if (llvm::StructType *STy =
1280         dyn_cast<llvm::StructType>(Val->getType())) {
1281     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1282       Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i);
1283       llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
1284       CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1285     }
1286   } else {
1287     CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
1288   }
1289 }
1290 
1291 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1292 /// where the source and destination may have different types.  The
1293 /// destination is known to be aligned to \arg DstAlign bytes.
1294 ///
1295 /// This safely handles the case when the src type is larger than the
1296 /// destination type; the upper bits of the src will be lost.
1297 static void CreateCoercedStore(llvm::Value *Src,
1298                                Address Dst,
1299                                bool DstIsVolatile,
1300                                CodeGenFunction &CGF) {
1301   llvm::Type *SrcTy = Src->getType();
1302   llvm::Type *DstTy = Dst.getElementType();
1303   if (SrcTy == DstTy) {
1304     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1305     return;
1306   }
1307 
1308   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1309 
1310   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1311     Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
1312     DstTy = Dst.getElementType();
1313   }
1314 
1315   llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
1316   llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
1317   if (SrcPtrTy && DstPtrTy &&
1318       SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
1319     Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
1320     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1321     return;
1322   }
1323 
1324   // If the source and destination are integer or pointer types, just do an
1325   // extension or truncation to the desired type.
1326   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1327       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1328     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1329     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1330     return;
1331   }
1332 
1333   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1334 
1335   // If store is legal, just bitcast the src pointer.
1336   if (SrcSize <= DstSize) {
1337     Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1338     BuildAggStore(CGF, Src, Dst, DstIsVolatile);
1339   } else {
1340     // Otherwise do coercion through memory. This is stupid, but
1341     // simple.
1342 
1343     // Generally SrcSize is never greater than DstSize, since this means we are
1344     // losing bits. However, this can happen in cases where the structure has
1345     // additional padding, for example due to a user specified alignment.
1346     //
1347     // FIXME: Assert that we aren't truncating non-padding bits when have access
1348     // to that information.
1349     Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1350     CGF.Builder.CreateStore(Src, Tmp);
1351     Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
1352     Address DstCasted = CGF.Builder.CreateElementBitCast(Dst,CGF.Int8Ty);
1353     CGF.Builder.CreateMemCpy(DstCasted, Casted,
1354         llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
1355         false);
1356   }
1357 }
1358 
1359 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1360                                    const ABIArgInfo &info) {
1361   if (unsigned offset = info.getDirectOffset()) {
1362     addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1363     addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1364                                              CharUnits::fromQuantity(offset));
1365     addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1366   }
1367   return addr;
1368 }
1369 
1370 namespace {
1371 
1372 /// Encapsulates information about the way function arguments from
1373 /// CGFunctionInfo should be passed to actual LLVM IR function.
1374 class ClangToLLVMArgMapping {
1375   static const unsigned InvalidIndex = ~0U;
1376   unsigned InallocaArgNo;
1377   unsigned SRetArgNo;
1378   unsigned TotalIRArgs;
1379 
1380   /// Arguments of LLVM IR function corresponding to single Clang argument.
1381   struct IRArgs {
1382     unsigned PaddingArgIndex;
1383     // Argument is expanded to IR arguments at positions
1384     // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1385     unsigned FirstArgIndex;
1386     unsigned NumberOfArgs;
1387 
1388     IRArgs()
1389         : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1390           NumberOfArgs(0) {}
1391   };
1392 
1393   SmallVector<IRArgs, 8> ArgInfo;
1394 
1395 public:
1396   ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1397                         bool OnlyRequiredArgs = false)
1398       : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1399         ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1400     construct(Context, FI, OnlyRequiredArgs);
1401   }
1402 
1403   bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1404   unsigned getInallocaArgNo() const {
1405     assert(hasInallocaArg());
1406     return InallocaArgNo;
1407   }
1408 
1409   bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1410   unsigned getSRetArgNo() const {
1411     assert(hasSRetArg());
1412     return SRetArgNo;
1413   }
1414 
1415   unsigned totalIRArgs() const { return TotalIRArgs; }
1416 
1417   bool hasPaddingArg(unsigned ArgNo) const {
1418     assert(ArgNo < ArgInfo.size());
1419     return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1420   }
1421   unsigned getPaddingArgNo(unsigned ArgNo) const {
1422     assert(hasPaddingArg(ArgNo));
1423     return ArgInfo[ArgNo].PaddingArgIndex;
1424   }
1425 
1426   /// Returns index of first IR argument corresponding to ArgNo, and their
1427   /// quantity.
1428   std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1429     assert(ArgNo < ArgInfo.size());
1430     return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1431                           ArgInfo[ArgNo].NumberOfArgs);
1432   }
1433 
1434 private:
1435   void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1436                  bool OnlyRequiredArgs);
1437 };
1438 
1439 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1440                                       const CGFunctionInfo &FI,
1441                                       bool OnlyRequiredArgs) {
1442   unsigned IRArgNo = 0;
1443   bool SwapThisWithSRet = false;
1444   const ABIArgInfo &RetAI = FI.getReturnInfo();
1445 
1446   if (RetAI.getKind() == ABIArgInfo::Indirect) {
1447     SwapThisWithSRet = RetAI.isSRetAfterThis();
1448     SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1449   }
1450 
1451   unsigned ArgNo = 0;
1452   unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1453   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1454        ++I, ++ArgNo) {
1455     assert(I != FI.arg_end());
1456     QualType ArgType = I->type;
1457     const ABIArgInfo &AI = I->info;
1458     // Collect data about IR arguments corresponding to Clang argument ArgNo.
1459     auto &IRArgs = ArgInfo[ArgNo];
1460 
1461     if (AI.getPaddingType())
1462       IRArgs.PaddingArgIndex = IRArgNo++;
1463 
1464     switch (AI.getKind()) {
1465     case ABIArgInfo::Extend:
1466     case ABIArgInfo::Direct: {
1467       // FIXME: handle sseregparm someday...
1468       llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1469       if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1470         IRArgs.NumberOfArgs = STy->getNumElements();
1471       } else {
1472         IRArgs.NumberOfArgs = 1;
1473       }
1474       break;
1475     }
1476     case ABIArgInfo::Indirect:
1477       IRArgs.NumberOfArgs = 1;
1478       break;
1479     case ABIArgInfo::Ignore:
1480     case ABIArgInfo::InAlloca:
1481       // ignore and inalloca doesn't have matching LLVM parameters.
1482       IRArgs.NumberOfArgs = 0;
1483       break;
1484     case ABIArgInfo::CoerceAndExpand:
1485       IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1486       break;
1487     case ABIArgInfo::Expand:
1488       IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1489       break;
1490     }
1491 
1492     if (IRArgs.NumberOfArgs > 0) {
1493       IRArgs.FirstArgIndex = IRArgNo;
1494       IRArgNo += IRArgs.NumberOfArgs;
1495     }
1496 
1497     // Skip over the sret parameter when it comes second.  We already handled it
1498     // above.
1499     if (IRArgNo == 1 && SwapThisWithSRet)
1500       IRArgNo++;
1501   }
1502   assert(ArgNo == ArgInfo.size());
1503 
1504   if (FI.usesInAlloca())
1505     InallocaArgNo = IRArgNo++;
1506 
1507   TotalIRArgs = IRArgNo;
1508 }
1509 }  // namespace
1510 
1511 /***/
1512 
1513 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1514   const auto &RI = FI.getReturnInfo();
1515   return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1516 }
1517 
1518 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1519   return ReturnTypeUsesSRet(FI) &&
1520          getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1521 }
1522 
1523 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1524   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1525     switch (BT->getKind()) {
1526     default:
1527       return false;
1528     case BuiltinType::Float:
1529       return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1530     case BuiltinType::Double:
1531       return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1532     case BuiltinType::LongDouble:
1533       return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1534     }
1535   }
1536 
1537   return false;
1538 }
1539 
1540 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1541   if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1542     if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1543       if (BT->getKind() == BuiltinType::LongDouble)
1544         return getTarget().useObjCFP2RetForComplexLongDouble();
1545     }
1546   }
1547 
1548   return false;
1549 }
1550 
1551 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1552   const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1553   return GetFunctionType(FI);
1554 }
1555 
1556 llvm::FunctionType *
1557 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1558 
1559   bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1560   (void)Inserted;
1561   assert(Inserted && "Recursively being processed?");
1562 
1563   llvm::Type *resultType = nullptr;
1564   const ABIArgInfo &retAI = FI.getReturnInfo();
1565   switch (retAI.getKind()) {
1566   case ABIArgInfo::Expand:
1567     llvm_unreachable("Invalid ABI kind for return argument");
1568 
1569   case ABIArgInfo::Extend:
1570   case ABIArgInfo::Direct:
1571     resultType = retAI.getCoerceToType();
1572     break;
1573 
1574   case ABIArgInfo::InAlloca:
1575     if (retAI.getInAllocaSRet()) {
1576       // sret things on win32 aren't void, they return the sret pointer.
1577       QualType ret = FI.getReturnType();
1578       llvm::Type *ty = ConvertType(ret);
1579       unsigned addressSpace = Context.getTargetAddressSpace(ret);
1580       resultType = llvm::PointerType::get(ty, addressSpace);
1581     } else {
1582       resultType = llvm::Type::getVoidTy(getLLVMContext());
1583     }
1584     break;
1585 
1586   case ABIArgInfo::Indirect:
1587   case ABIArgInfo::Ignore:
1588     resultType = llvm::Type::getVoidTy(getLLVMContext());
1589     break;
1590 
1591   case ABIArgInfo::CoerceAndExpand:
1592     resultType = retAI.getUnpaddedCoerceAndExpandType();
1593     break;
1594   }
1595 
1596   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1597   SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1598 
1599   // Add type for sret argument.
1600   if (IRFunctionArgs.hasSRetArg()) {
1601     QualType Ret = FI.getReturnType();
1602     llvm::Type *Ty = ConvertType(Ret);
1603     unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1604     ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1605         llvm::PointerType::get(Ty, AddressSpace);
1606   }
1607 
1608   // Add type for inalloca argument.
1609   if (IRFunctionArgs.hasInallocaArg()) {
1610     auto ArgStruct = FI.getArgStruct();
1611     assert(ArgStruct);
1612     ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1613   }
1614 
1615   // Add in all of the required arguments.
1616   unsigned ArgNo = 0;
1617   CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1618                                      ie = it + FI.getNumRequiredArgs();
1619   for (; it != ie; ++it, ++ArgNo) {
1620     const ABIArgInfo &ArgInfo = it->info;
1621 
1622     // Insert a padding type to ensure proper alignment.
1623     if (IRFunctionArgs.hasPaddingArg(ArgNo))
1624       ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1625           ArgInfo.getPaddingType();
1626 
1627     unsigned FirstIRArg, NumIRArgs;
1628     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1629 
1630     switch (ArgInfo.getKind()) {
1631     case ABIArgInfo::Ignore:
1632     case ABIArgInfo::InAlloca:
1633       assert(NumIRArgs == 0);
1634       break;
1635 
1636     case ABIArgInfo::Indirect: {
1637       assert(NumIRArgs == 1);
1638       // indirect arguments are always on the stack, which is alloca addr space.
1639       llvm::Type *LTy = ConvertTypeForMem(it->type);
1640       ArgTypes[FirstIRArg] = LTy->getPointerTo(
1641           CGM.getDataLayout().getAllocaAddrSpace());
1642       break;
1643     }
1644 
1645     case ABIArgInfo::Extend:
1646     case ABIArgInfo::Direct: {
1647       // Fast-isel and the optimizer generally like scalar values better than
1648       // FCAs, so we flatten them if this is safe to do for this argument.
1649       llvm::Type *argType = ArgInfo.getCoerceToType();
1650       llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1651       if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1652         assert(NumIRArgs == st->getNumElements());
1653         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1654           ArgTypes[FirstIRArg + i] = st->getElementType(i);
1655       } else {
1656         assert(NumIRArgs == 1);
1657         ArgTypes[FirstIRArg] = argType;
1658       }
1659       break;
1660     }
1661 
1662     case ABIArgInfo::CoerceAndExpand: {
1663       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1664       for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1665         *ArgTypesIter++ = EltTy;
1666       }
1667       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1668       break;
1669     }
1670 
1671     case ABIArgInfo::Expand:
1672       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1673       getExpandedTypes(it->type, ArgTypesIter);
1674       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1675       break;
1676     }
1677   }
1678 
1679   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1680   assert(Erased && "Not in set?");
1681 
1682   return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1683 }
1684 
1685 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1686   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1687   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1688 
1689   if (!isFuncTypeConvertible(FPT))
1690     return llvm::StructType::get(getLLVMContext());
1691 
1692   return GetFunctionType(GD);
1693 }
1694 
1695 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1696                                                llvm::AttrBuilder &FuncAttrs,
1697                                                const FunctionProtoType *FPT) {
1698   if (!FPT)
1699     return;
1700 
1701   if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1702       FPT->isNothrow())
1703     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1704 }
1705 
1706 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1707                                                bool AttrOnCallSite,
1708                                                llvm::AttrBuilder &FuncAttrs) {
1709   // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1710   if (!HasOptnone) {
1711     if (CodeGenOpts.OptimizeSize)
1712       FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1713     if (CodeGenOpts.OptimizeSize == 2)
1714       FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1715   }
1716 
1717   if (CodeGenOpts.DisableRedZone)
1718     FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1719   if (CodeGenOpts.IndirectTlsSegRefs)
1720     FuncAttrs.addAttribute("indirect-tls-seg-refs");
1721   if (CodeGenOpts.NoImplicitFloat)
1722     FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1723 
1724   if (AttrOnCallSite) {
1725     // Attributes that should go on the call site only.
1726     if (!CodeGenOpts.SimplifyLibCalls ||
1727         CodeGenOpts.isNoBuiltinFunc(Name.data()))
1728       FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1729     if (!CodeGenOpts.TrapFuncName.empty())
1730       FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1731   } else {
1732     StringRef FpKind;
1733     switch (CodeGenOpts.getFramePointer()) {
1734     case CodeGenOptions::FramePointerKind::None:
1735       FpKind = "none";
1736       break;
1737     case CodeGenOptions::FramePointerKind::NonLeaf:
1738       FpKind = "non-leaf";
1739       break;
1740     case CodeGenOptions::FramePointerKind::All:
1741       FpKind = "all";
1742       break;
1743     }
1744     FuncAttrs.addAttribute("frame-pointer", FpKind);
1745 
1746     FuncAttrs.addAttribute("less-precise-fpmad",
1747                            llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1748 
1749     if (CodeGenOpts.NullPointerIsValid)
1750       FuncAttrs.addAttribute("null-pointer-is-valid", "true");
1751 
1752     if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
1753       FuncAttrs.addAttribute("denormal-fp-math",
1754                              CodeGenOpts.FPDenormalMode.str());
1755     if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
1756       FuncAttrs.addAttribute(
1757           "denormal-fp-math-f32",
1758           CodeGenOpts.FP32DenormalMode.str());
1759     }
1760 
1761     FuncAttrs.addAttribute("no-trapping-math",
1762                            llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1763 
1764     // Strict (compliant) code is the default, so only add this attribute to
1765     // indicate that we are trying to workaround a problem case.
1766     if (!CodeGenOpts.StrictFloatCastOverflow)
1767       FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1768 
1769     // TODO: Are these all needed?
1770     // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1771     FuncAttrs.addAttribute("no-infs-fp-math",
1772                            llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1773     FuncAttrs.addAttribute("no-nans-fp-math",
1774                            llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1775     FuncAttrs.addAttribute("unsafe-fp-math",
1776                            llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1777     FuncAttrs.addAttribute("use-soft-float",
1778                            llvm::toStringRef(CodeGenOpts.SoftFloat));
1779     FuncAttrs.addAttribute("stack-protector-buffer-size",
1780                            llvm::utostr(CodeGenOpts.SSPBufferSize));
1781     FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1782                            llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1783     FuncAttrs.addAttribute(
1784         "correctly-rounded-divide-sqrt-fp-math",
1785         llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1786 
1787     // TODO: Reciprocal estimate codegen options should apply to instructions?
1788     const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1789     if (!Recips.empty())
1790       FuncAttrs.addAttribute("reciprocal-estimates",
1791                              llvm::join(Recips, ","));
1792 
1793     if (!CodeGenOpts.PreferVectorWidth.empty() &&
1794         CodeGenOpts.PreferVectorWidth != "none")
1795       FuncAttrs.addAttribute("prefer-vector-width",
1796                              CodeGenOpts.PreferVectorWidth);
1797 
1798     if (CodeGenOpts.StackRealignment)
1799       FuncAttrs.addAttribute("stackrealign");
1800     if (CodeGenOpts.Backchain)
1801       FuncAttrs.addAttribute("backchain");
1802 
1803     if (CodeGenOpts.SpeculativeLoadHardening)
1804       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1805   }
1806 
1807   if (getLangOpts().assumeFunctionsAreConvergent()) {
1808     // Conservatively, mark all functions and calls in CUDA and OpenCL as
1809     // convergent (meaning, they may call an intrinsically convergent op, such
1810     // as __syncthreads() / barrier(), and so can't have certain optimizations
1811     // applied around them).  LLVM will remove this attribute where it safely
1812     // can.
1813     FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1814   }
1815 
1816   if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1817     // Exceptions aren't supported in CUDA device code.
1818     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1819   }
1820 
1821   for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1822     StringRef Var, Value;
1823     std::tie(Var, Value) = Attr.split('=');
1824     FuncAttrs.addAttribute(Var, Value);
1825   }
1826 }
1827 
1828 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1829   llvm::AttrBuilder FuncAttrs;
1830   ConstructDefaultFnAttrList(F.getName(), F.hasOptNone(),
1831                              /* AttrOnCallSite = */ false, FuncAttrs);
1832   F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1833 }
1834 
1835 static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
1836                                    const LangOptions &LangOpts,
1837                                    const NoBuiltinAttr *NBA = nullptr) {
1838   auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
1839     SmallString<32> AttributeName;
1840     AttributeName += "no-builtin-";
1841     AttributeName += BuiltinName;
1842     FuncAttrs.addAttribute(AttributeName);
1843   };
1844 
1845   // First, handle the language options passed through -fno-builtin.
1846   if (LangOpts.NoBuiltin) {
1847     // -fno-builtin disables them all.
1848     FuncAttrs.addAttribute("no-builtins");
1849     return;
1850   }
1851 
1852   // Then, add attributes for builtins specified through -fno-builtin-<name>.
1853   llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
1854 
1855   // Now, let's check the __attribute__((no_builtin("...")) attribute added to
1856   // the source.
1857   if (!NBA)
1858     return;
1859 
1860   // If there is a wildcard in the builtin names specified through the
1861   // attribute, disable them all.
1862   if (llvm::is_contained(NBA->builtinNames(), "*")) {
1863     FuncAttrs.addAttribute("no-builtins");
1864     return;
1865   }
1866 
1867   // And last, add the rest of the builtin names.
1868   llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
1869 }
1870 
1871 static void addVectLibAttributes(llvm::AttrBuilder &FuncAttrs,
1872                                  const CodeGenOptions &CodeGenOpts) {
1873   StringRef AttributeName = "veclib";
1874   switch (CodeGenOpts.getVecLib()) {
1875   case CodeGenOptions::Accelerate:
1876     FuncAttrs.addAttribute(AttributeName, "Accelerate");
1877     break;
1878   case CodeGenOptions::MASSV:
1879     FuncAttrs.addAttribute(AttributeName, "MASSV");
1880     break;
1881   case CodeGenOptions::SVML:
1882     FuncAttrs.addAttribute(AttributeName, "SVML");
1883     break;
1884   case CodeGenOptions::NoLibrary:
1885     break;
1886   }
1887 }
1888 
1889 void CodeGenModule::ConstructAttributeList(
1890     StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1891     llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1892   llvm::AttrBuilder FuncAttrs;
1893   llvm::AttrBuilder RetAttrs;
1894 
1895   CallingConv = FI.getEffectiveCallingConvention();
1896   if (FI.isNoReturn())
1897     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1898 
1899   if (FI.isCmseNSCall())
1900     FuncAttrs.addAttribute("cmse_nonsecure_call");
1901 
1902   // If we have information about the function prototype, we can learn
1903   // attributes from there.
1904   AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1905                                      CalleeInfo.getCalleeFunctionProtoType());
1906 
1907   const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
1908 
1909   bool HasOptnone = false;
1910   // The NoBuiltinAttr attached to a TargetDecl (only allowed on FunctionDecls).
1911   const NoBuiltinAttr *NBA = nullptr;
1912   // FIXME: handle sseregparm someday...
1913   if (TargetDecl) {
1914     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1915       FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1916     if (TargetDecl->hasAttr<NoThrowAttr>())
1917       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1918     if (TargetDecl->hasAttr<NoReturnAttr>())
1919       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1920     if (TargetDecl->hasAttr<ColdAttr>())
1921       FuncAttrs.addAttribute(llvm::Attribute::Cold);
1922     if (TargetDecl->hasAttr<NoDuplicateAttr>())
1923       FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1924     if (TargetDecl->hasAttr<ConvergentAttr>())
1925       FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1926 
1927     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1928       AddAttributesFromFunctionProtoType(
1929           getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1930       if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
1931         // A sane operator new returns a non-aliasing pointer.
1932         auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
1933         if (getCodeGenOpts().AssumeSaneOperatorNew &&
1934             (Kind == OO_New || Kind == OO_Array_New))
1935           RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1936       }
1937       const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1938       const bool IsVirtualCall = MD && MD->isVirtual();
1939       // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
1940       // virtual function. These attributes are not inherited by overloads.
1941       if (!(AttrOnCallSite && IsVirtualCall)) {
1942         if (Fn->isNoReturn())
1943           FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1944         NBA = Fn->getAttr<NoBuiltinAttr>();
1945       }
1946     }
1947 
1948     // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1949     if (TargetDecl->hasAttr<ConstAttr>()) {
1950       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1951       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1952     } else if (TargetDecl->hasAttr<PureAttr>()) {
1953       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1954       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1955     } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1956       FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1957       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1958     }
1959     if (TargetDecl->hasAttr<RestrictAttr>())
1960       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1961     if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1962         !CodeGenOpts.NullPointerIsValid)
1963       RetAttrs.addAttribute(llvm::Attribute::NonNull);
1964     if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1965       FuncAttrs.addAttribute("no_caller_saved_registers");
1966     if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1967       FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1968 
1969     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1970     if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1971       Optional<unsigned> NumElemsParam;
1972       if (AllocSize->getNumElemsParam().isValid())
1973         NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1974       FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1975                                  NumElemsParam);
1976     }
1977   }
1978 
1979   // Attach "no-builtins" attributes to:
1980   // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
1981   // * definitions: "no-builtins" or "no-builtin-<name>" only.
1982   // The attributes can come from:
1983   // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
1984   // * FunctionDecl attributes: __attribute__((no_builtin(...)))
1985   addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
1986 
1987   // Attach "veclib" attribute to function based on '-fveclib' setting.
1988   addVectLibAttributes(FuncAttrs, getCodeGenOpts());
1989 
1990   ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1991 
1992   // This must run after constructing the default function attribute list
1993   // to ensure that the speculative load hardening attribute is removed
1994   // in the case where the -mspeculative-load-hardening flag was passed.
1995   if (TargetDecl) {
1996     if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
1997       FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
1998     if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
1999       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2000   }
2001 
2002   if (CodeGenOpts.EnableSegmentedStacks &&
2003       !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
2004     FuncAttrs.addAttribute("split-stack");
2005 
2006   // Add NonLazyBind attribute to function declarations when -fno-plt
2007   // is used.
2008   if (TargetDecl && CodeGenOpts.NoPLT) {
2009     if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2010       if (!Fn->isDefined() && !AttrOnCallSite) {
2011         FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2012       }
2013     }
2014   }
2015 
2016   if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
2017     if (getLangOpts().OpenCLVersion <= 120) {
2018       // OpenCL v1.2 Work groups are always uniform
2019       FuncAttrs.addAttribute("uniform-work-group-size", "true");
2020     } else {
2021       // OpenCL v2.0 Work groups may be whether uniform or not.
2022       // '-cl-uniform-work-group-size' compile option gets a hint
2023       // to the compiler that the global work-size be a multiple of
2024       // the work-group size specified to clEnqueueNDRangeKernel
2025       // (i.e. work groups are uniform).
2026       FuncAttrs.addAttribute("uniform-work-group-size",
2027                              llvm::toStringRef(CodeGenOpts.UniformWGSize));
2028     }
2029   }
2030 
2031   if (!AttrOnCallSite) {
2032     if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
2033       FuncAttrs.addAttribute("cmse_nonsecure_entry");
2034 
2035     bool DisableTailCalls = false;
2036 
2037     if (CodeGenOpts.DisableTailCalls)
2038       DisableTailCalls = true;
2039     else if (TargetDecl) {
2040       if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
2041           TargetDecl->hasAttr<AnyX86InterruptAttr>())
2042         DisableTailCalls = true;
2043       else if (CodeGenOpts.NoEscapingBlockTailCalls) {
2044         if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2045           if (!BD->doesNotEscape())
2046             DisableTailCalls = true;
2047       }
2048     }
2049 
2050     FuncAttrs.addAttribute("disable-tail-calls",
2051                            llvm::toStringRef(DisableTailCalls));
2052     GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
2053   }
2054 
2055   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
2056 
2057   QualType RetTy = FI.getReturnType();
2058   const ABIArgInfo &RetAI = FI.getReturnInfo();
2059   switch (RetAI.getKind()) {
2060   case ABIArgInfo::Extend:
2061     if (RetAI.isSignExt())
2062       RetAttrs.addAttribute(llvm::Attribute::SExt);
2063     else
2064       RetAttrs.addAttribute(llvm::Attribute::ZExt);
2065     LLVM_FALLTHROUGH;
2066   case ABIArgInfo::Direct:
2067     if (RetAI.getInReg())
2068       RetAttrs.addAttribute(llvm::Attribute::InReg);
2069     break;
2070   case ABIArgInfo::Ignore:
2071     break;
2072 
2073   case ABIArgInfo::InAlloca:
2074   case ABIArgInfo::Indirect: {
2075     // inalloca and sret disable readnone and readonly
2076     FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2077       .removeAttribute(llvm::Attribute::ReadNone);
2078     break;
2079   }
2080 
2081   case ABIArgInfo::CoerceAndExpand:
2082     break;
2083 
2084   case ABIArgInfo::Expand:
2085     llvm_unreachable("Invalid ABI kind for return argument");
2086   }
2087 
2088   if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2089     QualType PTy = RefTy->getPointeeType();
2090     if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2091       RetAttrs.addDereferenceableAttr(
2092           getMinimumObjectSize(PTy).getQuantity());
2093     else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2094              !CodeGenOpts.NullPointerIsValid)
2095       RetAttrs.addAttribute(llvm::Attribute::NonNull);
2096   }
2097 
2098   bool hasUsedSRet = false;
2099   SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2100 
2101   // Attach attributes to sret.
2102   if (IRFunctionArgs.hasSRetArg()) {
2103     llvm::AttrBuilder SRETAttrs;
2104     SRETAttrs.addAttribute(llvm::Attribute::StructRet);
2105     hasUsedSRet = true;
2106     if (RetAI.getInReg())
2107       SRETAttrs.addAttribute(llvm::Attribute::InReg);
2108     SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
2109     ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2110         llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2111   }
2112 
2113   // Attach attributes to inalloca argument.
2114   if (IRFunctionArgs.hasInallocaArg()) {
2115     llvm::AttrBuilder Attrs;
2116     Attrs.addAttribute(llvm::Attribute::InAlloca);
2117     ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2118         llvm::AttributeSet::get(getLLVMContext(), Attrs);
2119   }
2120 
2121   unsigned ArgNo = 0;
2122   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2123                                           E = FI.arg_end();
2124        I != E; ++I, ++ArgNo) {
2125     QualType ParamType = I->type;
2126     const ABIArgInfo &AI = I->info;
2127     llvm::AttrBuilder Attrs;
2128 
2129     // Add attribute for padding argument, if necessary.
2130     if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2131       if (AI.getPaddingInReg()) {
2132         ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2133             llvm::AttributeSet::get(
2134                 getLLVMContext(),
2135                 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2136       }
2137     }
2138 
2139     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2140     // have the corresponding parameter variable.  It doesn't make
2141     // sense to do it here because parameters are so messed up.
2142     switch (AI.getKind()) {
2143     case ABIArgInfo::Extend:
2144       if (AI.isSignExt())
2145         Attrs.addAttribute(llvm::Attribute::SExt);
2146       else
2147         Attrs.addAttribute(llvm::Attribute::ZExt);
2148       LLVM_FALLTHROUGH;
2149     case ABIArgInfo::Direct:
2150       if (ArgNo == 0 && FI.isChainCall())
2151         Attrs.addAttribute(llvm::Attribute::Nest);
2152       else if (AI.getInReg())
2153         Attrs.addAttribute(llvm::Attribute::InReg);
2154       break;
2155 
2156     case ABIArgInfo::Indirect: {
2157       if (AI.getInReg())
2158         Attrs.addAttribute(llvm::Attribute::InReg);
2159 
2160       if (AI.getIndirectByVal())
2161         Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
2162 
2163       CharUnits Align = AI.getIndirectAlign();
2164 
2165       // In a byval argument, it is important that the required
2166       // alignment of the type is honored, as LLVM might be creating a
2167       // *new* stack object, and needs to know what alignment to give
2168       // it. (Sometimes it can deduce a sensible alignment on its own,
2169       // but not if clang decides it must emit a packed struct, or the
2170       // user specifies increased alignment requirements.)
2171       //
2172       // This is different from indirect *not* byval, where the object
2173       // exists already, and the align attribute is purely
2174       // informative.
2175       assert(!Align.isZero());
2176 
2177       // For now, only add this when we have a byval argument.
2178       // TODO: be less lazy about updating test cases.
2179       if (AI.getIndirectByVal())
2180         Attrs.addAlignmentAttr(Align.getQuantity());
2181 
2182       // byval disables readnone and readonly.
2183       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2184         .removeAttribute(llvm::Attribute::ReadNone);
2185       break;
2186     }
2187     case ABIArgInfo::Ignore:
2188     case ABIArgInfo::Expand:
2189     case ABIArgInfo::CoerceAndExpand:
2190       break;
2191 
2192     case ABIArgInfo::InAlloca:
2193       // inalloca disables readnone and readonly.
2194       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2195           .removeAttribute(llvm::Attribute::ReadNone);
2196       continue;
2197     }
2198 
2199     if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2200       QualType PTy = RefTy->getPointeeType();
2201       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2202         Attrs.addDereferenceableAttr(
2203             getMinimumObjectSize(PTy).getQuantity());
2204       else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2205                !CodeGenOpts.NullPointerIsValid)
2206         Attrs.addAttribute(llvm::Attribute::NonNull);
2207     }
2208 
2209     switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2210     case ParameterABI::Ordinary:
2211       break;
2212 
2213     case ParameterABI::SwiftIndirectResult: {
2214       // Add 'sret' if we haven't already used it for something, but
2215       // only if the result is void.
2216       if (!hasUsedSRet && RetTy->isVoidType()) {
2217         Attrs.addAttribute(llvm::Attribute::StructRet);
2218         hasUsedSRet = true;
2219       }
2220 
2221       // Add 'noalias' in either case.
2222       Attrs.addAttribute(llvm::Attribute::NoAlias);
2223 
2224       // Add 'dereferenceable' and 'alignment'.
2225       auto PTy = ParamType->getPointeeType();
2226       if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2227         auto info = getContext().getTypeInfoInChars(PTy);
2228         Attrs.addDereferenceableAttr(info.first.getQuantity());
2229         Attrs.addAttribute(llvm::Attribute::getWithAlignment(
2230             getLLVMContext(), info.second.getAsAlign()));
2231       }
2232       break;
2233     }
2234 
2235     case ParameterABI::SwiftErrorResult:
2236       Attrs.addAttribute(llvm::Attribute::SwiftError);
2237       break;
2238 
2239     case ParameterABI::SwiftContext:
2240       Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2241       break;
2242     }
2243 
2244     if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2245       Attrs.addAttribute(llvm::Attribute::NoCapture);
2246 
2247     if (Attrs.hasAttributes()) {
2248       unsigned FirstIRArg, NumIRArgs;
2249       std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2250       for (unsigned i = 0; i < NumIRArgs; i++)
2251         ArgAttrs[FirstIRArg + i] =
2252             llvm::AttributeSet::get(getLLVMContext(), Attrs);
2253     }
2254   }
2255   assert(ArgNo == FI.arg_size());
2256 
2257   AttrList = llvm::AttributeList::get(
2258       getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2259       llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2260 }
2261 
2262 /// An argument came in as a promoted argument; demote it back to its
2263 /// declared type.
2264 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2265                                          const VarDecl *var,
2266                                          llvm::Value *value) {
2267   llvm::Type *varType = CGF.ConvertType(var->getType());
2268 
2269   // This can happen with promotions that actually don't change the
2270   // underlying type, like the enum promotions.
2271   if (value->getType() == varType) return value;
2272 
2273   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2274          && "unexpected promotion type");
2275 
2276   if (isa<llvm::IntegerType>(varType))
2277     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2278 
2279   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2280 }
2281 
2282 /// Returns the attribute (either parameter attribute, or function
2283 /// attribute), which declares argument ArgNo to be non-null.
2284 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2285                                          QualType ArgType, unsigned ArgNo) {
2286   // FIXME: __attribute__((nonnull)) can also be applied to:
2287   //   - references to pointers, where the pointee is known to be
2288   //     nonnull (apparently a Clang extension)
2289   //   - transparent unions containing pointers
2290   // In the former case, LLVM IR cannot represent the constraint. In
2291   // the latter case, we have no guarantee that the transparent union
2292   // is in fact passed as a pointer.
2293   if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2294     return nullptr;
2295   // First, check attribute on parameter itself.
2296   if (PVD) {
2297     if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2298       return ParmNNAttr;
2299   }
2300   // Check function attributes.
2301   if (!FD)
2302     return nullptr;
2303   for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2304     if (NNAttr->isNonNull(ArgNo))
2305       return NNAttr;
2306   }
2307   return nullptr;
2308 }
2309 
2310 namespace {
2311   struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2312     Address Temp;
2313     Address Arg;
2314     CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2315     void Emit(CodeGenFunction &CGF, Flags flags) override {
2316       llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2317       CGF.Builder.CreateStore(errorValue, Arg);
2318     }
2319   };
2320 }
2321 
2322 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2323                                          llvm::Function *Fn,
2324                                          const FunctionArgList &Args) {
2325   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2326     // Naked functions don't have prologues.
2327     return;
2328 
2329   // If this is an implicit-return-zero function, go ahead and
2330   // initialize the return value.  TODO: it might be nice to have
2331   // a more general mechanism for this that didn't require synthesized
2332   // return statements.
2333   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2334     if (FD->hasImplicitReturnZero()) {
2335       QualType RetTy = FD->getReturnType().getUnqualifiedType();
2336       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2337       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2338       Builder.CreateStore(Zero, ReturnValue);
2339     }
2340   }
2341 
2342   // FIXME: We no longer need the types from FunctionArgList; lift up and
2343   // simplify.
2344 
2345   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2346   // Flattened function arguments.
2347   SmallVector<llvm::Value *, 16> FnArgs;
2348   FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2349   for (auto &Arg : Fn->args()) {
2350     FnArgs.push_back(&Arg);
2351   }
2352   assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2353 
2354   // If we're using inalloca, all the memory arguments are GEPs off of the last
2355   // parameter, which is a pointer to the complete memory area.
2356   Address ArgStruct = Address::invalid();
2357   if (IRFunctionArgs.hasInallocaArg()) {
2358     ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2359                         FI.getArgStructAlignment());
2360 
2361     assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2362   }
2363 
2364   // Name the struct return parameter.
2365   if (IRFunctionArgs.hasSRetArg()) {
2366     auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2367     AI->setName("agg.result");
2368     AI->addAttr(llvm::Attribute::NoAlias);
2369   }
2370 
2371   // Track if we received the parameter as a pointer (indirect, byval, or
2372   // inalloca).  If already have a pointer, EmitParmDecl doesn't need to copy it
2373   // into a local alloca for us.
2374   SmallVector<ParamValue, 16> ArgVals;
2375   ArgVals.reserve(Args.size());
2376 
2377   // Create a pointer value for every parameter declaration.  This usually
2378   // entails copying one or more LLVM IR arguments into an alloca.  Don't push
2379   // any cleanups or do anything that might unwind.  We do that separately, so
2380   // we can push the cleanups in the correct order for the ABI.
2381   assert(FI.arg_size() == Args.size() &&
2382          "Mismatch between function signature & arguments.");
2383   unsigned ArgNo = 0;
2384   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2385   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2386        i != e; ++i, ++info_it, ++ArgNo) {
2387     const VarDecl *Arg = *i;
2388     const ABIArgInfo &ArgI = info_it->info;
2389 
2390     bool isPromoted =
2391       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2392     // We are converting from ABIArgInfo type to VarDecl type directly, unless
2393     // the parameter is promoted. In this case we convert to
2394     // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2395     QualType Ty = isPromoted ? info_it->type : Arg->getType();
2396     assert(hasScalarEvaluationKind(Ty) ==
2397            hasScalarEvaluationKind(Arg->getType()));
2398 
2399     unsigned FirstIRArg, NumIRArgs;
2400     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2401 
2402     switch (ArgI.getKind()) {
2403     case ABIArgInfo::InAlloca: {
2404       assert(NumIRArgs == 0);
2405       auto FieldIndex = ArgI.getInAllocaFieldIndex();
2406       Address V =
2407           Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2408       if (ArgI.getInAllocaIndirect())
2409         V = Address(Builder.CreateLoad(V),
2410                     getContext().getTypeAlignInChars(Ty));
2411       ArgVals.push_back(ParamValue::forIndirect(V));
2412       break;
2413     }
2414 
2415     case ABIArgInfo::Indirect: {
2416       assert(NumIRArgs == 1);
2417       Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2418 
2419       if (!hasScalarEvaluationKind(Ty)) {
2420         // Aggregates and complex variables are accessed by reference.  All we
2421         // need to do is realign the value, if requested.
2422         Address V = ParamAddr;
2423         if (ArgI.getIndirectRealign()) {
2424           Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2425 
2426           // Copy from the incoming argument pointer to the temporary with the
2427           // appropriate alignment.
2428           //
2429           // FIXME: We should have a common utility for generating an aggregate
2430           // copy.
2431           CharUnits Size = getContext().getTypeSizeInChars(Ty);
2432           auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2433           Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2434           Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2435           Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2436           V = AlignedTemp;
2437         }
2438         ArgVals.push_back(ParamValue::forIndirect(V));
2439       } else {
2440         // Load scalar value from indirect argument.
2441         llvm::Value *V =
2442             EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2443 
2444         if (isPromoted)
2445           V = emitArgumentDemotion(*this, Arg, V);
2446         ArgVals.push_back(ParamValue::forDirect(V));
2447       }
2448       break;
2449     }
2450 
2451     case ABIArgInfo::Extend:
2452     case ABIArgInfo::Direct: {
2453 
2454       // If we have the trivial case, handle it with no muss and fuss.
2455       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2456           ArgI.getCoerceToType() == ConvertType(Ty) &&
2457           ArgI.getDirectOffset() == 0) {
2458         assert(NumIRArgs == 1);
2459         llvm::Value *V = FnArgs[FirstIRArg];
2460         auto AI = cast<llvm::Argument>(V);
2461 
2462         if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2463           if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2464                              PVD->getFunctionScopeIndex()) &&
2465               !CGM.getCodeGenOpts().NullPointerIsValid)
2466             AI->addAttr(llvm::Attribute::NonNull);
2467 
2468           QualType OTy = PVD->getOriginalType();
2469           if (const auto *ArrTy =
2470               getContext().getAsConstantArrayType(OTy)) {
2471             // A C99 array parameter declaration with the static keyword also
2472             // indicates dereferenceability, and if the size is constant we can
2473             // use the dereferenceable attribute (which requires the size in
2474             // bytes).
2475             if (ArrTy->getSizeModifier() == ArrayType::Static) {
2476               QualType ETy = ArrTy->getElementType();
2477               uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2478               if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2479                   ArrSize) {
2480                 llvm::AttrBuilder Attrs;
2481                 Attrs.addDereferenceableAttr(
2482                   getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2483                 AI->addAttrs(Attrs);
2484               } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
2485                          !CGM.getCodeGenOpts().NullPointerIsValid) {
2486                 AI->addAttr(llvm::Attribute::NonNull);
2487               }
2488             }
2489           } else if (const auto *ArrTy =
2490                      getContext().getAsVariableArrayType(OTy)) {
2491             // For C99 VLAs with the static keyword, we don't know the size so
2492             // we can't use the dereferenceable attribute, but in addrspace(0)
2493             // we know that it must be nonnull.
2494             if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2495                 !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
2496                 !CGM.getCodeGenOpts().NullPointerIsValid)
2497               AI->addAttr(llvm::Attribute::NonNull);
2498           }
2499 
2500           const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2501           if (!AVAttr)
2502             if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2503               AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2504           if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2505             // If alignment-assumption sanitizer is enabled, we do *not* add
2506             // alignment attribute here, but emit normal alignment assumption,
2507             // so the UBSAN check could function.
2508             llvm::Value *AlignmentValue =
2509               EmitScalarExpr(AVAttr->getAlignment());
2510             llvm::ConstantInt *AlignmentCI =
2511               cast<llvm::ConstantInt>(AlignmentValue);
2512             AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(llvm::MaybeAlign(
2513                 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment))));
2514           }
2515         }
2516 
2517         if (Arg->getType().isRestrictQualified())
2518           AI->addAttr(llvm::Attribute::NoAlias);
2519 
2520         // LLVM expects swifterror parameters to be used in very restricted
2521         // ways.  Copy the value into a less-restricted temporary.
2522         if (FI.getExtParameterInfo(ArgNo).getABI()
2523               == ParameterABI::SwiftErrorResult) {
2524           QualType pointeeTy = Ty->getPointeeType();
2525           assert(pointeeTy->isPointerType());
2526           Address temp =
2527             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2528           Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2529           llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2530           Builder.CreateStore(incomingErrorValue, temp);
2531           V = temp.getPointer();
2532 
2533           // Push a cleanup to copy the value back at the end of the function.
2534           // The convention does not guarantee that the value will be written
2535           // back if the function exits with an unwind exception.
2536           EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2537         }
2538 
2539         // Ensure the argument is the correct type.
2540         if (V->getType() != ArgI.getCoerceToType())
2541           V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2542 
2543         if (isPromoted)
2544           V = emitArgumentDemotion(*this, Arg, V);
2545 
2546         // Because of merging of function types from multiple decls it is
2547         // possible for the type of an argument to not match the corresponding
2548         // type in the function type. Since we are codegening the callee
2549         // in here, add a cast to the argument type.
2550         llvm::Type *LTy = ConvertType(Arg->getType());
2551         if (V->getType() != LTy)
2552           V = Builder.CreateBitCast(V, LTy);
2553 
2554         ArgVals.push_back(ParamValue::forDirect(V));
2555         break;
2556       }
2557 
2558       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2559                                      Arg->getName());
2560 
2561       // Pointer to store into.
2562       Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2563 
2564       // Fast-isel and the optimizer generally like scalar values better than
2565       // FCAs, so we flatten them if this is safe to do for this argument.
2566       llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2567       if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2568           STy->getNumElements() > 1) {
2569         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2570         llvm::Type *DstTy = Ptr.getElementType();
2571         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2572 
2573         Address AddrToStoreInto = Address::invalid();
2574         if (SrcSize <= DstSize) {
2575           AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2576         } else {
2577           AddrToStoreInto =
2578             CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2579         }
2580 
2581         assert(STy->getNumElements() == NumIRArgs);
2582         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2583           auto AI = FnArgs[FirstIRArg + i];
2584           AI->setName(Arg->getName() + ".coerce" + Twine(i));
2585           Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
2586           Builder.CreateStore(AI, EltPtr);
2587         }
2588 
2589         if (SrcSize > DstSize) {
2590           Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2591         }
2592 
2593       } else {
2594         // Simple case, just do a coerced store of the argument into the alloca.
2595         assert(NumIRArgs == 1);
2596         auto AI = FnArgs[FirstIRArg];
2597         AI->setName(Arg->getName() + ".coerce");
2598         CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
2599       }
2600 
2601       // Match to what EmitParmDecl is expecting for this type.
2602       if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2603         llvm::Value *V =
2604             EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2605         if (isPromoted)
2606           V = emitArgumentDemotion(*this, Arg, V);
2607         ArgVals.push_back(ParamValue::forDirect(V));
2608       } else {
2609         ArgVals.push_back(ParamValue::forIndirect(Alloca));
2610       }
2611       break;
2612     }
2613 
2614     case ABIArgInfo::CoerceAndExpand: {
2615       // Reconstruct into a temporary.
2616       Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2617       ArgVals.push_back(ParamValue::forIndirect(alloca));
2618 
2619       auto coercionType = ArgI.getCoerceAndExpandType();
2620       alloca = Builder.CreateElementBitCast(alloca, coercionType);
2621 
2622       unsigned argIndex = FirstIRArg;
2623       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2624         llvm::Type *eltType = coercionType->getElementType(i);
2625         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2626           continue;
2627 
2628         auto eltAddr = Builder.CreateStructGEP(alloca, i);
2629         auto elt = FnArgs[argIndex++];
2630         Builder.CreateStore(elt, eltAddr);
2631       }
2632       assert(argIndex == FirstIRArg + NumIRArgs);
2633       break;
2634     }
2635 
2636     case ABIArgInfo::Expand: {
2637       // If this structure was expanded into multiple arguments then
2638       // we need to create a temporary and reconstruct it from the
2639       // arguments.
2640       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2641       LValue LV = MakeAddrLValue(Alloca, Ty);
2642       ArgVals.push_back(ParamValue::forIndirect(Alloca));
2643 
2644       auto FnArgIter = FnArgs.begin() + FirstIRArg;
2645       ExpandTypeFromArgs(Ty, LV, FnArgIter);
2646       assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2647       for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2648         auto AI = FnArgs[FirstIRArg + i];
2649         AI->setName(Arg->getName() + "." + Twine(i));
2650       }
2651       break;
2652     }
2653 
2654     case ABIArgInfo::Ignore:
2655       assert(NumIRArgs == 0);
2656       // Initialize the local variable appropriately.
2657       if (!hasScalarEvaluationKind(Ty)) {
2658         ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2659       } else {
2660         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2661         ArgVals.push_back(ParamValue::forDirect(U));
2662       }
2663       break;
2664     }
2665   }
2666 
2667   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2668     for (int I = Args.size() - 1; I >= 0; --I)
2669       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2670   } else {
2671     for (unsigned I = 0, E = Args.size(); I != E; ++I)
2672       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2673   }
2674 }
2675 
2676 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2677   while (insn->use_empty()) {
2678     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2679     if (!bitcast) return;
2680 
2681     // This is "safe" because we would have used a ConstantExpr otherwise.
2682     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2683     bitcast->eraseFromParent();
2684   }
2685 }
2686 
2687 /// Try to emit a fused autorelease of a return result.
2688 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2689                                                     llvm::Value *result) {
2690   // We must be immediately followed the cast.
2691   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2692   if (BB->empty()) return nullptr;
2693   if (&BB->back() != result) return nullptr;
2694 
2695   llvm::Type *resultType = result->getType();
2696 
2697   // result is in a BasicBlock and is therefore an Instruction.
2698   llvm::Instruction *generator = cast<llvm::Instruction>(result);
2699 
2700   SmallVector<llvm::Instruction *, 4> InstsToKill;
2701 
2702   // Look for:
2703   //  %generator = bitcast %type1* %generator2 to %type2*
2704   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2705     // We would have emitted this as a constant if the operand weren't
2706     // an Instruction.
2707     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2708 
2709     // Require the generator to be immediately followed by the cast.
2710     if (generator->getNextNode() != bitcast)
2711       return nullptr;
2712 
2713     InstsToKill.push_back(bitcast);
2714   }
2715 
2716   // Look for:
2717   //   %generator = call i8* @objc_retain(i8* %originalResult)
2718   // or
2719   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2720   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2721   if (!call) return nullptr;
2722 
2723   bool doRetainAutorelease;
2724 
2725   if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2726     doRetainAutorelease = true;
2727   } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2728                                           .objc_retainAutoreleasedReturnValue) {
2729     doRetainAutorelease = false;
2730 
2731     // If we emitted an assembly marker for this call (and the
2732     // ARCEntrypoints field should have been set if so), go looking
2733     // for that call.  If we can't find it, we can't do this
2734     // optimization.  But it should always be the immediately previous
2735     // instruction, unless we needed bitcasts around the call.
2736     if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2737       llvm::Instruction *prev = call->getPrevNode();
2738       assert(prev);
2739       if (isa<llvm::BitCastInst>(prev)) {
2740         prev = prev->getPrevNode();
2741         assert(prev);
2742       }
2743       assert(isa<llvm::CallInst>(prev));
2744       assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2745                CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2746       InstsToKill.push_back(prev);
2747     }
2748   } else {
2749     return nullptr;
2750   }
2751 
2752   result = call->getArgOperand(0);
2753   InstsToKill.push_back(call);
2754 
2755   // Keep killing bitcasts, for sanity.  Note that we no longer care
2756   // about precise ordering as long as there's exactly one use.
2757   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2758     if (!bitcast->hasOneUse()) break;
2759     InstsToKill.push_back(bitcast);
2760     result = bitcast->getOperand(0);
2761   }
2762 
2763   // Delete all the unnecessary instructions, from latest to earliest.
2764   for (auto *I : InstsToKill)
2765     I->eraseFromParent();
2766 
2767   // Do the fused retain/autorelease if we were asked to.
2768   if (doRetainAutorelease)
2769     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2770 
2771   // Cast back to the result type.
2772   return CGF.Builder.CreateBitCast(result, resultType);
2773 }
2774 
2775 /// If this is a +1 of the value of an immutable 'self', remove it.
2776 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2777                                           llvm::Value *result) {
2778   // This is only applicable to a method with an immutable 'self'.
2779   const ObjCMethodDecl *method =
2780     dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2781   if (!method) return nullptr;
2782   const VarDecl *self = method->getSelfDecl();
2783   if (!self->getType().isConstQualified()) return nullptr;
2784 
2785   // Look for a retain call.
2786   llvm::CallInst *retainCall =
2787     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2788   if (!retainCall ||
2789       retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2790     return nullptr;
2791 
2792   // Look for an ordinary load of 'self'.
2793   llvm::Value *retainedValue = retainCall->getArgOperand(0);
2794   llvm::LoadInst *load =
2795     dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2796   if (!load || load->isAtomic() || load->isVolatile() ||
2797       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2798     return nullptr;
2799 
2800   // Okay!  Burn it all down.  This relies for correctness on the
2801   // assumption that the retain is emitted as part of the return and
2802   // that thereafter everything is used "linearly".
2803   llvm::Type *resultType = result->getType();
2804   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2805   assert(retainCall->use_empty());
2806   retainCall->eraseFromParent();
2807   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2808 
2809   return CGF.Builder.CreateBitCast(load, resultType);
2810 }
2811 
2812 /// Emit an ARC autorelease of the result of a function.
2813 ///
2814 /// \return the value to actually return from the function
2815 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2816                                             llvm::Value *result) {
2817   // If we're returning 'self', kill the initial retain.  This is a
2818   // heuristic attempt to "encourage correctness" in the really unfortunate
2819   // case where we have a return of self during a dealloc and we desperately
2820   // need to avoid the possible autorelease.
2821   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2822     return self;
2823 
2824   // At -O0, try to emit a fused retain/autorelease.
2825   if (CGF.shouldUseFusedARCCalls())
2826     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2827       return fused;
2828 
2829   return CGF.EmitARCAutoreleaseReturnValue(result);
2830 }
2831 
2832 /// Heuristically search for a dominating store to the return-value slot.
2833 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2834   // Check if a User is a store which pointerOperand is the ReturnValue.
2835   // We are looking for stores to the ReturnValue, not for stores of the
2836   // ReturnValue to some other location.
2837   auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2838     auto *SI = dyn_cast<llvm::StoreInst>(U);
2839     if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2840       return nullptr;
2841     // These aren't actually possible for non-coerced returns, and we
2842     // only care about non-coerced returns on this code path.
2843     assert(!SI->isAtomic() && !SI->isVolatile());
2844     return SI;
2845   };
2846   // If there are multiple uses of the return-value slot, just check
2847   // for something immediately preceding the IP.  Sometimes this can
2848   // happen with how we generate implicit-returns; it can also happen
2849   // with noreturn cleanups.
2850   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2851     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2852     if (IP->empty()) return nullptr;
2853     llvm::Instruction *I = &IP->back();
2854 
2855     // Skip lifetime markers
2856     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2857                                             IE = IP->rend();
2858          II != IE; ++II) {
2859       if (llvm::IntrinsicInst *Intrinsic =
2860               dyn_cast<llvm::IntrinsicInst>(&*II)) {
2861         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2862           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2863           ++II;
2864           if (II == IE)
2865             break;
2866           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2867             continue;
2868         }
2869       }
2870       I = &*II;
2871       break;
2872     }
2873 
2874     return GetStoreIfValid(I);
2875   }
2876 
2877   llvm::StoreInst *store =
2878       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2879   if (!store) return nullptr;
2880 
2881   // Now do a first-and-dirty dominance check: just walk up the
2882   // single-predecessors chain from the current insertion point.
2883   llvm::BasicBlock *StoreBB = store->getParent();
2884   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2885   while (IP != StoreBB) {
2886     if (!(IP = IP->getSinglePredecessor()))
2887       return nullptr;
2888   }
2889 
2890   // Okay, the store's basic block dominates the insertion point; we
2891   // can do our thing.
2892   return store;
2893 }
2894 
2895 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2896                                          bool EmitRetDbgLoc,
2897                                          SourceLocation EndLoc) {
2898   if (FI.isNoReturn()) {
2899     // Noreturn functions don't return.
2900     EmitUnreachable(EndLoc);
2901     return;
2902   }
2903 
2904   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2905     // Naked functions don't have epilogues.
2906     Builder.CreateUnreachable();
2907     return;
2908   }
2909 
2910   // Functions with no result always return void.
2911   if (!ReturnValue.isValid()) {
2912     Builder.CreateRetVoid();
2913     return;
2914   }
2915 
2916   llvm::DebugLoc RetDbgLoc;
2917   llvm::Value *RV = nullptr;
2918   QualType RetTy = FI.getReturnType();
2919   const ABIArgInfo &RetAI = FI.getReturnInfo();
2920 
2921   switch (RetAI.getKind()) {
2922   case ABIArgInfo::InAlloca:
2923     // Aggregrates get evaluated directly into the destination.  Sometimes we
2924     // need to return the sret value in a register, though.
2925     assert(hasAggregateEvaluationKind(RetTy));
2926     if (RetAI.getInAllocaSRet()) {
2927       llvm::Function::arg_iterator EI = CurFn->arg_end();
2928       --EI;
2929       llvm::Value *ArgStruct = &*EI;
2930       llvm::Value *SRet = Builder.CreateStructGEP(
2931           nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2932       RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2933     }
2934     break;
2935 
2936   case ABIArgInfo::Indirect: {
2937     auto AI = CurFn->arg_begin();
2938     if (RetAI.isSRetAfterThis())
2939       ++AI;
2940     switch (getEvaluationKind(RetTy)) {
2941     case TEK_Complex: {
2942       ComplexPairTy RT =
2943         EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2944       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2945                          /*isInit*/ true);
2946       break;
2947     }
2948     case TEK_Aggregate:
2949       // Do nothing; aggregrates get evaluated directly into the destination.
2950       break;
2951     case TEK_Scalar:
2952       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2953                         MakeNaturalAlignAddrLValue(&*AI, RetTy),
2954                         /*isInit*/ true);
2955       break;
2956     }
2957     break;
2958   }
2959 
2960   case ABIArgInfo::Extend:
2961   case ABIArgInfo::Direct:
2962     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2963         RetAI.getDirectOffset() == 0) {
2964       // The internal return value temp always will have pointer-to-return-type
2965       // type, just do a load.
2966 
2967       // If there is a dominating store to ReturnValue, we can elide
2968       // the load, zap the store, and usually zap the alloca.
2969       if (llvm::StoreInst *SI =
2970               findDominatingStoreToReturnValue(*this)) {
2971         // Reuse the debug location from the store unless there is
2972         // cleanup code to be emitted between the store and return
2973         // instruction.
2974         if (EmitRetDbgLoc && !AutoreleaseResult)
2975           RetDbgLoc = SI->getDebugLoc();
2976         // Get the stored value and nuke the now-dead store.
2977         RV = SI->getValueOperand();
2978         SI->eraseFromParent();
2979 
2980       // Otherwise, we have to do a simple load.
2981       } else {
2982         RV = Builder.CreateLoad(ReturnValue);
2983       }
2984     } else {
2985       // If the value is offset in memory, apply the offset now.
2986       Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2987 
2988       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2989     }
2990 
2991     // In ARC, end functions that return a retainable type with a call
2992     // to objc_autoreleaseReturnValue.
2993     if (AutoreleaseResult) {
2994 #ifndef NDEBUG
2995       // Type::isObjCRetainabletype has to be called on a QualType that hasn't
2996       // been stripped of the typedefs, so we cannot use RetTy here. Get the
2997       // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2998       // CurCodeDecl or BlockInfo.
2999       QualType RT;
3000 
3001       if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3002         RT = FD->getReturnType();
3003       else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3004         RT = MD->getReturnType();
3005       else if (isa<BlockDecl>(CurCodeDecl))
3006         RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3007       else
3008         llvm_unreachable("Unexpected function/method type");
3009 
3010       assert(getLangOpts().ObjCAutoRefCount &&
3011              !FI.isReturnsRetained() &&
3012              RT->isObjCRetainableType());
3013 #endif
3014       RV = emitAutoreleaseOfResult(*this, RV);
3015     }
3016 
3017     break;
3018 
3019   case ABIArgInfo::Ignore:
3020     break;
3021 
3022   case ABIArgInfo::CoerceAndExpand: {
3023     auto coercionType = RetAI.getCoerceAndExpandType();
3024 
3025     // Load all of the coerced elements out into results.
3026     llvm::SmallVector<llvm::Value*, 4> results;
3027     Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
3028     for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3029       auto coercedEltType = coercionType->getElementType(i);
3030       if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3031         continue;
3032 
3033       auto eltAddr = Builder.CreateStructGEP(addr, i);
3034       auto elt = Builder.CreateLoad(eltAddr);
3035       results.push_back(elt);
3036     }
3037 
3038     // If we have one result, it's the single direct result type.
3039     if (results.size() == 1) {
3040       RV = results[0];
3041 
3042     // Otherwise, we need to make a first-class aggregate.
3043     } else {
3044       // Construct a return type that lacks padding elements.
3045       llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3046 
3047       RV = llvm::UndefValue::get(returnType);
3048       for (unsigned i = 0, e = results.size(); i != e; ++i) {
3049         RV = Builder.CreateInsertValue(RV, results[i], i);
3050       }
3051     }
3052     break;
3053   }
3054 
3055   case ABIArgInfo::Expand:
3056     llvm_unreachable("Invalid ABI kind for return argument");
3057   }
3058 
3059   llvm::Instruction *Ret;
3060   if (RV) {
3061     EmitReturnValueCheck(RV);
3062     Ret = Builder.CreateRet(RV);
3063   } else {
3064     Ret = Builder.CreateRetVoid();
3065   }
3066 
3067   if (RetDbgLoc)
3068     Ret->setDebugLoc(std::move(RetDbgLoc));
3069 }
3070 
3071 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
3072   // A current decl may not be available when emitting vtable thunks.
3073   if (!CurCodeDecl)
3074     return;
3075 
3076   // If the return block isn't reachable, neither is this check, so don't emit
3077   // it.
3078   if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
3079     return;
3080 
3081   ReturnsNonNullAttr *RetNNAttr = nullptr;
3082   if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
3083     RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3084 
3085   if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3086     return;
3087 
3088   // Prefer the returns_nonnull attribute if it's present.
3089   SourceLocation AttrLoc;
3090   SanitizerMask CheckKind;
3091   SanitizerHandler Handler;
3092   if (RetNNAttr) {
3093     assert(!requiresReturnValueNullabilityCheck() &&
3094            "Cannot check nullability and the nonnull attribute");
3095     AttrLoc = RetNNAttr->getLocation();
3096     CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3097     Handler = SanitizerHandler::NonnullReturn;
3098   } else {
3099     if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3100       if (auto *TSI = DD->getTypeSourceInfo())
3101         if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
3102           AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3103     CheckKind = SanitizerKind::NullabilityReturn;
3104     Handler = SanitizerHandler::NullabilityReturn;
3105   }
3106 
3107   SanitizerScope SanScope(this);
3108 
3109   // Make sure the "return" source location is valid. If we're checking a
3110   // nullability annotation, make sure the preconditions for the check are met.
3111   llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3112   llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3113   llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3114   llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3115   if (requiresReturnValueNullabilityCheck())
3116     CanNullCheck =
3117         Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3118   Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3119   EmitBlock(Check);
3120 
3121   // Now do the null check.
3122   llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3123   llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3124   llvm::Value *DynamicData[] = {SLocPtr};
3125   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3126 
3127   EmitBlock(NoCheck);
3128 
3129 #ifndef NDEBUG
3130   // The return location should not be used after the check has been emitted.
3131   ReturnLocation = Address::invalid();
3132 #endif
3133 }
3134 
3135 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3136   const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3137   return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3138 }
3139 
3140 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3141                                           QualType Ty) {
3142   // FIXME: Generate IR in one pass, rather than going back and fixing up these
3143   // placeholders.
3144   llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3145   llvm::Type *IRPtrTy = IRTy->getPointerTo();
3146   llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3147 
3148   // FIXME: When we generate this IR in one pass, we shouldn't need
3149   // this win32-specific alignment hack.
3150   CharUnits Align = CharUnits::fromQuantity(4);
3151   Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3152 
3153   return AggValueSlot::forAddr(Address(Placeholder, Align),
3154                                Ty.getQualifiers(),
3155                                AggValueSlot::IsNotDestructed,
3156                                AggValueSlot::DoesNotNeedGCBarriers,
3157                                AggValueSlot::IsNotAliased,
3158                                AggValueSlot::DoesNotOverlap);
3159 }
3160 
3161 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3162                                           const VarDecl *param,
3163                                           SourceLocation loc) {
3164   // StartFunction converted the ABI-lowered parameter(s) into a
3165   // local alloca.  We need to turn that into an r-value suitable
3166   // for EmitCall.
3167   Address local = GetAddrOfLocalVar(param);
3168 
3169   QualType type = param->getType();
3170 
3171   if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3172     CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3173   }
3174 
3175   // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3176   // but the argument needs to be the original pointer.
3177   if (type->isReferenceType()) {
3178     args.add(RValue::get(Builder.CreateLoad(local)), type);
3179 
3180   // In ARC, move out of consumed arguments so that the release cleanup
3181   // entered by StartFunction doesn't cause an over-release.  This isn't
3182   // optimal -O0 code generation, but it should get cleaned up when
3183   // optimization is enabled.  This also assumes that delegate calls are
3184   // performed exactly once for a set of arguments, but that should be safe.
3185   } else if (getLangOpts().ObjCAutoRefCount &&
3186              param->hasAttr<NSConsumedAttr>() &&
3187              type->isObjCRetainableType()) {
3188     llvm::Value *ptr = Builder.CreateLoad(local);
3189     auto null =
3190       llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3191     Builder.CreateStore(null, local);
3192     args.add(RValue::get(ptr), type);
3193 
3194   // For the most part, we just need to load the alloca, except that
3195   // aggregate r-values are actually pointers to temporaries.
3196   } else {
3197     args.add(convertTempToRValue(local, type, loc), type);
3198   }
3199 
3200   // Deactivate the cleanup for the callee-destructed param that was pushed.
3201   if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3202       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3203       param->needsDestruction(getContext())) {
3204     EHScopeStack::stable_iterator cleanup =
3205         CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3206     assert(cleanup.isValid() &&
3207            "cleanup for callee-destructed param not recorded");
3208     // This unreachable is a temporary marker which will be removed later.
3209     llvm::Instruction *isActive = Builder.CreateUnreachable();
3210     args.addArgCleanupDeactivation(cleanup, isActive);
3211   }
3212 }
3213 
3214 static bool isProvablyNull(llvm::Value *addr) {
3215   return isa<llvm::ConstantPointerNull>(addr);
3216 }
3217 
3218 /// Emit the actual writing-back of a writeback.
3219 static void emitWriteback(CodeGenFunction &CGF,
3220                           const CallArgList::Writeback &writeback) {
3221   const LValue &srcLV = writeback.Source;
3222   Address srcAddr = srcLV.getAddress(CGF);
3223   assert(!isProvablyNull(srcAddr.getPointer()) &&
3224          "shouldn't have writeback for provably null argument");
3225 
3226   llvm::BasicBlock *contBB = nullptr;
3227 
3228   // If the argument wasn't provably non-null, we need to null check
3229   // before doing the store.
3230   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3231                                               CGF.CGM.getDataLayout());
3232   if (!provablyNonNull) {
3233     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3234     contBB = CGF.createBasicBlock("icr.done");
3235 
3236     llvm::Value *isNull =
3237       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3238     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3239     CGF.EmitBlock(writebackBB);
3240   }
3241 
3242   // Load the value to writeback.
3243   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3244 
3245   // Cast it back, in case we're writing an id to a Foo* or something.
3246   value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3247                                     "icr.writeback-cast");
3248 
3249   // Perform the writeback.
3250 
3251   // If we have a "to use" value, it's something we need to emit a use
3252   // of.  This has to be carefully threaded in: if it's done after the
3253   // release it's potentially undefined behavior (and the optimizer
3254   // will ignore it), and if it happens before the retain then the
3255   // optimizer could move the release there.
3256   if (writeback.ToUse) {
3257     assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3258 
3259     // Retain the new value.  No need to block-copy here:  the block's
3260     // being passed up the stack.
3261     value = CGF.EmitARCRetainNonBlock(value);
3262 
3263     // Emit the intrinsic use here.
3264     CGF.EmitARCIntrinsicUse(writeback.ToUse);
3265 
3266     // Load the old value (primitively).
3267     llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3268 
3269     // Put the new value in place (primitively).
3270     CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3271 
3272     // Release the old value.
3273     CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3274 
3275   // Otherwise, we can just do a normal lvalue store.
3276   } else {
3277     CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3278   }
3279 
3280   // Jump to the continuation block.
3281   if (!provablyNonNull)
3282     CGF.EmitBlock(contBB);
3283 }
3284 
3285 static void emitWritebacks(CodeGenFunction &CGF,
3286                            const CallArgList &args) {
3287   for (const auto &I : args.writebacks())
3288     emitWriteback(CGF, I);
3289 }
3290 
3291 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3292                                             const CallArgList &CallArgs) {
3293   ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3294     CallArgs.getCleanupsToDeactivate();
3295   // Iterate in reverse to increase the likelihood of popping the cleanup.
3296   for (const auto &I : llvm::reverse(Cleanups)) {
3297     CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3298     I.IsActiveIP->eraseFromParent();
3299   }
3300 }
3301 
3302 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3303   if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3304     if (uop->getOpcode() == UO_AddrOf)
3305       return uop->getSubExpr();
3306   return nullptr;
3307 }
3308 
3309 /// Emit an argument that's being passed call-by-writeback.  That is,
3310 /// we are passing the address of an __autoreleased temporary; it
3311 /// might be copy-initialized with the current value of the given
3312 /// address, but it will definitely be copied out of after the call.
3313 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3314                              const ObjCIndirectCopyRestoreExpr *CRE) {
3315   LValue srcLV;
3316 
3317   // Make an optimistic effort to emit the address as an l-value.
3318   // This can fail if the argument expression is more complicated.
3319   if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3320     srcLV = CGF.EmitLValue(lvExpr);
3321 
3322   // Otherwise, just emit it as a scalar.
3323   } else {
3324     Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3325 
3326     QualType srcAddrType =
3327       CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3328     srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3329   }
3330   Address srcAddr = srcLV.getAddress(CGF);
3331 
3332   // The dest and src types don't necessarily match in LLVM terms
3333   // because of the crazy ObjC compatibility rules.
3334 
3335   llvm::PointerType *destType =
3336     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3337 
3338   // If the address is a constant null, just pass the appropriate null.
3339   if (isProvablyNull(srcAddr.getPointer())) {
3340     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3341              CRE->getType());
3342     return;
3343   }
3344 
3345   // Create the temporary.
3346   Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3347                                       CGF.getPointerAlign(),
3348                                       "icr.temp");
3349   // Loading an l-value can introduce a cleanup if the l-value is __weak,
3350   // and that cleanup will be conditional if we can't prove that the l-value
3351   // isn't null, so we need to register a dominating point so that the cleanups
3352   // system will make valid IR.
3353   CodeGenFunction::ConditionalEvaluation condEval(CGF);
3354 
3355   // Zero-initialize it if we're not doing a copy-initialization.
3356   bool shouldCopy = CRE->shouldCopy();
3357   if (!shouldCopy) {
3358     llvm::Value *null =
3359       llvm::ConstantPointerNull::get(
3360         cast<llvm::PointerType>(destType->getElementType()));
3361     CGF.Builder.CreateStore(null, temp);
3362   }
3363 
3364   llvm::BasicBlock *contBB = nullptr;
3365   llvm::BasicBlock *originBB = nullptr;
3366 
3367   // If the address is *not* known to be non-null, we need to switch.
3368   llvm::Value *finalArgument;
3369 
3370   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3371                                               CGF.CGM.getDataLayout());
3372   if (provablyNonNull) {
3373     finalArgument = temp.getPointer();
3374   } else {
3375     llvm::Value *isNull =
3376       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3377 
3378     finalArgument = CGF.Builder.CreateSelect(isNull,
3379                                    llvm::ConstantPointerNull::get(destType),
3380                                              temp.getPointer(), "icr.argument");
3381 
3382     // If we need to copy, then the load has to be conditional, which
3383     // means we need control flow.
3384     if (shouldCopy) {
3385       originBB = CGF.Builder.GetInsertBlock();
3386       contBB = CGF.createBasicBlock("icr.cont");
3387       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3388       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3389       CGF.EmitBlock(copyBB);
3390       condEval.begin(CGF);
3391     }
3392   }
3393 
3394   llvm::Value *valueToUse = nullptr;
3395 
3396   // Perform a copy if necessary.
3397   if (shouldCopy) {
3398     RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3399     assert(srcRV.isScalar());
3400 
3401     llvm::Value *src = srcRV.getScalarVal();
3402     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3403                                     "icr.cast");
3404 
3405     // Use an ordinary store, not a store-to-lvalue.
3406     CGF.Builder.CreateStore(src, temp);
3407 
3408     // If optimization is enabled, and the value was held in a
3409     // __strong variable, we need to tell the optimizer that this
3410     // value has to stay alive until we're doing the store back.
3411     // This is because the temporary is effectively unretained,
3412     // and so otherwise we can violate the high-level semantics.
3413     if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3414         srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3415       valueToUse = src;
3416     }
3417   }
3418 
3419   // Finish the control flow if we needed it.
3420   if (shouldCopy && !provablyNonNull) {
3421     llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3422     CGF.EmitBlock(contBB);
3423 
3424     // Make a phi for the value to intrinsically use.
3425     if (valueToUse) {
3426       llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3427                                                       "icr.to-use");
3428       phiToUse->addIncoming(valueToUse, copyBB);
3429       phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3430                             originBB);
3431       valueToUse = phiToUse;
3432     }
3433 
3434     condEval.end(CGF);
3435   }
3436 
3437   args.addWriteback(srcLV, temp, valueToUse);
3438   args.add(RValue::get(finalArgument), CRE->getType());
3439 }
3440 
3441 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3442   assert(!StackBase);
3443 
3444   // Save the stack.
3445   llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3446   StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3447 }
3448 
3449 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3450   if (StackBase) {
3451     // Restore the stack after the call.
3452     llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3453     CGF.Builder.CreateCall(F, StackBase);
3454   }
3455 }
3456 
3457 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3458                                           SourceLocation ArgLoc,
3459                                           AbstractCallee AC,
3460                                           unsigned ParmNum) {
3461   if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3462                          SanOpts.has(SanitizerKind::NullabilityArg)))
3463     return;
3464 
3465   // The param decl may be missing in a variadic function.
3466   auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3467   unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3468 
3469   // Prefer the nonnull attribute if it's present.
3470   const NonNullAttr *NNAttr = nullptr;
3471   if (SanOpts.has(SanitizerKind::NonnullAttribute))
3472     NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3473 
3474   bool CanCheckNullability = false;
3475   if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3476     auto Nullability = PVD->getType()->getNullability(getContext());
3477     CanCheckNullability = Nullability &&
3478                           *Nullability == NullabilityKind::NonNull &&
3479                           PVD->getTypeSourceInfo();
3480   }
3481 
3482   if (!NNAttr && !CanCheckNullability)
3483     return;
3484 
3485   SourceLocation AttrLoc;
3486   SanitizerMask CheckKind;
3487   SanitizerHandler Handler;
3488   if (NNAttr) {
3489     AttrLoc = NNAttr->getLocation();
3490     CheckKind = SanitizerKind::NonnullAttribute;
3491     Handler = SanitizerHandler::NonnullArg;
3492   } else {
3493     AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3494     CheckKind = SanitizerKind::NullabilityArg;
3495     Handler = SanitizerHandler::NullabilityArg;
3496   }
3497 
3498   SanitizerScope SanScope(this);
3499   assert(RV.isScalar());
3500   llvm::Value *V = RV.getScalarVal();
3501   llvm::Value *Cond =
3502       Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3503   llvm::Constant *StaticData[] = {
3504       EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3505       llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3506   };
3507   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3508 }
3509 
3510 void CodeGenFunction::EmitCallArgs(
3511     CallArgList &Args, ArrayRef<QualType> ArgTypes,
3512     llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3513     AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3514   assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3515 
3516   // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3517   // because arguments are destroyed left to right in the callee. As a special
3518   // case, there are certain language constructs that require left-to-right
3519   // evaluation, and in those cases we consider the evaluation order requirement
3520   // to trump the "destruction order is reverse construction order" guarantee.
3521   bool LeftToRight =
3522       CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3523           ? Order == EvaluationOrder::ForceLeftToRight
3524           : Order != EvaluationOrder::ForceRightToLeft;
3525 
3526   auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3527                                          RValue EmittedArg) {
3528     if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3529       return;
3530     auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3531     if (PS == nullptr)
3532       return;
3533 
3534     const auto &Context = getContext();
3535     auto SizeTy = Context.getSizeType();
3536     auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3537     assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3538     llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3539                                                      EmittedArg.getScalarVal(),
3540                                                      PS->isDynamic());
3541     Args.add(RValue::get(V), SizeTy);
3542     // If we're emitting args in reverse, be sure to do so with
3543     // pass_object_size, as well.
3544     if (!LeftToRight)
3545       std::swap(Args.back(), *(&Args.back() - 1));
3546   };
3547 
3548   // Insert a stack save if we're going to need any inalloca args.
3549   bool HasInAllocaArgs = false;
3550   if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3551     for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3552          I != E && !HasInAllocaArgs; ++I)
3553       HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3554     if (HasInAllocaArgs) {
3555       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3556       Args.allocateArgumentMemory(*this);
3557     }
3558   }
3559 
3560   // Evaluate each argument in the appropriate order.
3561   size_t CallArgsStart = Args.size();
3562   for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3563     unsigned Idx = LeftToRight ? I : E - I - 1;
3564     CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3565     unsigned InitialArgSize = Args.size();
3566     // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3567     // the argument and parameter match or the objc method is parameterized.
3568     assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3569             getContext().hasSameUnqualifiedType((*Arg)->getType(),
3570                                                 ArgTypes[Idx]) ||
3571             (isa<ObjCMethodDecl>(AC.getDecl()) &&
3572              isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3573            "Argument and parameter types don't match");
3574     EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3575     // In particular, we depend on it being the last arg in Args, and the
3576     // objectsize bits depend on there only being one arg if !LeftToRight.
3577     assert(InitialArgSize + 1 == Args.size() &&
3578            "The code below depends on only adding one arg per EmitCallArg");
3579     (void)InitialArgSize;
3580     // Since pointer argument are never emitted as LValue, it is safe to emit
3581     // non-null argument check for r-value only.
3582     if (!Args.back().hasLValue()) {
3583       RValue RVArg = Args.back().getKnownRValue();
3584       EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3585                           ParamsToSkip + Idx);
3586       // @llvm.objectsize should never have side-effects and shouldn't need
3587       // destruction/cleanups, so we can safely "emit" it after its arg,
3588       // regardless of right-to-leftness
3589       MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3590     }
3591   }
3592 
3593   if (!LeftToRight) {
3594     // Un-reverse the arguments we just evaluated so they match up with the LLVM
3595     // IR function.
3596     std::reverse(Args.begin() + CallArgsStart, Args.end());
3597   }
3598 }
3599 
3600 namespace {
3601 
3602 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3603   DestroyUnpassedArg(Address Addr, QualType Ty)
3604       : Addr(Addr), Ty(Ty) {}
3605 
3606   Address Addr;
3607   QualType Ty;
3608 
3609   void Emit(CodeGenFunction &CGF, Flags flags) override {
3610     QualType::DestructionKind DtorKind = Ty.isDestructedType();
3611     if (DtorKind == QualType::DK_cxx_destructor) {
3612       const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3613       assert(!Dtor->isTrivial());
3614       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3615                                 /*Delegating=*/false, Addr, Ty);
3616     } else {
3617       CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3618     }
3619   }
3620 };
3621 
3622 struct DisableDebugLocationUpdates {
3623   CodeGenFunction &CGF;
3624   bool disabledDebugInfo;
3625   DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3626     if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3627       CGF.disableDebugInfo();
3628   }
3629   ~DisableDebugLocationUpdates() {
3630     if (disabledDebugInfo)
3631       CGF.enableDebugInfo();
3632   }
3633 };
3634 
3635 } // end anonymous namespace
3636 
3637 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3638   if (!HasLV)
3639     return RV;
3640   LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3641   CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3642                         LV.isVolatile());
3643   IsUsed = true;
3644   return RValue::getAggregate(Copy.getAddress(CGF));
3645 }
3646 
3647 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3648   LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3649   if (!HasLV && RV.isScalar())
3650     CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
3651   else if (!HasLV && RV.isComplex())
3652     CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3653   else {
3654     auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
3655     LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3656     // We assume that call args are never copied into subobjects.
3657     CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3658                           HasLV ? LV.isVolatileQualified()
3659                                 : RV.isVolatileQualified());
3660   }
3661   IsUsed = true;
3662 }
3663 
3664 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3665                                   QualType type) {
3666   DisableDebugLocationUpdates Dis(*this, E);
3667   if (const ObjCIndirectCopyRestoreExpr *CRE
3668         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3669     assert(getLangOpts().ObjCAutoRefCount);
3670     return emitWritebackArg(*this, args, CRE);
3671   }
3672 
3673   assert(type->isReferenceType() == E->isGLValue() &&
3674          "reference binding to unmaterialized r-value!");
3675 
3676   if (E->isGLValue()) {
3677     assert(E->getObjectKind() == OK_Ordinary);
3678     return args.add(EmitReferenceBindingToExpr(E), type);
3679   }
3680 
3681   bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3682 
3683   // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3684   // However, we still have to push an EH-only cleanup in case we unwind before
3685   // we make it to the call.
3686   if (HasAggregateEvalKind &&
3687       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
3688     // If we're using inalloca, use the argument memory.  Otherwise, use a
3689     // temporary.
3690     AggValueSlot Slot;
3691     if (args.isUsingInAlloca())
3692       Slot = createPlaceholderSlot(*this, type);
3693     else
3694       Slot = CreateAggTemp(type, "agg.tmp");
3695 
3696     bool DestroyedInCallee = true, NeedsEHCleanup = true;
3697     if (const auto *RD = type->getAsCXXRecordDecl())
3698       DestroyedInCallee = RD->hasNonTrivialDestructor();
3699     else
3700       NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3701 
3702     if (DestroyedInCallee)
3703       Slot.setExternallyDestructed();
3704 
3705     EmitAggExpr(E, Slot);
3706     RValue RV = Slot.asRValue();
3707     args.add(RV, type);
3708 
3709     if (DestroyedInCallee && NeedsEHCleanup) {
3710       // Create a no-op GEP between the placeholder and the cleanup so we can
3711       // RAUW it successfully.  It also serves as a marker of the first
3712       // instruction where the cleanup is active.
3713       pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3714                                               type);
3715       // This unreachable is a temporary marker which will be removed later.
3716       llvm::Instruction *IsActive = Builder.CreateUnreachable();
3717       args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3718     }
3719     return;
3720   }
3721 
3722   if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3723       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3724     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3725     assert(L.isSimple());
3726     args.addUncopiedAggregate(L, type);
3727     return;
3728   }
3729 
3730   args.add(EmitAnyExprToTemp(E), type);
3731 }
3732 
3733 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3734   // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3735   // implicitly widens null pointer constants that are arguments to varargs
3736   // functions to pointer-sized ints.
3737   if (!getTarget().getTriple().isOSWindows())
3738     return Arg->getType();
3739 
3740   if (Arg->getType()->isIntegerType() &&
3741       getContext().getTypeSize(Arg->getType()) <
3742           getContext().getTargetInfo().getPointerWidth(0) &&
3743       Arg->isNullPointerConstant(getContext(),
3744                                  Expr::NPC_ValueDependentIsNotNull)) {
3745     return getContext().getIntPtrType();
3746   }
3747 
3748   return Arg->getType();
3749 }
3750 
3751 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3752 // optimizer it can aggressively ignore unwind edges.
3753 void
3754 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3755   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3756       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3757     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3758                       CGM.getNoObjCARCExceptionsMetadata());
3759 }
3760 
3761 /// Emits a call to the given no-arguments nounwind runtime function.
3762 llvm::CallInst *
3763 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3764                                          const llvm::Twine &name) {
3765   return EmitNounwindRuntimeCall(callee, None, name);
3766 }
3767 
3768 /// Emits a call to the given nounwind runtime function.
3769 llvm::CallInst *
3770 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3771                                          ArrayRef<llvm::Value *> args,
3772                                          const llvm::Twine &name) {
3773   llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3774   call->setDoesNotThrow();
3775   return call;
3776 }
3777 
3778 /// Emits a simple call (never an invoke) to the given no-arguments
3779 /// runtime function.
3780 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
3781                                                  const llvm::Twine &name) {
3782   return EmitRuntimeCall(callee, None, name);
3783 }
3784 
3785 // Calls which may throw must have operand bundles indicating which funclet
3786 // they are nested within.
3787 SmallVector<llvm::OperandBundleDef, 1>
3788 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3789   SmallVector<llvm::OperandBundleDef, 1> BundleList;
3790   // There is no need for a funclet operand bundle if we aren't inside a
3791   // funclet.
3792   if (!CurrentFuncletPad)
3793     return BundleList;
3794 
3795   // Skip intrinsics which cannot throw.
3796   auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3797   if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3798     return BundleList;
3799 
3800   BundleList.emplace_back("funclet", CurrentFuncletPad);
3801   return BundleList;
3802 }
3803 
3804 /// Emits a simple call (never an invoke) to the given runtime function.
3805 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
3806                                                  ArrayRef<llvm::Value *> args,
3807                                                  const llvm::Twine &name) {
3808   llvm::CallInst *call = Builder.CreateCall(
3809       callee, args, getBundlesForFunclet(callee.getCallee()), name);
3810   call->setCallingConv(getRuntimeCC());
3811   return call;
3812 }
3813 
3814 /// Emits a call or invoke to the given noreturn runtime function.
3815 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
3816     llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
3817   SmallVector<llvm::OperandBundleDef, 1> BundleList =
3818       getBundlesForFunclet(callee.getCallee());
3819 
3820   if (getInvokeDest()) {
3821     llvm::InvokeInst *invoke =
3822       Builder.CreateInvoke(callee,
3823                            getUnreachableBlock(),
3824                            getInvokeDest(),
3825                            args,
3826                            BundleList);
3827     invoke->setDoesNotReturn();
3828     invoke->setCallingConv(getRuntimeCC());
3829   } else {
3830     llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3831     call->setDoesNotReturn();
3832     call->setCallingConv(getRuntimeCC());
3833     Builder.CreateUnreachable();
3834   }
3835 }
3836 
3837 /// Emits a call or invoke instruction to the given nullary runtime function.
3838 llvm::CallBase *
3839 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3840                                          const Twine &name) {
3841   return EmitRuntimeCallOrInvoke(callee, None, name);
3842 }
3843 
3844 /// Emits a call or invoke instruction to the given runtime function.
3845 llvm::CallBase *
3846 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3847                                          ArrayRef<llvm::Value *> args,
3848                                          const Twine &name) {
3849   llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
3850   call->setCallingConv(getRuntimeCC());
3851   return call;
3852 }
3853 
3854 /// Emits a call or invoke instruction to the given function, depending
3855 /// on the current state of the EH stack.
3856 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
3857                                                   ArrayRef<llvm::Value *> Args,
3858                                                   const Twine &Name) {
3859   llvm::BasicBlock *InvokeDest = getInvokeDest();
3860   SmallVector<llvm::OperandBundleDef, 1> BundleList =
3861       getBundlesForFunclet(Callee.getCallee());
3862 
3863   llvm::CallBase *Inst;
3864   if (!InvokeDest)
3865     Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3866   else {
3867     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3868     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3869                                 Name);
3870     EmitBlock(ContBB);
3871   }
3872 
3873   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3874   // optimizer it can aggressively ignore unwind edges.
3875   if (CGM.getLangOpts().ObjCAutoRefCount)
3876     AddObjCARCExceptionMetadata(Inst);
3877 
3878   return Inst;
3879 }
3880 
3881 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3882                                                   llvm::Value *New) {
3883   DeferredReplacements.push_back(std::make_pair(Old, New));
3884 }
3885 
3886 namespace {
3887 
3888 /// Specify given \p NewAlign as the alignment of return value attribute. If
3889 /// such attribute already exists, re-set it to the maximal one of two options.
3890 LLVM_NODISCARD llvm::AttributeList
3891 maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
3892                                 const llvm::AttributeList &Attrs,
3893                                 llvm::Align NewAlign) {
3894   llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
3895   if (CurAlign >= NewAlign)
3896     return Attrs;
3897   llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
3898   return Attrs
3899       .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex,
3900                        llvm::Attribute::AttrKind::Alignment)
3901       .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr);
3902 }
3903 
3904 template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
3905 protected:
3906   CodeGenFunction &CGF;
3907 
3908   /// We do nothing if this is, or becomes, nullptr.
3909   const AlignedAttrTy *AA = nullptr;
3910 
3911   llvm::Value *Alignment = nullptr;      // May or may not be a constant.
3912   llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.
3913 
3914   AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
3915       : CGF(CGF_) {
3916     if (!FuncDecl)
3917       return;
3918     AA = FuncDecl->getAttr<AlignedAttrTy>();
3919   }
3920 
3921 public:
3922   /// If we can, materialize the alignment as an attribute on return value.
3923   LLVM_NODISCARD llvm::AttributeList
3924   TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
3925     if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
3926       return Attrs;
3927     const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
3928     if (!AlignmentCI)
3929       return Attrs;
3930     // We may legitimately have non-power-of-2 alignment here.
3931     // If so, this is UB land, emit it via `@llvm.assume` instead.
3932     if (!AlignmentCI->getValue().isPowerOf2())
3933       return Attrs;
3934     llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
3935         CGF.getLLVMContext(), Attrs,
3936         llvm::Align(
3937             AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
3938     AA = nullptr; // We're done. Disallow doing anything else.
3939     return NewAttrs;
3940   }
3941 
3942   /// Emit alignment assumption.
3943   /// This is a general fallback that we take if either there is an offset,
3944   /// or the alignment is variable or we are sanitizing for alignment.
3945   void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
3946     if (!AA)
3947       return;
3948     CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
3949                                 AA->getLocation(), Alignment, OffsetCI);
3950     AA = nullptr; // We're done. Disallow doing anything else.
3951   }
3952 };
3953 
3954 /// Helper data structure to emit `AssumeAlignedAttr`.
3955 class AssumeAlignedAttrEmitter final
3956     : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
3957 public:
3958   AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
3959       : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
3960     if (!AA)
3961       return;
3962     // It is guaranteed that the alignment/offset are constants.
3963     Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
3964     if (Expr *Offset = AA->getOffset()) {
3965       OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
3966       if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
3967         OffsetCI = nullptr;
3968     }
3969   }
3970 };
3971 
3972 /// Helper data structure to emit `AllocAlignAttr`.
3973 class AllocAlignAttrEmitter final
3974     : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
3975 public:
3976   AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
3977                         const CallArgList &CallArgs)
3978       : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
3979     if (!AA)
3980       return;
3981     // Alignment may or may not be a constant, and that is okay.
3982     Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
3983                     .getRValue(CGF)
3984                     .getScalarVal();
3985   }
3986 };
3987 
3988 } // namespace
3989 
3990 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3991                                  const CGCallee &Callee,
3992                                  ReturnValueSlot ReturnValue,
3993                                  const CallArgList &CallArgs,
3994                                  llvm::CallBase **callOrInvoke,
3995                                  SourceLocation Loc) {
3996   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3997 
3998   assert(Callee.isOrdinary() || Callee.isVirtual());
3999 
4000   // Handle struct-return functions by passing a pointer to the
4001   // location that we would like to return into.
4002   QualType RetTy = CallInfo.getReturnType();
4003   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
4004 
4005   llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
4006 
4007   const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4008   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
4009     // We can only guarantee that a function is called from the correct
4010     // context/function based on the appropriate target attributes,
4011     // so only check in the case where we have both always_inline and target
4012     // since otherwise we could be making a conditional call after a check for
4013     // the proper cpu features (and it won't cause code generation issues due to
4014     // function based code generation).
4015     if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
4016         TargetDecl->hasAttr<TargetAttr>())
4017       checkTargetFeatures(Loc, FD);
4018 
4019 #ifndef NDEBUG
4020   if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
4021     // For an inalloca varargs function, we don't expect CallInfo to match the
4022     // function pointer's type, because the inalloca struct a will have extra
4023     // fields in it for the varargs parameters.  Code later in this function
4024     // bitcasts the function pointer to the type derived from CallInfo.
4025     //
4026     // In other cases, we assert that the types match up (until pointers stop
4027     // having pointee types).
4028     llvm::Type *TypeFromVal;
4029     if (Callee.isVirtual())
4030       TypeFromVal = Callee.getVirtualFunctionType();
4031     else
4032       TypeFromVal =
4033           Callee.getFunctionPointer()->getType()->getPointerElementType();
4034     assert(IRFuncTy == TypeFromVal);
4035   }
4036 #endif
4037 
4038   // 1. Set up the arguments.
4039 
4040   // If we're using inalloca, insert the allocation after the stack save.
4041   // FIXME: Do this earlier rather than hacking it in here!
4042   Address ArgMemory = Address::invalid();
4043   if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
4044     const llvm::DataLayout &DL = CGM.getDataLayout();
4045     llvm::Instruction *IP = CallArgs.getStackBase();
4046     llvm::AllocaInst *AI;
4047     if (IP) {
4048       IP = IP->getNextNode();
4049       AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
4050                                 "argmem", IP);
4051     } else {
4052       AI = CreateTempAlloca(ArgStruct, "argmem");
4053     }
4054     auto Align = CallInfo.getArgStructAlignment();
4055     AI->setAlignment(Align.getAsAlign());
4056     AI->setUsedWithInAlloca(true);
4057     assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
4058     ArgMemory = Address(AI, Align);
4059   }
4060 
4061   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
4062   SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
4063 
4064   // If the call returns a temporary with struct return, create a temporary
4065   // alloca to hold the result, unless one is given to us.
4066   Address SRetPtr = Address::invalid();
4067   Address SRetAlloca = Address::invalid();
4068   llvm::Value *UnusedReturnSizePtr = nullptr;
4069   if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
4070     if (!ReturnValue.isNull()) {
4071       SRetPtr = ReturnValue.getValue();
4072     } else {
4073       SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
4074       if (HaveInsertPoint() && ReturnValue.isUnused()) {
4075         uint64_t size =
4076             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
4077         UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
4078       }
4079     }
4080     if (IRFunctionArgs.hasSRetArg()) {
4081       IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
4082     } else if (RetAI.isInAlloca()) {
4083       Address Addr =
4084           Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
4085       Builder.CreateStore(SRetPtr.getPointer(), Addr);
4086     }
4087   }
4088 
4089   Address swiftErrorTemp = Address::invalid();
4090   Address swiftErrorArg = Address::invalid();
4091 
4092   // When passing arguments using temporary allocas, we need to add the
4093   // appropriate lifetime markers. This vector keeps track of all the lifetime
4094   // markers that need to be ended right after the call.
4095   SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;
4096 
4097   // Translate all of the arguments as necessary to match the IR lowering.
4098   assert(CallInfo.arg_size() == CallArgs.size() &&
4099          "Mismatch between function signature & arguments.");
4100   unsigned ArgNo = 0;
4101   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
4102   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
4103        I != E; ++I, ++info_it, ++ArgNo) {
4104     const ABIArgInfo &ArgInfo = info_it->info;
4105 
4106     // Insert a padding argument to ensure proper alignment.
4107     if (IRFunctionArgs.hasPaddingArg(ArgNo))
4108       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
4109           llvm::UndefValue::get(ArgInfo.getPaddingType());
4110 
4111     unsigned FirstIRArg, NumIRArgs;
4112     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
4113 
4114     switch (ArgInfo.getKind()) {
4115     case ABIArgInfo::InAlloca: {
4116       assert(NumIRArgs == 0);
4117       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
4118       if (I->isAggregate()) {
4119         Address Addr = I->hasLValue()
4120                            ? I->getKnownLValue().getAddress(*this)
4121                            : I->getKnownRValue().getAggregateAddress();
4122         llvm::Instruction *Placeholder =
4123             cast<llvm::Instruction>(Addr.getPointer());
4124 
4125         if (!ArgInfo.getInAllocaIndirect()) {
4126           // Replace the placeholder with the appropriate argument slot GEP.
4127           CGBuilderTy::InsertPoint IP = Builder.saveIP();
4128           Builder.SetInsertPoint(Placeholder);
4129           Addr = Builder.CreateStructGEP(ArgMemory,
4130                                          ArgInfo.getInAllocaFieldIndex());
4131           Builder.restoreIP(IP);
4132         } else {
4133           // For indirect things such as overaligned structs, replace the
4134           // placeholder with a regular aggregate temporary alloca. Store the
4135           // address of this alloca into the struct.
4136           Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
4137           Address ArgSlot = Builder.CreateStructGEP(
4138               ArgMemory, ArgInfo.getInAllocaFieldIndex());
4139           Builder.CreateStore(Addr.getPointer(), ArgSlot);
4140         }
4141         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
4142       } else if (ArgInfo.getInAllocaIndirect()) {
4143         // Make a temporary alloca and store the address of it into the argument
4144         // struct.
4145         Address Addr = CreateMemTempWithoutCast(
4146             I->Ty, getContext().getTypeAlignInChars(I->Ty),
4147             "indirect-arg-temp");
4148         I->copyInto(*this, Addr);
4149         Address ArgSlot =
4150             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4151         Builder.CreateStore(Addr.getPointer(), ArgSlot);
4152       } else {
4153         // Store the RValue into the argument struct.
4154         Address Addr =
4155             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4156         unsigned AS = Addr.getType()->getPointerAddressSpace();
4157         llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
4158         // There are some cases where a trivial bitcast is not avoidable.  The
4159         // definition of a type later in a translation unit may change it's type
4160         // from {}* to (%struct.foo*)*.
4161         if (Addr.getType() != MemType)
4162           Addr = Builder.CreateBitCast(Addr, MemType);
4163         I->copyInto(*this, Addr);
4164       }
4165       break;
4166     }
4167 
4168     case ABIArgInfo::Indirect: {
4169       assert(NumIRArgs == 1);
4170       if (!I->isAggregate()) {
4171         // Make a temporary alloca to pass the argument.
4172         Address Addr = CreateMemTempWithoutCast(
4173             I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
4174         IRCallArgs[FirstIRArg] = Addr.getPointer();
4175 
4176         I->copyInto(*this, Addr);
4177       } else {
4178         // We want to avoid creating an unnecessary temporary+copy here;
4179         // however, we need one in three cases:
4180         // 1. If the argument is not byval, and we are required to copy the
4181         //    source.  (This case doesn't occur on any common architecture.)
4182         // 2. If the argument is byval, RV is not sufficiently aligned, and
4183         //    we cannot force it to be sufficiently aligned.
4184         // 3. If the argument is byval, but RV is not located in default
4185         //    or alloca address space.
4186         Address Addr = I->hasLValue()
4187                            ? I->getKnownLValue().getAddress(*this)
4188                            : I->getKnownRValue().getAggregateAddress();
4189         llvm::Value *V = Addr.getPointer();
4190         CharUnits Align = ArgInfo.getIndirectAlign();
4191         const llvm::DataLayout *TD = &CGM.getDataLayout();
4192 
4193         assert((FirstIRArg >= IRFuncTy->getNumParams() ||
4194                 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
4195                     TD->getAllocaAddrSpace()) &&
4196                "indirect argument must be in alloca address space");
4197 
4198         bool NeedCopy = false;
4199 
4200         if (Addr.getAlignment() < Align &&
4201             llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
4202                 Align.getQuantity()) {
4203           NeedCopy = true;
4204         } else if (I->hasLValue()) {
4205           auto LV = I->getKnownLValue();
4206           auto AS = LV.getAddressSpace();
4207 
4208           if (!ArgInfo.getIndirectByVal() ||
4209               (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
4210             NeedCopy = true;
4211           }
4212           if (!getLangOpts().OpenCL) {
4213             if ((ArgInfo.getIndirectByVal() &&
4214                 (AS != LangAS::Default &&
4215                  AS != CGM.getASTAllocaAddressSpace()))) {
4216               NeedCopy = true;
4217             }
4218           }
4219           // For OpenCL even if RV is located in default or alloca address space
4220           // we don't want to perform address space cast for it.
4221           else if ((ArgInfo.getIndirectByVal() &&
4222                     Addr.getType()->getAddressSpace() != IRFuncTy->
4223                       getParamType(FirstIRArg)->getPointerAddressSpace())) {
4224             NeedCopy = true;
4225           }
4226         }
4227 
4228         if (NeedCopy) {
4229           // Create an aligned temporary, and copy to it.
4230           Address AI = CreateMemTempWithoutCast(
4231               I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
4232           IRCallArgs[FirstIRArg] = AI.getPointer();
4233 
4234           // Emit lifetime markers for the temporary alloca.
4235           uint64_t ByvalTempElementSize =
4236               CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
4237           llvm::Value *LifetimeSize =
4238               EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
4239 
4240           // Add cleanup code to emit the end lifetime marker after the call.
4241           if (LifetimeSize) // In case we disabled lifetime markers.
4242             CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
4243 
4244           // Generate the copy.
4245           I->copyInto(*this, AI);
4246         } else {
4247           // Skip the extra memcpy call.
4248           auto *T = V->getType()->getPointerElementType()->getPointerTo(
4249               CGM.getDataLayout().getAllocaAddrSpace());
4250           IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
4251               *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
4252               true);
4253         }
4254       }
4255       break;
4256     }
4257 
4258     case ABIArgInfo::Ignore:
4259       assert(NumIRArgs == 0);
4260       break;
4261 
4262     case ABIArgInfo::Extend:
4263     case ABIArgInfo::Direct: {
4264       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4265           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4266           ArgInfo.getDirectOffset() == 0) {
4267         assert(NumIRArgs == 1);
4268         llvm::Value *V;
4269         if (!I->isAggregate())
4270           V = I->getKnownRValue().getScalarVal();
4271         else
4272           V = Builder.CreateLoad(
4273               I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4274                              : I->getKnownRValue().getAggregateAddress());
4275 
4276         // Implement swifterror by copying into a new swifterror argument.
4277         // We'll write back in the normal path out of the call.
4278         if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4279               == ParameterABI::SwiftErrorResult) {
4280           assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4281 
4282           QualType pointeeTy = I->Ty->getPointeeType();
4283           swiftErrorArg =
4284             Address(V, getContext().getTypeAlignInChars(pointeeTy));
4285 
4286           swiftErrorTemp =
4287             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4288           V = swiftErrorTemp.getPointer();
4289           cast<llvm::AllocaInst>(V)->setSwiftError(true);
4290 
4291           llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4292           Builder.CreateStore(errorValue, swiftErrorTemp);
4293         }
4294 
4295         // We might have to widen integers, but we should never truncate.
4296         if (ArgInfo.getCoerceToType() != V->getType() &&
4297             V->getType()->isIntegerTy())
4298           V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4299 
4300         // If the argument doesn't match, perform a bitcast to coerce it.  This
4301         // can happen due to trivial type mismatches.
4302         if (FirstIRArg < IRFuncTy->getNumParams() &&
4303             V->getType() != IRFuncTy->getParamType(FirstIRArg))
4304           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4305 
4306         IRCallArgs[FirstIRArg] = V;
4307         break;
4308       }
4309 
4310       // FIXME: Avoid the conversion through memory if possible.
4311       Address Src = Address::invalid();
4312       if (!I->isAggregate()) {
4313         Src = CreateMemTemp(I->Ty, "coerce");
4314         I->copyInto(*this, Src);
4315       } else {
4316         Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4317                              : I->getKnownRValue().getAggregateAddress();
4318       }
4319 
4320       // If the value is offset in memory, apply the offset now.
4321       Src = emitAddressAtOffset(*this, Src, ArgInfo);
4322 
4323       // Fast-isel and the optimizer generally like scalar values better than
4324       // FCAs, so we flatten them if this is safe to do for this argument.
4325       llvm::StructType *STy =
4326             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4327       if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4328         llvm::Type *SrcTy = Src.getElementType();
4329         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4330         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4331 
4332         // If the source type is smaller than the destination type of the
4333         // coerce-to logic, copy the source value into a temp alloca the size
4334         // of the destination type to allow loading all of it. The bits past
4335         // the source value are left undef.
4336         if (SrcSize < DstSize) {
4337           Address TempAlloca
4338             = CreateTempAlloca(STy, Src.getAlignment(),
4339                                Src.getName() + ".coerce");
4340           Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4341           Src = TempAlloca;
4342         } else {
4343           Src = Builder.CreateBitCast(Src,
4344                                       STy->getPointerTo(Src.getAddressSpace()));
4345         }
4346 
4347         assert(NumIRArgs == STy->getNumElements());
4348         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4349           Address EltPtr = Builder.CreateStructGEP(Src, i);
4350           llvm::Value *LI = Builder.CreateLoad(EltPtr);
4351           IRCallArgs[FirstIRArg + i] = LI;
4352         }
4353       } else {
4354         // In the simple case, just pass the coerced loaded value.
4355         assert(NumIRArgs == 1);
4356         IRCallArgs[FirstIRArg] =
4357           CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4358       }
4359 
4360       break;
4361     }
4362 
4363     case ABIArgInfo::CoerceAndExpand: {
4364       auto coercionType = ArgInfo.getCoerceAndExpandType();
4365       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4366 
4367       llvm::Value *tempSize = nullptr;
4368       Address addr = Address::invalid();
4369       Address AllocaAddr = Address::invalid();
4370       if (I->isAggregate()) {
4371         addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4372                               : I->getKnownRValue().getAggregateAddress();
4373 
4374       } else {
4375         RValue RV = I->getKnownRValue();
4376         assert(RV.isScalar()); // complex should always just be direct
4377 
4378         llvm::Type *scalarType = RV.getScalarVal()->getType();
4379         auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4380         auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4381 
4382         // Materialize to a temporary.
4383         addr = CreateTempAlloca(
4384             RV.getScalarVal()->getType(),
4385             CharUnits::fromQuantity(std::max(
4386                 (unsigned)layout->getAlignment().value(), scalarAlign)),
4387             "tmp",
4388             /*ArraySize=*/nullptr, &AllocaAddr);
4389         tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4390 
4391         Builder.CreateStore(RV.getScalarVal(), addr);
4392       }
4393 
4394       addr = Builder.CreateElementBitCast(addr, coercionType);
4395 
4396       unsigned IRArgPos = FirstIRArg;
4397       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4398         llvm::Type *eltType = coercionType->getElementType(i);
4399         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4400         Address eltAddr = Builder.CreateStructGEP(addr, i);
4401         llvm::Value *elt = Builder.CreateLoad(eltAddr);
4402         IRCallArgs[IRArgPos++] = elt;
4403       }
4404       assert(IRArgPos == FirstIRArg + NumIRArgs);
4405 
4406       if (tempSize) {
4407         EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4408       }
4409 
4410       break;
4411     }
4412 
4413     case ABIArgInfo::Expand:
4414       unsigned IRArgPos = FirstIRArg;
4415       ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4416       assert(IRArgPos == FirstIRArg + NumIRArgs);
4417       break;
4418     }
4419   }
4420 
4421   const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4422   llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4423 
4424   // If we're using inalloca, set up that argument.
4425   if (ArgMemory.isValid()) {
4426     llvm::Value *Arg = ArgMemory.getPointer();
4427     if (CallInfo.isVariadic()) {
4428       // When passing non-POD arguments by value to variadic functions, we will
4429       // end up with a variadic prototype and an inalloca call site.  In such
4430       // cases, we can't do any parameter mismatch checks.  Give up and bitcast
4431       // the callee.
4432       unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4433       CalleePtr =
4434           Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
4435     } else {
4436       llvm::Type *LastParamTy =
4437           IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4438       if (Arg->getType() != LastParamTy) {
4439 #ifndef NDEBUG
4440         // Assert that these structs have equivalent element types.
4441         llvm::StructType *FullTy = CallInfo.getArgStruct();
4442         llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4443             cast<llvm::PointerType>(LastParamTy)->getElementType());
4444         assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4445         for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4446                                                 DE = DeclaredTy->element_end(),
4447                                                 FI = FullTy->element_begin();
4448              DI != DE; ++DI, ++FI)
4449           assert(*DI == *FI);
4450 #endif
4451         Arg = Builder.CreateBitCast(Arg, LastParamTy);
4452       }
4453     }
4454     assert(IRFunctionArgs.hasInallocaArg());
4455     IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4456   }
4457 
4458   // 2. Prepare the function pointer.
4459 
4460   // If the callee is a bitcast of a non-variadic function to have a
4461   // variadic function pointer type, check to see if we can remove the
4462   // bitcast.  This comes up with unprototyped functions.
4463   //
4464   // This makes the IR nicer, but more importantly it ensures that we
4465   // can inline the function at -O0 if it is marked always_inline.
4466   auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
4467                                    llvm::Value *Ptr) -> llvm::Function * {
4468     if (!CalleeFT->isVarArg())
4469       return nullptr;
4470 
4471     // Get underlying value if it's a bitcast
4472     if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
4473       if (CE->getOpcode() == llvm::Instruction::BitCast)
4474         Ptr = CE->getOperand(0);
4475     }
4476 
4477     llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
4478     if (!OrigFn)
4479       return nullptr;
4480 
4481     llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4482 
4483     // If the original type is variadic, or if any of the component types
4484     // disagree, we cannot remove the cast.
4485     if (OrigFT->isVarArg() ||
4486         OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4487         OrigFT->getReturnType() != CalleeFT->getReturnType())
4488       return nullptr;
4489 
4490     for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4491       if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4492         return nullptr;
4493 
4494     return OrigFn;
4495   };
4496 
4497   if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
4498     CalleePtr = OrigFn;
4499     IRFuncTy = OrigFn->getFunctionType();
4500   }
4501 
4502   // 3. Perform the actual call.
4503 
4504   // Deactivate any cleanups that we're supposed to do immediately before
4505   // the call.
4506   if (!CallArgs.getCleanupsToDeactivate().empty())
4507     deactivateArgCleanupsBeforeCall(*this, CallArgs);
4508 
4509   // Assert that the arguments we computed match up.  The IR verifier
4510   // will catch this, but this is a common enough source of problems
4511   // during IRGen changes that it's way better for debugging to catch
4512   // it ourselves here.
4513 #ifndef NDEBUG
4514   assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4515   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4516     // Inalloca argument can have different type.
4517     if (IRFunctionArgs.hasInallocaArg() &&
4518         i == IRFunctionArgs.getInallocaArgNo())
4519       continue;
4520     if (i < IRFuncTy->getNumParams())
4521       assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4522   }
4523 #endif
4524 
4525   // Update the largest vector width if any arguments have vector types.
4526   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4527     if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
4528       LargestVectorWidth =
4529           std::max((uint64_t)LargestVectorWidth,
4530                    VT->getPrimitiveSizeInBits().getKnownMinSize());
4531   }
4532 
4533   // Compute the calling convention and attributes.
4534   unsigned CallingConv;
4535   llvm::AttributeList Attrs;
4536   CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4537                              Callee.getAbstractInfo(), Attrs, CallingConv,
4538                              /*AttrOnCallSite=*/true);
4539 
4540   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
4541     if (FD->usesFPIntrin())
4542       // All calls within a strictfp function are marked strictfp
4543       Attrs =
4544         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4545                            llvm::Attribute::StrictFP);
4546 
4547   // Apply some call-site-specific attributes.
4548   // TODO: work this into building the attribute set.
4549 
4550   // Apply always_inline to all calls within flatten functions.
4551   // FIXME: should this really take priority over __try, below?
4552   if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4553       !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
4554     Attrs =
4555         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4556                            llvm::Attribute::AlwaysInline);
4557   }
4558 
4559   // Disable inlining inside SEH __try blocks.
4560   if (isSEHTryScope()) {
4561     Attrs =
4562         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4563                            llvm::Attribute::NoInline);
4564   }
4565 
4566   // Decide whether to use a call or an invoke.
4567   bool CannotThrow;
4568   if (currentFunctionUsesSEHTry()) {
4569     // SEH cares about asynchronous exceptions, so everything can "throw."
4570     CannotThrow = false;
4571   } else if (isCleanupPadScope() &&
4572              EHPersonality::get(*this).isMSVCXXPersonality()) {
4573     // The MSVC++ personality will implicitly terminate the program if an
4574     // exception is thrown during a cleanup outside of a try/catch.
4575     // We don't need to model anything in IR to get this behavior.
4576     CannotThrow = true;
4577   } else {
4578     // Otherwise, nounwind call sites will never throw.
4579     CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4580                                      llvm::Attribute::NoUnwind);
4581   }
4582 
4583   // If we made a temporary, be sure to clean up after ourselves. Note that we
4584   // can't depend on being inside of an ExprWithCleanups, so we need to manually
4585   // pop this cleanup later on. Being eager about this is OK, since this
4586   // temporary is 'invisible' outside of the callee.
4587   if (UnusedReturnSizePtr)
4588     pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4589                                          UnusedReturnSizePtr);
4590 
4591   llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4592 
4593   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4594       getBundlesForFunclet(CalleePtr);
4595 
4596   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
4597     if (FD->usesFPIntrin())
4598       // All calls within a strictfp function are marked strictfp
4599       Attrs =
4600         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4601                            llvm::Attribute::StrictFP);
4602 
4603   AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
4604   Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
4605 
4606   AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
4607   Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
4608 
4609   // Emit the actual call/invoke instruction.
4610   llvm::CallBase *CI;
4611   if (!InvokeDest) {
4612     CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
4613   } else {
4614     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4615     CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
4616                               BundleList);
4617     EmitBlock(Cont);
4618   }
4619   if (callOrInvoke)
4620     *callOrInvoke = CI;
4621 
4622   // If this is within a function that has the guard(nocf) attribute and is an
4623   // indirect call, add the "guard_nocf" attribute to this call to indicate that
4624   // Control Flow Guard checks should not be added, even if the call is inlined.
4625   if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
4626     if (const auto *A = FD->getAttr<CFGuardAttr>()) {
4627       if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
4628         Attrs = Attrs.addAttribute(
4629             getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
4630     }
4631   }
4632 
4633   // Apply the attributes and calling convention.
4634   CI->setAttributes(Attrs);
4635   CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4636 
4637   // Apply various metadata.
4638 
4639   if (!CI->getType()->isVoidTy())
4640     CI->setName("call");
4641 
4642   // Update largest vector width from the return type.
4643   if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
4644     LargestVectorWidth =
4645         std::max((uint64_t)LargestVectorWidth,
4646                  VT->getPrimitiveSizeInBits().getKnownMinSize());
4647 
4648   // Insert instrumentation or attach profile metadata at indirect call sites.
4649   // For more details, see the comment before the definition of
4650   // IPVK_IndirectCallTarget in InstrProfData.inc.
4651   if (!CI->getCalledFunction())
4652     PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4653                      CI, CalleePtr);
4654 
4655   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4656   // optimizer it can aggressively ignore unwind edges.
4657   if (CGM.getLangOpts().ObjCAutoRefCount)
4658     AddObjCARCExceptionMetadata(CI);
4659 
4660   // Suppress tail calls if requested.
4661   if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4662     if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4663       Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4664   }
4665 
4666   // Add metadata for calls to MSAllocator functions
4667   if (getDebugInfo() && TargetDecl &&
4668       TargetDecl->hasAttr<MSAllocatorAttr>())
4669     getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy, Loc);
4670 
4671   // 4. Finish the call.
4672 
4673   // If the call doesn't return, finish the basic block and clear the
4674   // insertion point; this allows the rest of IRGen to discard
4675   // unreachable code.
4676   if (CI->doesNotReturn()) {
4677     if (UnusedReturnSizePtr)
4678       PopCleanupBlock();
4679 
4680     // Strip away the noreturn attribute to better diagnose unreachable UB.
4681     if (SanOpts.has(SanitizerKind::Unreachable)) {
4682       // Also remove from function since CallBase::hasFnAttr additionally checks
4683       // attributes of the called function.
4684       if (auto *F = CI->getCalledFunction())
4685         F->removeFnAttr(llvm::Attribute::NoReturn);
4686       CI->removeAttribute(llvm::AttributeList::FunctionIndex,
4687                           llvm::Attribute::NoReturn);
4688 
4689       // Avoid incompatibility with ASan which relies on the `noreturn`
4690       // attribute to insert handler calls.
4691       if (SanOpts.hasOneOf(SanitizerKind::Address |
4692                            SanitizerKind::KernelAddress)) {
4693         SanitizerScope SanScope(this);
4694         llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
4695         Builder.SetInsertPoint(CI);
4696         auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4697         llvm::FunctionCallee Fn =
4698             CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
4699         EmitNounwindRuntimeCall(Fn);
4700       }
4701     }
4702 
4703     EmitUnreachable(Loc);
4704     Builder.ClearInsertionPoint();
4705 
4706     // FIXME: For now, emit a dummy basic block because expr emitters in
4707     // generally are not ready to handle emitting expressions at unreachable
4708     // points.
4709     EnsureInsertPoint();
4710 
4711     // Return a reasonable RValue.
4712     return GetUndefRValue(RetTy);
4713   }
4714 
4715   // Perform the swifterror writeback.
4716   if (swiftErrorTemp.isValid()) {
4717     llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4718     Builder.CreateStore(errorResult, swiftErrorArg);
4719   }
4720 
4721   // Emit any call-associated writebacks immediately.  Arguably this
4722   // should happen after any return-value munging.
4723   if (CallArgs.hasWritebacks())
4724     emitWritebacks(*this, CallArgs);
4725 
4726   // The stack cleanup for inalloca arguments has to run out of the normal
4727   // lexical order, so deactivate it and run it manually here.
4728   CallArgs.freeArgumentMemory(*this);
4729 
4730   // Extract the return value.
4731   RValue Ret = [&] {
4732     switch (RetAI.getKind()) {
4733     case ABIArgInfo::CoerceAndExpand: {
4734       auto coercionType = RetAI.getCoerceAndExpandType();
4735 
4736       Address addr = SRetPtr;
4737       addr = Builder.CreateElementBitCast(addr, coercionType);
4738 
4739       assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4740       bool requiresExtract = isa<llvm::StructType>(CI->getType());
4741 
4742       unsigned unpaddedIndex = 0;
4743       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4744         llvm::Type *eltType = coercionType->getElementType(i);
4745         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4746         Address eltAddr = Builder.CreateStructGEP(addr, i);
4747         llvm::Value *elt = CI;
4748         if (requiresExtract)
4749           elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4750         else
4751           assert(unpaddedIndex == 0);
4752         Builder.CreateStore(elt, eltAddr);
4753       }
4754       // FALLTHROUGH
4755       LLVM_FALLTHROUGH;
4756     }
4757 
4758     case ABIArgInfo::InAlloca:
4759     case ABIArgInfo::Indirect: {
4760       RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4761       if (UnusedReturnSizePtr)
4762         PopCleanupBlock();
4763       return ret;
4764     }
4765 
4766     case ABIArgInfo::Ignore:
4767       // If we are ignoring an argument that had a result, make sure to
4768       // construct the appropriate return value for our caller.
4769       return GetUndefRValue(RetTy);
4770 
4771     case ABIArgInfo::Extend:
4772     case ABIArgInfo::Direct: {
4773       llvm::Type *RetIRTy = ConvertType(RetTy);
4774       if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4775         switch (getEvaluationKind(RetTy)) {
4776         case TEK_Complex: {
4777           llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4778           llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4779           return RValue::getComplex(std::make_pair(Real, Imag));
4780         }
4781         case TEK_Aggregate: {
4782           Address DestPtr = ReturnValue.getValue();
4783           bool DestIsVolatile = ReturnValue.isVolatile();
4784 
4785           if (!DestPtr.isValid()) {
4786             DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4787             DestIsVolatile = false;
4788           }
4789           BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4790           return RValue::getAggregate(DestPtr);
4791         }
4792         case TEK_Scalar: {
4793           // If the argument doesn't match, perform a bitcast to coerce it.  This
4794           // can happen due to trivial type mismatches.
4795           llvm::Value *V = CI;
4796           if (V->getType() != RetIRTy)
4797             V = Builder.CreateBitCast(V, RetIRTy);
4798           return RValue::get(V);
4799         }
4800         }
4801         llvm_unreachable("bad evaluation kind");
4802       }
4803 
4804       Address DestPtr = ReturnValue.getValue();
4805       bool DestIsVolatile = ReturnValue.isVolatile();
4806 
4807       if (!DestPtr.isValid()) {
4808         DestPtr = CreateMemTemp(RetTy, "coerce");
4809         DestIsVolatile = false;
4810       }
4811 
4812       // If the value is offset in memory, apply the offset now.
4813       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4814       CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4815 
4816       return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4817     }
4818 
4819     case ABIArgInfo::Expand:
4820       llvm_unreachable("Invalid ABI kind for return argument");
4821     }
4822 
4823     llvm_unreachable("Unhandled ABIArgInfo::Kind");
4824   } ();
4825 
4826   // Emit the assume_aligned check on the return value.
4827   if (Ret.isScalar() && TargetDecl) {
4828     AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
4829     AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
4830   }
4831 
4832   // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
4833   // we can't use the full cleanup mechanism.
4834   for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
4835     LifetimeEnd.Emit(*this, /*Flags=*/{});
4836 
4837   if (!ReturnValue.isExternallyDestructed() &&
4838       RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
4839     pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
4840                 RetTy);
4841 
4842   return Ret;
4843 }
4844 
4845 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4846   if (isVirtual()) {
4847     const CallExpr *CE = getVirtualCallExpr();
4848     return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4849         CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
4850         CE ? CE->getBeginLoc() : SourceLocation());
4851   }
4852 
4853   return *this;
4854 }
4855 
4856 /* VarArg handling */
4857 
4858 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4859   VAListAddr = VE->isMicrosoftABI()
4860                  ? EmitMSVAListRef(VE->getSubExpr())
4861                  : EmitVAListRef(VE->getSubExpr());
4862   QualType Ty = VE->getType();
4863   if (VE->isMicrosoftABI())
4864     return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4865   return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4866 }
4867