1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // These classes wrap the information about a call or function
10 // definition used to handle ABI compliancy.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGCall.h"
15 #include "ABIInfo.h"
16 #include "CGBlocks.h"
17 #include "CGCXXABI.h"
18 #include "CGCleanup.h"
19 #include "CGRecordLayout.h"
20 #include "CodeGenFunction.h"
21 #include "CodeGenModule.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/Attr.h"
24 #include "clang/AST/Decl.h"
25 #include "clang/AST/DeclCXX.h"
26 #include "clang/AST/DeclObjC.h"
27 #include "clang/Basic/CodeGenOptions.h"
28 #include "clang/Basic/TargetBuiltins.h"
29 #include "clang/Basic/TargetInfo.h"
30 #include "clang/CodeGen/CGFunctionInfo.h"
31 #include "clang/CodeGen/SwiftCallingConv.h"
32 #include "llvm/ADT/StringExtras.h"
33 #include "llvm/Analysis/ValueTracking.h"
34 #include "llvm/IR/Assumptions.h"
35 #include "llvm/IR/Attributes.h"
36 #include "llvm/IR/CallingConv.h"
37 #include "llvm/IR/DataLayout.h"
38 #include "llvm/IR/InlineAsm.h"
39 #include "llvm/IR/IntrinsicInst.h"
40 #include "llvm/IR/Intrinsics.h"
41 #include "llvm/Transforms/Utils/Local.h"
42 using namespace clang;
43 using namespace CodeGen;
44 
45 /***/
46 
47 unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
48   switch (CC) {
49   default: return llvm::CallingConv::C;
50   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
51   case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
52   case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
53   case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
54   case CC_Win64: return llvm::CallingConv::Win64;
55   case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
56   case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
57   case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
58   case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
59   // TODO: Add support for __pascal to LLVM.
60   case CC_X86Pascal: return llvm::CallingConv::C;
61   // TODO: Add support for __vectorcall to LLVM.
62   case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
63   case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
64   case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
65   case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
66   case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
67   case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
68   case CC_Swift: return llvm::CallingConv::Swift;
69   }
70 }
71 
72 /// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
73 /// qualification. Either or both of RD and MD may be null. A null RD indicates
74 /// that there is no meaningful 'this' type, and a null MD can occur when
75 /// calling a method pointer.
76 CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
77                                          const CXXMethodDecl *MD) {
78   QualType RecTy;
79   if (RD)
80     RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
81   else
82     RecTy = Context.VoidTy;
83 
84   if (MD)
85     RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace());
86   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
87 }
88 
89 /// Returns the canonical formal type of the given C++ method.
90 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
91   return MD->getType()->getCanonicalTypeUnqualified()
92            .getAs<FunctionProtoType>();
93 }
94 
95 /// Returns the "extra-canonicalized" return type, which discards
96 /// qualifiers on the return type.  Codegen doesn't care about them,
97 /// and it makes ABI code a little easier to be able to assume that
98 /// all parameter and return types are top-level unqualified.
99 static CanQualType GetReturnType(QualType RetTy) {
100   return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
101 }
102 
103 /// Arrange the argument and result information for a value of the given
104 /// unprototyped freestanding function type.
105 const CGFunctionInfo &
106 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
107   // When translating an unprototyped function type, always use a
108   // variadic type.
109   return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
110                                  /*instanceMethod=*/false,
111                                  /*chainCall=*/false, None,
112                                  FTNP->getExtInfo(), {}, RequiredArgs(0));
113 }
114 
115 static void addExtParameterInfosForCall(
116          llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
117                                         const FunctionProtoType *proto,
118                                         unsigned prefixArgs,
119                                         unsigned totalArgs) {
120   assert(proto->hasExtParameterInfos());
121   assert(paramInfos.size() <= prefixArgs);
122   assert(proto->getNumParams() + prefixArgs <= totalArgs);
123 
124   paramInfos.reserve(totalArgs);
125 
126   // Add default infos for any prefix args that don't already have infos.
127   paramInfos.resize(prefixArgs);
128 
129   // Add infos for the prototype.
130   for (const auto &ParamInfo : proto->getExtParameterInfos()) {
131     paramInfos.push_back(ParamInfo);
132     // pass_object_size params have no parameter info.
133     if (ParamInfo.hasPassObjectSize())
134       paramInfos.emplace_back();
135   }
136 
137   assert(paramInfos.size() <= totalArgs &&
138          "Did we forget to insert pass_object_size args?");
139   // Add default infos for the variadic and/or suffix arguments.
140   paramInfos.resize(totalArgs);
141 }
142 
143 /// Adds the formal parameters in FPT to the given prefix. If any parameter in
144 /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
145 static void appendParameterTypes(const CodeGenTypes &CGT,
146                                  SmallVectorImpl<CanQualType> &prefix,
147               SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
148                                  CanQual<FunctionProtoType> FPT) {
149   // Fast path: don't touch param info if we don't need to.
150   if (!FPT->hasExtParameterInfos()) {
151     assert(paramInfos.empty() &&
152            "We have paramInfos, but the prototype doesn't?");
153     prefix.append(FPT->param_type_begin(), FPT->param_type_end());
154     return;
155   }
156 
157   unsigned PrefixSize = prefix.size();
158   // In the vast majority of cases, we'll have precisely FPT->getNumParams()
159   // parameters; the only thing that can change this is the presence of
160   // pass_object_size. So, we preallocate for the common case.
161   prefix.reserve(prefix.size() + FPT->getNumParams());
162 
163   auto ExtInfos = FPT->getExtParameterInfos();
164   assert(ExtInfos.size() == FPT->getNumParams());
165   for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
166     prefix.push_back(FPT->getParamType(I));
167     if (ExtInfos[I].hasPassObjectSize())
168       prefix.push_back(CGT.getContext().getSizeType());
169   }
170 
171   addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
172                               prefix.size());
173 }
174 
175 /// Arrange the LLVM function layout for a value of the given function
176 /// type, on top of any implicit parameters already stored.
177 static const CGFunctionInfo &
178 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
179                         SmallVectorImpl<CanQualType> &prefix,
180                         CanQual<FunctionProtoType> FTP) {
181   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
182   RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
183   // FIXME: Kill copy.
184   appendParameterTypes(CGT, prefix, paramInfos, FTP);
185   CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
186 
187   return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
188                                      /*chainCall=*/false, prefix,
189                                      FTP->getExtInfo(), paramInfos,
190                                      Required);
191 }
192 
193 /// Arrange the argument and result information for a value of the
194 /// given freestanding function type.
195 const CGFunctionInfo &
196 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
197   SmallVector<CanQualType, 16> argTypes;
198   return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
199                                    FTP);
200 }
201 
202 static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
203                                                bool IsWindows) {
204   // Set the appropriate calling convention for the Function.
205   if (D->hasAttr<StdCallAttr>())
206     return CC_X86StdCall;
207 
208   if (D->hasAttr<FastCallAttr>())
209     return CC_X86FastCall;
210 
211   if (D->hasAttr<RegCallAttr>())
212     return CC_X86RegCall;
213 
214   if (D->hasAttr<ThisCallAttr>())
215     return CC_X86ThisCall;
216 
217   if (D->hasAttr<VectorCallAttr>())
218     return CC_X86VectorCall;
219 
220   if (D->hasAttr<PascalAttr>())
221     return CC_X86Pascal;
222 
223   if (PcsAttr *PCS = D->getAttr<PcsAttr>())
224     return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
225 
226   if (D->hasAttr<AArch64VectorPcsAttr>())
227     return CC_AArch64VectorCall;
228 
229   if (D->hasAttr<IntelOclBiccAttr>())
230     return CC_IntelOclBicc;
231 
232   if (D->hasAttr<MSABIAttr>())
233     return IsWindows ? CC_C : CC_Win64;
234 
235   if (D->hasAttr<SysVABIAttr>())
236     return IsWindows ? CC_X86_64SysV : CC_C;
237 
238   if (D->hasAttr<PreserveMostAttr>())
239     return CC_PreserveMost;
240 
241   if (D->hasAttr<PreserveAllAttr>())
242     return CC_PreserveAll;
243 
244   return CC_C;
245 }
246 
247 /// Arrange the argument and result information for a call to an
248 /// unknown C++ non-static member function of the given abstract type.
249 /// (A null RD means we don't have any meaningful "this" argument type,
250 ///  so fall back to a generic pointer type).
251 /// The member function must be an ordinary function, i.e. not a
252 /// constructor or destructor.
253 const CGFunctionInfo &
254 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
255                                    const FunctionProtoType *FTP,
256                                    const CXXMethodDecl *MD) {
257   SmallVector<CanQualType, 16> argTypes;
258 
259   // Add the 'this' pointer.
260   argTypes.push_back(DeriveThisType(RD, MD));
261 
262   return ::arrangeLLVMFunctionInfo(
263       *this, true, argTypes,
264       FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
265 }
266 
267 /// Set calling convention for CUDA/HIP kernel.
268 static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
269                                            const FunctionDecl *FD) {
270   if (FD->hasAttr<CUDAGlobalAttr>()) {
271     const FunctionType *FT = FTy->getAs<FunctionType>();
272     CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
273     FTy = FT->getCanonicalTypeUnqualified();
274   }
275 }
276 
277 /// Arrange the argument and result information for a declaration or
278 /// definition of the given C++ non-static member function.  The
279 /// member function must be an ordinary function, i.e. not a
280 /// constructor or destructor.
281 const CGFunctionInfo &
282 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
283   assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
284   assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
285 
286   CanQualType FT = GetFormalType(MD).getAs<Type>();
287   setCUDAKernelCallingConvention(FT, CGM, MD);
288   auto prototype = FT.getAs<FunctionProtoType>();
289 
290   if (MD->isInstance()) {
291     // The abstract case is perfectly fine.
292     const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
293     return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
294   }
295 
296   return arrangeFreeFunctionType(prototype);
297 }
298 
299 bool CodeGenTypes::inheritingCtorHasParams(
300     const InheritedConstructor &Inherited, CXXCtorType Type) {
301   // Parameters are unnecessary if we're constructing a base class subobject
302   // and the inherited constructor lives in a virtual base.
303   return Type == Ctor_Complete ||
304          !Inherited.getShadowDecl()->constructsVirtualBase() ||
305          !Target.getCXXABI().hasConstructorVariants();
306 }
307 
308 const CGFunctionInfo &
309 CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
310   auto *MD = cast<CXXMethodDecl>(GD.getDecl());
311 
312   SmallVector<CanQualType, 16> argTypes;
313   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
314   argTypes.push_back(DeriveThisType(MD->getParent(), MD));
315 
316   bool PassParams = true;
317 
318   if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
319     // A base class inheriting constructor doesn't get forwarded arguments
320     // needed to construct a virtual base (or base class thereof).
321     if (auto Inherited = CD->getInheritedConstructor())
322       PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
323   }
324 
325   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
326 
327   // Add the formal parameters.
328   if (PassParams)
329     appendParameterTypes(*this, argTypes, paramInfos, FTP);
330 
331   CGCXXABI::AddedStructorArgCounts AddedArgs =
332       TheCXXABI.buildStructorSignature(GD, argTypes);
333   if (!paramInfos.empty()) {
334     // Note: prefix implies after the first param.
335     if (AddedArgs.Prefix)
336       paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
337                         FunctionProtoType::ExtParameterInfo{});
338     if (AddedArgs.Suffix)
339       paramInfos.append(AddedArgs.Suffix,
340                         FunctionProtoType::ExtParameterInfo{});
341   }
342 
343   RequiredArgs required =
344       (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
345                                       : RequiredArgs::All);
346 
347   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
348   CanQualType resultType = TheCXXABI.HasThisReturn(GD)
349                                ? argTypes.front()
350                                : TheCXXABI.hasMostDerivedReturn(GD)
351                                      ? CGM.getContext().VoidPtrTy
352                                      : Context.VoidTy;
353   return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
354                                  /*chainCall=*/false, argTypes, extInfo,
355                                  paramInfos, required);
356 }
357 
358 static SmallVector<CanQualType, 16>
359 getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
360   SmallVector<CanQualType, 16> argTypes;
361   for (auto &arg : args)
362     argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
363   return argTypes;
364 }
365 
366 static SmallVector<CanQualType, 16>
367 getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
368   SmallVector<CanQualType, 16> argTypes;
369   for (auto &arg : args)
370     argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
371   return argTypes;
372 }
373 
374 static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
375 getExtParameterInfosForCall(const FunctionProtoType *proto,
376                             unsigned prefixArgs, unsigned totalArgs) {
377   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
378   if (proto->hasExtParameterInfos()) {
379     addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
380   }
381   return result;
382 }
383 
384 /// Arrange a call to a C++ method, passing the given arguments.
385 ///
386 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
387 /// parameter.
388 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
389 /// args.
390 /// PassProtoArgs indicates whether `args` has args for the parameters in the
391 /// given CXXConstructorDecl.
392 const CGFunctionInfo &
393 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
394                                         const CXXConstructorDecl *D,
395                                         CXXCtorType CtorKind,
396                                         unsigned ExtraPrefixArgs,
397                                         unsigned ExtraSuffixArgs,
398                                         bool PassProtoArgs) {
399   // FIXME: Kill copy.
400   SmallVector<CanQualType, 16> ArgTypes;
401   for (const auto &Arg : args)
402     ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
403 
404   // +1 for implicit this, which should always be args[0].
405   unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
406 
407   CanQual<FunctionProtoType> FPT = GetFormalType(D);
408   RequiredArgs Required = PassProtoArgs
409                               ? RequiredArgs::forPrototypePlus(
410                                     FPT, TotalPrefixArgs + ExtraSuffixArgs)
411                               : RequiredArgs::All;
412 
413   GlobalDecl GD(D, CtorKind);
414   CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
415                                ? ArgTypes.front()
416                                : TheCXXABI.hasMostDerivedReturn(GD)
417                                      ? CGM.getContext().VoidPtrTy
418                                      : Context.VoidTy;
419 
420   FunctionType::ExtInfo Info = FPT->getExtInfo();
421   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
422   // If the prototype args are elided, we should only have ABI-specific args,
423   // which never have param info.
424   if (PassProtoArgs && FPT->hasExtParameterInfos()) {
425     // ABI-specific suffix arguments are treated the same as variadic arguments.
426     addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
427                                 ArgTypes.size());
428   }
429   return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
430                                  /*chainCall=*/false, ArgTypes, Info,
431                                  ParamInfos, Required);
432 }
433 
434 /// Arrange the argument and result information for the declaration or
435 /// definition of the given function.
436 const CGFunctionInfo &
437 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
438   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
439     if (MD->isInstance())
440       return arrangeCXXMethodDeclaration(MD);
441 
442   CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
443 
444   assert(isa<FunctionType>(FTy));
445   setCUDAKernelCallingConvention(FTy, CGM, FD);
446 
447   // When declaring a function without a prototype, always use a
448   // non-variadic type.
449   if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
450     return arrangeLLVMFunctionInfo(
451         noProto->getReturnType(), /*instanceMethod=*/false,
452         /*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All);
453   }
454 
455   return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
456 }
457 
458 /// Arrange the argument and result information for the declaration or
459 /// definition of an Objective-C method.
460 const CGFunctionInfo &
461 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
462   // It happens that this is the same as a call with no optional
463   // arguments, except also using the formal 'self' type.
464   return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
465 }
466 
467 /// Arrange the argument and result information for the function type
468 /// through which to perform a send to the given Objective-C method,
469 /// using the given receiver type.  The receiver type is not always
470 /// the 'self' type of the method or even an Objective-C pointer type.
471 /// This is *not* the right method for actually performing such a
472 /// message send, due to the possibility of optional arguments.
473 const CGFunctionInfo &
474 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
475                                               QualType receiverType) {
476   SmallVector<CanQualType, 16> argTys;
477   SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
478   argTys.push_back(Context.getCanonicalParamType(receiverType));
479   argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
480   // FIXME: Kill copy?
481   for (const auto *I : MD->parameters()) {
482     argTys.push_back(Context.getCanonicalParamType(I->getType()));
483     auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
484         I->hasAttr<NoEscapeAttr>());
485     extParamInfos.push_back(extParamInfo);
486   }
487 
488   FunctionType::ExtInfo einfo;
489   bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
490   einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
491 
492   if (getContext().getLangOpts().ObjCAutoRefCount &&
493       MD->hasAttr<NSReturnsRetainedAttr>())
494     einfo = einfo.withProducesResult(true);
495 
496   RequiredArgs required =
497     (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
498 
499   return arrangeLLVMFunctionInfo(
500       GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
501       /*chainCall=*/false, argTys, einfo, extParamInfos, required);
502 }
503 
504 const CGFunctionInfo &
505 CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
506                                                  const CallArgList &args) {
507   auto argTypes = getArgTypesForCall(Context, args);
508   FunctionType::ExtInfo einfo;
509 
510   return arrangeLLVMFunctionInfo(
511       GetReturnType(returnType), /*instanceMethod=*/false,
512       /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
513 }
514 
515 const CGFunctionInfo &
516 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
517   // FIXME: Do we need to handle ObjCMethodDecl?
518   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
519 
520   if (isa<CXXConstructorDecl>(GD.getDecl()) ||
521       isa<CXXDestructorDecl>(GD.getDecl()))
522     return arrangeCXXStructorDeclaration(GD);
523 
524   return arrangeFunctionDeclaration(FD);
525 }
526 
527 /// Arrange a thunk that takes 'this' as the first parameter followed by
528 /// varargs.  Return a void pointer, regardless of the actual return type.
529 /// The body of the thunk will end in a musttail call to a function of the
530 /// correct type, and the caller will bitcast the function to the correct
531 /// prototype.
532 const CGFunctionInfo &
533 CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
534   assert(MD->isVirtual() && "only methods have thunks");
535   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
536   CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
537   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
538                                  /*chainCall=*/false, ArgTys,
539                                  FTP->getExtInfo(), {}, RequiredArgs(1));
540 }
541 
542 const CGFunctionInfo &
543 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
544                                    CXXCtorType CT) {
545   assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
546 
547   CanQual<FunctionProtoType> FTP = GetFormalType(CD);
548   SmallVector<CanQualType, 2> ArgTys;
549   const CXXRecordDecl *RD = CD->getParent();
550   ArgTys.push_back(DeriveThisType(RD, CD));
551   if (CT == Ctor_CopyingClosure)
552     ArgTys.push_back(*FTP->param_type_begin());
553   if (RD->getNumVBases() > 0)
554     ArgTys.push_back(Context.IntTy);
555   CallingConv CC = Context.getDefaultCallingConvention(
556       /*IsVariadic=*/false, /*IsCXXMethod=*/true);
557   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
558                                  /*chainCall=*/false, ArgTys,
559                                  FunctionType::ExtInfo(CC), {},
560                                  RequiredArgs::All);
561 }
562 
563 /// Arrange a call as unto a free function, except possibly with an
564 /// additional number of formal parameters considered required.
565 static const CGFunctionInfo &
566 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
567                             CodeGenModule &CGM,
568                             const CallArgList &args,
569                             const FunctionType *fnType,
570                             unsigned numExtraRequiredArgs,
571                             bool chainCall) {
572   assert(args.size() >= numExtraRequiredArgs);
573 
574   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
575 
576   // In most cases, there are no optional arguments.
577   RequiredArgs required = RequiredArgs::All;
578 
579   // If we have a variadic prototype, the required arguments are the
580   // extra prefix plus the arguments in the prototype.
581   if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
582     if (proto->isVariadic())
583       required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);
584 
585     if (proto->hasExtParameterInfos())
586       addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
587                                   args.size());
588 
589   // If we don't have a prototype at all, but we're supposed to
590   // explicitly use the variadic convention for unprototyped calls,
591   // treat all of the arguments as required but preserve the nominal
592   // possibility of variadics.
593   } else if (CGM.getTargetCodeGenInfo()
594                 .isNoProtoCallVariadic(args,
595                                        cast<FunctionNoProtoType>(fnType))) {
596     required = RequiredArgs(args.size());
597   }
598 
599   // FIXME: Kill copy.
600   SmallVector<CanQualType, 16> argTypes;
601   for (const auto &arg : args)
602     argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
603   return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
604                                      /*instanceMethod=*/false, chainCall,
605                                      argTypes, fnType->getExtInfo(), paramInfos,
606                                      required);
607 }
608 
609 /// Figure out the rules for calling a function with the given formal
610 /// type using the given arguments.  The arguments are necessary
611 /// because the function might be unprototyped, in which case it's
612 /// target-dependent in crazy ways.
613 const CGFunctionInfo &
614 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
615                                       const FunctionType *fnType,
616                                       bool chainCall) {
617   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
618                                      chainCall ? 1 : 0, chainCall);
619 }
620 
621 /// A block function is essentially a free function with an
622 /// extra implicit argument.
623 const CGFunctionInfo &
624 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
625                                        const FunctionType *fnType) {
626   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
627                                      /*chainCall=*/false);
628 }
629 
630 const CGFunctionInfo &
631 CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
632                                               const FunctionArgList &params) {
633   auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
634   auto argTypes = getArgTypesForDeclaration(Context, params);
635 
636   return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
637                                  /*instanceMethod*/ false, /*chainCall*/ false,
638                                  argTypes, proto->getExtInfo(), paramInfos,
639                                  RequiredArgs::forPrototypePlus(proto, 1));
640 }
641 
642 const CGFunctionInfo &
643 CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
644                                          const CallArgList &args) {
645   // FIXME: Kill copy.
646   SmallVector<CanQualType, 16> argTypes;
647   for (const auto &Arg : args)
648     argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
649   return arrangeLLVMFunctionInfo(
650       GetReturnType(resultType), /*instanceMethod=*/false,
651       /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
652       /*paramInfos=*/ {}, RequiredArgs::All);
653 }
654 
655 const CGFunctionInfo &
656 CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
657                                                 const FunctionArgList &args) {
658   auto argTypes = getArgTypesForDeclaration(Context, args);
659 
660   return arrangeLLVMFunctionInfo(
661       GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
662       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
663 }
664 
665 const CGFunctionInfo &
666 CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
667                                               ArrayRef<CanQualType> argTypes) {
668   return arrangeLLVMFunctionInfo(
669       resultType, /*instanceMethod=*/false, /*chainCall=*/false,
670       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
671 }
672 
673 /// Arrange a call to a C++ method, passing the given arguments.
674 ///
675 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
676 /// does not count `this`.
677 const CGFunctionInfo &
678 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
679                                    const FunctionProtoType *proto,
680                                    RequiredArgs required,
681                                    unsigned numPrefixArgs) {
682   assert(numPrefixArgs + 1 <= args.size() &&
683          "Emitting a call with less args than the required prefix?");
684   // Add one to account for `this`. It's a bit awkward here, but we don't count
685   // `this` in similar places elsewhere.
686   auto paramInfos =
687     getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
688 
689   // FIXME: Kill copy.
690   auto argTypes = getArgTypesForCall(Context, args);
691 
692   FunctionType::ExtInfo info = proto->getExtInfo();
693   return arrangeLLVMFunctionInfo(
694       GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
695       /*chainCall=*/false, argTypes, info, paramInfos, required);
696 }
697 
698 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
699   return arrangeLLVMFunctionInfo(
700       getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
701       None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
702 }
703 
704 const CGFunctionInfo &
705 CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
706                           const CallArgList &args) {
707   assert(signature.arg_size() <= args.size());
708   if (signature.arg_size() == args.size())
709     return signature;
710 
711   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
712   auto sigParamInfos = signature.getExtParameterInfos();
713   if (!sigParamInfos.empty()) {
714     paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
715     paramInfos.resize(args.size());
716   }
717 
718   auto argTypes = getArgTypesForCall(Context, args);
719 
720   assert(signature.getRequiredArgs().allowsOptionalArgs());
721   return arrangeLLVMFunctionInfo(signature.getReturnType(),
722                                  signature.isInstanceMethod(),
723                                  signature.isChainCall(),
724                                  argTypes,
725                                  signature.getExtInfo(),
726                                  paramInfos,
727                                  signature.getRequiredArgs());
728 }
729 
730 namespace clang {
731 namespace CodeGen {
732 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
733 }
734 }
735 
736 /// Arrange the argument and result information for an abstract value
737 /// of a given function type.  This is the method which all of the
738 /// above functions ultimately defer to.
739 const CGFunctionInfo &
740 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
741                                       bool instanceMethod,
742                                       bool chainCall,
743                                       ArrayRef<CanQualType> argTypes,
744                                       FunctionType::ExtInfo info,
745                      ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
746                                       RequiredArgs required) {
747   assert(llvm::all_of(argTypes,
748                       [](CanQualType T) { return T.isCanonicalAsParam(); }));
749 
750   // Lookup or create unique function info.
751   llvm::FoldingSetNodeID ID;
752   CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
753                           required, resultType, argTypes);
754 
755   void *insertPos = nullptr;
756   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
757   if (FI)
758     return *FI;
759 
760   unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
761 
762   // Construct the function info.  We co-allocate the ArgInfos.
763   FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
764                               paramInfos, resultType, argTypes, required);
765   FunctionInfos.InsertNode(FI, insertPos);
766 
767   bool inserted = FunctionsBeingProcessed.insert(FI).second;
768   (void)inserted;
769   assert(inserted && "Recursively being processed?");
770 
771   // Compute ABI information.
772   if (CC == llvm::CallingConv::SPIR_KERNEL) {
773     // Force target independent argument handling for the host visible
774     // kernel functions.
775     computeSPIRKernelABIInfo(CGM, *FI);
776   } else if (info.getCC() == CC_Swift) {
777     swiftcall::computeABIInfo(CGM, *FI);
778   } else {
779     getABIInfo().computeInfo(*FI);
780   }
781 
782   // Loop over all of the computed argument and return value info.  If any of
783   // them are direct or extend without a specified coerce type, specify the
784   // default now.
785   ABIArgInfo &retInfo = FI->getReturnInfo();
786   if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
787     retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
788 
789   for (auto &I : FI->arguments())
790     if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
791       I.info.setCoerceToType(ConvertType(I.type));
792 
793   bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
794   assert(erased && "Not in set?");
795 
796   return *FI;
797 }
798 
799 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
800                                        bool instanceMethod,
801                                        bool chainCall,
802                                        const FunctionType::ExtInfo &info,
803                                        ArrayRef<ExtParameterInfo> paramInfos,
804                                        CanQualType resultType,
805                                        ArrayRef<CanQualType> argTypes,
806                                        RequiredArgs required) {
807   assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
808   assert(!required.allowsOptionalArgs() ||
809          required.getNumRequiredArgs() <= argTypes.size());
810 
811   void *buffer =
812     operator new(totalSizeToAlloc<ArgInfo,             ExtParameterInfo>(
813                                   argTypes.size() + 1, paramInfos.size()));
814 
815   CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
816   FI->CallingConvention = llvmCC;
817   FI->EffectiveCallingConvention = llvmCC;
818   FI->ASTCallingConvention = info.getCC();
819   FI->InstanceMethod = instanceMethod;
820   FI->ChainCall = chainCall;
821   FI->CmseNSCall = info.getCmseNSCall();
822   FI->NoReturn = info.getNoReturn();
823   FI->ReturnsRetained = info.getProducesResult();
824   FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
825   FI->NoCfCheck = info.getNoCfCheck();
826   FI->Required = required;
827   FI->HasRegParm = info.getHasRegParm();
828   FI->RegParm = info.getRegParm();
829   FI->ArgStruct = nullptr;
830   FI->ArgStructAlign = 0;
831   FI->NumArgs = argTypes.size();
832   FI->HasExtParameterInfos = !paramInfos.empty();
833   FI->getArgsBuffer()[0].type = resultType;
834   for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
835     FI->getArgsBuffer()[i + 1].type = argTypes[i];
836   for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
837     FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
838   return FI;
839 }
840 
841 /***/
842 
843 namespace {
844 // ABIArgInfo::Expand implementation.
845 
846 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
847 struct TypeExpansion {
848   enum TypeExpansionKind {
849     // Elements of constant arrays are expanded recursively.
850     TEK_ConstantArray,
851     // Record fields are expanded recursively (but if record is a union, only
852     // the field with the largest size is expanded).
853     TEK_Record,
854     // For complex types, real and imaginary parts are expanded recursively.
855     TEK_Complex,
856     // All other types are not expandable.
857     TEK_None
858   };
859 
860   const TypeExpansionKind Kind;
861 
862   TypeExpansion(TypeExpansionKind K) : Kind(K) {}
863   virtual ~TypeExpansion() {}
864 };
865 
866 struct ConstantArrayExpansion : TypeExpansion {
867   QualType EltTy;
868   uint64_t NumElts;
869 
870   ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
871       : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
872   static bool classof(const TypeExpansion *TE) {
873     return TE->Kind == TEK_ConstantArray;
874   }
875 };
876 
877 struct RecordExpansion : TypeExpansion {
878   SmallVector<const CXXBaseSpecifier *, 1> Bases;
879 
880   SmallVector<const FieldDecl *, 1> Fields;
881 
882   RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
883                   SmallVector<const FieldDecl *, 1> &&Fields)
884       : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
885         Fields(std::move(Fields)) {}
886   static bool classof(const TypeExpansion *TE) {
887     return TE->Kind == TEK_Record;
888   }
889 };
890 
891 struct ComplexExpansion : TypeExpansion {
892   QualType EltTy;
893 
894   ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
895   static bool classof(const TypeExpansion *TE) {
896     return TE->Kind == TEK_Complex;
897   }
898 };
899 
900 struct NoExpansion : TypeExpansion {
901   NoExpansion() : TypeExpansion(TEK_None) {}
902   static bool classof(const TypeExpansion *TE) {
903     return TE->Kind == TEK_None;
904   }
905 };
906 }  // namespace
907 
908 static std::unique_ptr<TypeExpansion>
909 getTypeExpansion(QualType Ty, const ASTContext &Context) {
910   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
911     return std::make_unique<ConstantArrayExpansion>(
912         AT->getElementType(), AT->getSize().getZExtValue());
913   }
914   if (const RecordType *RT = Ty->getAs<RecordType>()) {
915     SmallVector<const CXXBaseSpecifier *, 1> Bases;
916     SmallVector<const FieldDecl *, 1> Fields;
917     const RecordDecl *RD = RT->getDecl();
918     assert(!RD->hasFlexibleArrayMember() &&
919            "Cannot expand structure with flexible array.");
920     if (RD->isUnion()) {
921       // Unions can be here only in degenerative cases - all the fields are same
922       // after flattening. Thus we have to use the "largest" field.
923       const FieldDecl *LargestFD = nullptr;
924       CharUnits UnionSize = CharUnits::Zero();
925 
926       for (const auto *FD : RD->fields()) {
927         if (FD->isZeroLengthBitField(Context))
928           continue;
929         assert(!FD->isBitField() &&
930                "Cannot expand structure with bit-field members.");
931         CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
932         if (UnionSize < FieldSize) {
933           UnionSize = FieldSize;
934           LargestFD = FD;
935         }
936       }
937       if (LargestFD)
938         Fields.push_back(LargestFD);
939     } else {
940       if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
941         assert(!CXXRD->isDynamicClass() &&
942                "cannot expand vtable pointers in dynamic classes");
943         for (const CXXBaseSpecifier &BS : CXXRD->bases())
944           Bases.push_back(&BS);
945       }
946 
947       for (const auto *FD : RD->fields()) {
948         if (FD->isZeroLengthBitField(Context))
949           continue;
950         assert(!FD->isBitField() &&
951                "Cannot expand structure with bit-field members.");
952         Fields.push_back(FD);
953       }
954     }
955     return std::make_unique<RecordExpansion>(std::move(Bases),
956                                               std::move(Fields));
957   }
958   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
959     return std::make_unique<ComplexExpansion>(CT->getElementType());
960   }
961   return std::make_unique<NoExpansion>();
962 }
963 
964 static int getExpansionSize(QualType Ty, const ASTContext &Context) {
965   auto Exp = getTypeExpansion(Ty, Context);
966   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
967     return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
968   }
969   if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
970     int Res = 0;
971     for (auto BS : RExp->Bases)
972       Res += getExpansionSize(BS->getType(), Context);
973     for (auto FD : RExp->Fields)
974       Res += getExpansionSize(FD->getType(), Context);
975     return Res;
976   }
977   if (isa<ComplexExpansion>(Exp.get()))
978     return 2;
979   assert(isa<NoExpansion>(Exp.get()));
980   return 1;
981 }
982 
983 void
984 CodeGenTypes::getExpandedTypes(QualType Ty,
985                                SmallVectorImpl<llvm::Type *>::iterator &TI) {
986   auto Exp = getTypeExpansion(Ty, Context);
987   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
988     for (int i = 0, n = CAExp->NumElts; i < n; i++) {
989       getExpandedTypes(CAExp->EltTy, TI);
990     }
991   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
992     for (auto BS : RExp->Bases)
993       getExpandedTypes(BS->getType(), TI);
994     for (auto FD : RExp->Fields)
995       getExpandedTypes(FD->getType(), TI);
996   } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
997     llvm::Type *EltTy = ConvertType(CExp->EltTy);
998     *TI++ = EltTy;
999     *TI++ = EltTy;
1000   } else {
1001     assert(isa<NoExpansion>(Exp.get()));
1002     *TI++ = ConvertType(Ty);
1003   }
1004 }
1005 
1006 static void forConstantArrayExpansion(CodeGenFunction &CGF,
1007                                       ConstantArrayExpansion *CAE,
1008                                       Address BaseAddr,
1009                                       llvm::function_ref<void(Address)> Fn) {
1010   CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1011   CharUnits EltAlign =
1012     BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1013 
1014   for (int i = 0, n = CAE->NumElts; i < n; i++) {
1015     llvm::Value *EltAddr =
1016       CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
1017     Fn(Address(EltAddr, EltAlign));
1018   }
1019 }
1020 
1021 void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
1022                                          llvm::Function::arg_iterator &AI) {
1023   assert(LV.isSimple() &&
1024          "Unexpected non-simple lvalue during struct expansion.");
1025 
1026   auto Exp = getTypeExpansion(Ty, getContext());
1027   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1028     forConstantArrayExpansion(
1029         *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
1030           LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1031           ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1032         });
1033   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1034     Address This = LV.getAddress(*this);
1035     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1036       // Perform a single step derived-to-base conversion.
1037       Address Base =
1038           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1039                                 /*NullCheckValue=*/false, SourceLocation());
1040       LValue SubLV = MakeAddrLValue(Base, BS->getType());
1041 
1042       // Recurse onto bases.
1043       ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1044     }
1045     for (auto FD : RExp->Fields) {
1046       // FIXME: What are the right qualifiers here?
1047       LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1048       ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1049     }
1050   } else if (isa<ComplexExpansion>(Exp.get())) {
1051     auto realValue = &*AI++;
1052     auto imagValue = &*AI++;
1053     EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1054   } else {
1055     // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
1056     // primitive store.
1057     assert(isa<NoExpansion>(Exp.get()));
1058     if (LV.isBitField())
1059       EmitStoreThroughLValue(RValue::get(&*AI++), LV);
1060     else
1061       EmitStoreOfScalar(&*AI++, LV);
1062   }
1063 }
1064 
1065 void CodeGenFunction::ExpandTypeToArgs(
1066     QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1067     SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1068   auto Exp = getTypeExpansion(Ty, getContext());
1069   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1070     Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
1071                                    : Arg.getKnownRValue().getAggregateAddress();
1072     forConstantArrayExpansion(
1073         *this, CAExp, Addr, [&](Address EltAddr) {
1074           CallArg EltArg = CallArg(
1075               convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1076               CAExp->EltTy);
1077           ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1078                            IRCallArgPos);
1079         });
1080   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1081     Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
1082                                    : Arg.getKnownRValue().getAggregateAddress();
1083     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1084       // Perform a single step derived-to-base conversion.
1085       Address Base =
1086           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1087                                 /*NullCheckValue=*/false, SourceLocation());
1088       CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1089 
1090       // Recurse onto bases.
1091       ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1092                        IRCallArgPos);
1093     }
1094 
1095     LValue LV = MakeAddrLValue(This, Ty);
1096     for (auto FD : RExp->Fields) {
1097       CallArg FldArg =
1098           CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1099       ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1100                        IRCallArgPos);
1101     }
1102   } else if (isa<ComplexExpansion>(Exp.get())) {
1103     ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1104     IRCallArgs[IRCallArgPos++] = CV.first;
1105     IRCallArgs[IRCallArgPos++] = CV.second;
1106   } else {
1107     assert(isa<NoExpansion>(Exp.get()));
1108     auto RV = Arg.getKnownRValue();
1109     assert(RV.isScalar() &&
1110            "Unexpected non-scalar rvalue during struct expansion.");
1111 
1112     // Insert a bitcast as needed.
1113     llvm::Value *V = RV.getScalarVal();
1114     if (IRCallArgPos < IRFuncTy->getNumParams() &&
1115         V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1116       V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1117 
1118     IRCallArgs[IRCallArgPos++] = V;
1119   }
1120 }
1121 
1122 /// Create a temporary allocation for the purposes of coercion.
1123 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1124                                            CharUnits MinAlign,
1125                                            const Twine &Name = "tmp") {
1126   // Don't use an alignment that's worse than what LLVM would prefer.
1127   auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1128   CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1129 
1130   return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
1131 }
1132 
1133 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1134 /// accessing some number of bytes out of it, try to gep into the struct to get
1135 /// at its inner goodness.  Dive as deep as possible without entering an element
1136 /// with an in-memory size smaller than DstSize.
1137 static Address
1138 EnterStructPointerForCoercedAccess(Address SrcPtr,
1139                                    llvm::StructType *SrcSTy,
1140                                    uint64_t DstSize, CodeGenFunction &CGF) {
1141   // We can't dive into a zero-element struct.
1142   if (SrcSTy->getNumElements() == 0) return SrcPtr;
1143 
1144   llvm::Type *FirstElt = SrcSTy->getElementType(0);
1145 
1146   // If the first elt is at least as large as what we're looking for, or if the
1147   // first element is the same size as the whole struct, we can enter it. The
1148   // comparison must be made on the store size and not the alloca size. Using
1149   // the alloca size may overstate the size of the load.
1150   uint64_t FirstEltSize =
1151     CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1152   if (FirstEltSize < DstSize &&
1153       FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1154     return SrcPtr;
1155 
1156   // GEP into the first element.
1157   SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");
1158 
1159   // If the first element is a struct, recurse.
1160   llvm::Type *SrcTy = SrcPtr.getElementType();
1161   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1162     return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1163 
1164   return SrcPtr;
1165 }
1166 
1167 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1168 /// are either integers or pointers.  This does a truncation of the value if it
1169 /// is too large or a zero extension if it is too small.
1170 ///
1171 /// This behaves as if the value were coerced through memory, so on big-endian
1172 /// targets the high bits are preserved in a truncation, while little-endian
1173 /// targets preserve the low bits.
1174 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1175                                              llvm::Type *Ty,
1176                                              CodeGenFunction &CGF) {
1177   if (Val->getType() == Ty)
1178     return Val;
1179 
1180   if (isa<llvm::PointerType>(Val->getType())) {
1181     // If this is Pointer->Pointer avoid conversion to and from int.
1182     if (isa<llvm::PointerType>(Ty))
1183       return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1184 
1185     // Convert the pointer to an integer so we can play with its width.
1186     Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1187   }
1188 
1189   llvm::Type *DestIntTy = Ty;
1190   if (isa<llvm::PointerType>(DestIntTy))
1191     DestIntTy = CGF.IntPtrTy;
1192 
1193   if (Val->getType() != DestIntTy) {
1194     const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1195     if (DL.isBigEndian()) {
1196       // Preserve the high bits on big-endian targets.
1197       // That is what memory coercion does.
1198       uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1199       uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1200 
1201       if (SrcSize > DstSize) {
1202         Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1203         Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1204       } else {
1205         Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1206         Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1207       }
1208     } else {
1209       // Little-endian targets preserve the low bits. No shifts required.
1210       Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1211     }
1212   }
1213 
1214   if (isa<llvm::PointerType>(Ty))
1215     Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1216   return Val;
1217 }
1218 
1219 
1220 
1221 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1222 /// a pointer to an object of type \arg Ty, known to be aligned to
1223 /// \arg SrcAlign bytes.
1224 ///
1225 /// This safely handles the case when the src type is smaller than the
1226 /// destination type; in this situation the values of bits which not
1227 /// present in the src are undefined.
1228 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1229                                       CodeGenFunction &CGF) {
1230   llvm::Type *SrcTy = Src.getElementType();
1231 
1232   // If SrcTy and Ty are the same, just do a load.
1233   if (SrcTy == Ty)
1234     return CGF.Builder.CreateLoad(Src);
1235 
1236   llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1237 
1238   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1239     Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
1240                                              DstSize.getFixedSize(), CGF);
1241     SrcTy = Src.getElementType();
1242   }
1243 
1244   llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1245 
1246   // If the source and destination are integer or pointer types, just do an
1247   // extension or truncation to the desired type.
1248   if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1249       (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1250     llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1251     return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1252   }
1253 
1254   // If load is legal, just bitcast the src pointer.
1255   if (!SrcSize.isScalable() && !DstSize.isScalable() &&
1256       SrcSize.getFixedSize() >= DstSize.getFixedSize()) {
1257     // Generally SrcSize is never greater than DstSize, since this means we are
1258     // losing bits. However, this can happen in cases where the structure has
1259     // additional padding, for example due to a user specified alignment.
1260     //
1261     // FIXME: Assert that we aren't truncating non-padding bits when have access
1262     // to that information.
1263     Src = CGF.Builder.CreateBitCast(Src,
1264                                     Ty->getPointerTo(Src.getAddressSpace()));
1265     return CGF.Builder.CreateLoad(Src);
1266   }
1267 
1268   // If coercing a fixed vector to a scalable vector for ABI compatibility, and
1269   // the types match, use the llvm.experimental.vector.insert intrinsic to
1270   // perform the conversion.
1271   if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
1272     if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
1273       if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
1274         auto *Load = CGF.Builder.CreateLoad(Src);
1275         auto *UndefVec = llvm::UndefValue::get(ScalableDst);
1276         auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
1277         return CGF.Builder.CreateInsertVector(ScalableDst, UndefVec, Load, Zero,
1278                                               "castScalableSve");
1279       }
1280     }
1281   }
1282 
1283   // Otherwise do coercion through memory. This is stupid, but simple.
1284   Address Tmp =
1285       CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
1286   CGF.Builder.CreateMemCpy(
1287       Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
1288       Src.getAlignment().getAsAlign(),
1289       llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
1290   return CGF.Builder.CreateLoad(Tmp);
1291 }
1292 
1293 // Function to store a first-class aggregate into memory.  We prefer to
1294 // store the elements rather than the aggregate to be more friendly to
1295 // fast-isel.
1296 // FIXME: Do we need to recurse here?
1297 void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
1298                                          bool DestIsVolatile) {
1299   // Prefer scalar stores to first-class aggregate stores.
1300   if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
1301     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1302       Address EltPtr = Builder.CreateStructGEP(Dest, i);
1303       llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
1304       Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1305     }
1306   } else {
1307     Builder.CreateStore(Val, Dest, DestIsVolatile);
1308   }
1309 }
1310 
1311 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1312 /// where the source and destination may have different types.  The
1313 /// destination is known to be aligned to \arg DstAlign bytes.
1314 ///
1315 /// This safely handles the case when the src type is larger than the
1316 /// destination type; the upper bits of the src will be lost.
1317 static void CreateCoercedStore(llvm::Value *Src,
1318                                Address Dst,
1319                                bool DstIsVolatile,
1320                                CodeGenFunction &CGF) {
1321   llvm::Type *SrcTy = Src->getType();
1322   llvm::Type *DstTy = Dst.getElementType();
1323   if (SrcTy == DstTy) {
1324     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1325     return;
1326   }
1327 
1328   llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1329 
1330   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1331     Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
1332                                              SrcSize.getFixedSize(), CGF);
1333     DstTy = Dst.getElementType();
1334   }
1335 
1336   llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
1337   llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
1338   if (SrcPtrTy && DstPtrTy &&
1339       SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
1340     Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
1341     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1342     return;
1343   }
1344 
1345   // If the source and destination are integer or pointer types, just do an
1346   // extension or truncation to the desired type.
1347   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1348       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1349     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1350     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1351     return;
1352   }
1353 
1354   llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1355 
1356   // If store is legal, just bitcast the src pointer.
1357   if (isa<llvm::ScalableVectorType>(SrcTy) ||
1358       isa<llvm::ScalableVectorType>(DstTy) ||
1359       SrcSize.getFixedSize() <= DstSize.getFixedSize()) {
1360     Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1361     CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
1362   } else {
1363     // Otherwise do coercion through memory. This is stupid, but
1364     // simple.
1365 
1366     // Generally SrcSize is never greater than DstSize, since this means we are
1367     // losing bits. However, this can happen in cases where the structure has
1368     // additional padding, for example due to a user specified alignment.
1369     //
1370     // FIXME: Assert that we aren't truncating non-padding bits when have access
1371     // to that information.
1372     Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1373     CGF.Builder.CreateStore(Src, Tmp);
1374     CGF.Builder.CreateMemCpy(
1375         Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
1376         Tmp.getAlignment().getAsAlign(),
1377         llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
1378   }
1379 }
1380 
1381 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1382                                    const ABIArgInfo &info) {
1383   if (unsigned offset = info.getDirectOffset()) {
1384     addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1385     addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1386                                              CharUnits::fromQuantity(offset));
1387     addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1388   }
1389   return addr;
1390 }
1391 
1392 namespace {
1393 
1394 /// Encapsulates information about the way function arguments from
1395 /// CGFunctionInfo should be passed to actual LLVM IR function.
1396 class ClangToLLVMArgMapping {
1397   static const unsigned InvalidIndex = ~0U;
1398   unsigned InallocaArgNo;
1399   unsigned SRetArgNo;
1400   unsigned TotalIRArgs;
1401 
1402   /// Arguments of LLVM IR function corresponding to single Clang argument.
1403   struct IRArgs {
1404     unsigned PaddingArgIndex;
1405     // Argument is expanded to IR arguments at positions
1406     // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1407     unsigned FirstArgIndex;
1408     unsigned NumberOfArgs;
1409 
1410     IRArgs()
1411         : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1412           NumberOfArgs(0) {}
1413   };
1414 
1415   SmallVector<IRArgs, 8> ArgInfo;
1416 
1417 public:
1418   ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1419                         bool OnlyRequiredArgs = false)
1420       : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1421         ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1422     construct(Context, FI, OnlyRequiredArgs);
1423   }
1424 
1425   bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1426   unsigned getInallocaArgNo() const {
1427     assert(hasInallocaArg());
1428     return InallocaArgNo;
1429   }
1430 
1431   bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1432   unsigned getSRetArgNo() const {
1433     assert(hasSRetArg());
1434     return SRetArgNo;
1435   }
1436 
1437   unsigned totalIRArgs() const { return TotalIRArgs; }
1438 
1439   bool hasPaddingArg(unsigned ArgNo) const {
1440     assert(ArgNo < ArgInfo.size());
1441     return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1442   }
1443   unsigned getPaddingArgNo(unsigned ArgNo) const {
1444     assert(hasPaddingArg(ArgNo));
1445     return ArgInfo[ArgNo].PaddingArgIndex;
1446   }
1447 
1448   /// Returns index of first IR argument corresponding to ArgNo, and their
1449   /// quantity.
1450   std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1451     assert(ArgNo < ArgInfo.size());
1452     return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1453                           ArgInfo[ArgNo].NumberOfArgs);
1454   }
1455 
1456 private:
1457   void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1458                  bool OnlyRequiredArgs);
1459 };
1460 
1461 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1462                                       const CGFunctionInfo &FI,
1463                                       bool OnlyRequiredArgs) {
1464   unsigned IRArgNo = 0;
1465   bool SwapThisWithSRet = false;
1466   const ABIArgInfo &RetAI = FI.getReturnInfo();
1467 
1468   if (RetAI.getKind() == ABIArgInfo::Indirect) {
1469     SwapThisWithSRet = RetAI.isSRetAfterThis();
1470     SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1471   }
1472 
1473   unsigned ArgNo = 0;
1474   unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1475   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1476        ++I, ++ArgNo) {
1477     assert(I != FI.arg_end());
1478     QualType ArgType = I->type;
1479     const ABIArgInfo &AI = I->info;
1480     // Collect data about IR arguments corresponding to Clang argument ArgNo.
1481     auto &IRArgs = ArgInfo[ArgNo];
1482 
1483     if (AI.getPaddingType())
1484       IRArgs.PaddingArgIndex = IRArgNo++;
1485 
1486     switch (AI.getKind()) {
1487     case ABIArgInfo::Extend:
1488     case ABIArgInfo::Direct: {
1489       // FIXME: handle sseregparm someday...
1490       llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1491       if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1492         IRArgs.NumberOfArgs = STy->getNumElements();
1493       } else {
1494         IRArgs.NumberOfArgs = 1;
1495       }
1496       break;
1497     }
1498     case ABIArgInfo::Indirect:
1499     case ABIArgInfo::IndirectAliased:
1500       IRArgs.NumberOfArgs = 1;
1501       break;
1502     case ABIArgInfo::Ignore:
1503     case ABIArgInfo::InAlloca:
1504       // ignore and inalloca doesn't have matching LLVM parameters.
1505       IRArgs.NumberOfArgs = 0;
1506       break;
1507     case ABIArgInfo::CoerceAndExpand:
1508       IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1509       break;
1510     case ABIArgInfo::Expand:
1511       IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1512       break;
1513     }
1514 
1515     if (IRArgs.NumberOfArgs > 0) {
1516       IRArgs.FirstArgIndex = IRArgNo;
1517       IRArgNo += IRArgs.NumberOfArgs;
1518     }
1519 
1520     // Skip over the sret parameter when it comes second.  We already handled it
1521     // above.
1522     if (IRArgNo == 1 && SwapThisWithSRet)
1523       IRArgNo++;
1524   }
1525   assert(ArgNo == ArgInfo.size());
1526 
1527   if (FI.usesInAlloca())
1528     InallocaArgNo = IRArgNo++;
1529 
1530   TotalIRArgs = IRArgNo;
1531 }
1532 }  // namespace
1533 
1534 /***/
1535 
1536 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1537   const auto &RI = FI.getReturnInfo();
1538   return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1539 }
1540 
1541 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1542   return ReturnTypeUsesSRet(FI) &&
1543          getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1544 }
1545 
1546 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1547   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1548     switch (BT->getKind()) {
1549     default:
1550       return false;
1551     case BuiltinType::Float:
1552       return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1553     case BuiltinType::Double:
1554       return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1555     case BuiltinType::LongDouble:
1556       return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1557     }
1558   }
1559 
1560   return false;
1561 }
1562 
1563 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1564   if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1565     if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1566       if (BT->getKind() == BuiltinType::LongDouble)
1567         return getTarget().useObjCFP2RetForComplexLongDouble();
1568     }
1569   }
1570 
1571   return false;
1572 }
1573 
1574 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1575   const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1576   return GetFunctionType(FI);
1577 }
1578 
1579 llvm::FunctionType *
1580 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1581 
1582   bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1583   (void)Inserted;
1584   assert(Inserted && "Recursively being processed?");
1585 
1586   llvm::Type *resultType = nullptr;
1587   const ABIArgInfo &retAI = FI.getReturnInfo();
1588   switch (retAI.getKind()) {
1589   case ABIArgInfo::Expand:
1590   case ABIArgInfo::IndirectAliased:
1591     llvm_unreachable("Invalid ABI kind for return argument");
1592 
1593   case ABIArgInfo::Extend:
1594   case ABIArgInfo::Direct:
1595     resultType = retAI.getCoerceToType();
1596     break;
1597 
1598   case ABIArgInfo::InAlloca:
1599     if (retAI.getInAllocaSRet()) {
1600       // sret things on win32 aren't void, they return the sret pointer.
1601       QualType ret = FI.getReturnType();
1602       llvm::Type *ty = ConvertType(ret);
1603       unsigned addressSpace = Context.getTargetAddressSpace(ret);
1604       resultType = llvm::PointerType::get(ty, addressSpace);
1605     } else {
1606       resultType = llvm::Type::getVoidTy(getLLVMContext());
1607     }
1608     break;
1609 
1610   case ABIArgInfo::Indirect:
1611   case ABIArgInfo::Ignore:
1612     resultType = llvm::Type::getVoidTy(getLLVMContext());
1613     break;
1614 
1615   case ABIArgInfo::CoerceAndExpand:
1616     resultType = retAI.getUnpaddedCoerceAndExpandType();
1617     break;
1618   }
1619 
1620   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1621   SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1622 
1623   // Add type for sret argument.
1624   if (IRFunctionArgs.hasSRetArg()) {
1625     QualType Ret = FI.getReturnType();
1626     llvm::Type *Ty = ConvertType(Ret);
1627     unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1628     ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1629         llvm::PointerType::get(Ty, AddressSpace);
1630   }
1631 
1632   // Add type for inalloca argument.
1633   if (IRFunctionArgs.hasInallocaArg()) {
1634     auto ArgStruct = FI.getArgStruct();
1635     assert(ArgStruct);
1636     ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1637   }
1638 
1639   // Add in all of the required arguments.
1640   unsigned ArgNo = 0;
1641   CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1642                                      ie = it + FI.getNumRequiredArgs();
1643   for (; it != ie; ++it, ++ArgNo) {
1644     const ABIArgInfo &ArgInfo = it->info;
1645 
1646     // Insert a padding type to ensure proper alignment.
1647     if (IRFunctionArgs.hasPaddingArg(ArgNo))
1648       ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1649           ArgInfo.getPaddingType();
1650 
1651     unsigned FirstIRArg, NumIRArgs;
1652     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1653 
1654     switch (ArgInfo.getKind()) {
1655     case ABIArgInfo::Ignore:
1656     case ABIArgInfo::InAlloca:
1657       assert(NumIRArgs == 0);
1658       break;
1659 
1660     case ABIArgInfo::Indirect: {
1661       assert(NumIRArgs == 1);
1662       // indirect arguments are always on the stack, which is alloca addr space.
1663       llvm::Type *LTy = ConvertTypeForMem(it->type);
1664       ArgTypes[FirstIRArg] = LTy->getPointerTo(
1665           CGM.getDataLayout().getAllocaAddrSpace());
1666       break;
1667     }
1668     case ABIArgInfo::IndirectAliased: {
1669       assert(NumIRArgs == 1);
1670       llvm::Type *LTy = ConvertTypeForMem(it->type);
1671       ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace());
1672       break;
1673     }
1674     case ABIArgInfo::Extend:
1675     case ABIArgInfo::Direct: {
1676       // Fast-isel and the optimizer generally like scalar values better than
1677       // FCAs, so we flatten them if this is safe to do for this argument.
1678       llvm::Type *argType = ArgInfo.getCoerceToType();
1679       llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1680       if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1681         assert(NumIRArgs == st->getNumElements());
1682         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1683           ArgTypes[FirstIRArg + i] = st->getElementType(i);
1684       } else {
1685         assert(NumIRArgs == 1);
1686         ArgTypes[FirstIRArg] = argType;
1687       }
1688       break;
1689     }
1690 
1691     case ABIArgInfo::CoerceAndExpand: {
1692       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1693       for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1694         *ArgTypesIter++ = EltTy;
1695       }
1696       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1697       break;
1698     }
1699 
1700     case ABIArgInfo::Expand:
1701       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1702       getExpandedTypes(it->type, ArgTypesIter);
1703       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1704       break;
1705     }
1706   }
1707 
1708   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1709   assert(Erased && "Not in set?");
1710 
1711   return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1712 }
1713 
1714 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1715   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1716   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1717 
1718   if (!isFuncTypeConvertible(FPT))
1719     return llvm::StructType::get(getLLVMContext());
1720 
1721   return GetFunctionType(GD);
1722 }
1723 
1724 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1725                                                llvm::AttrBuilder &FuncAttrs,
1726                                                const FunctionProtoType *FPT) {
1727   if (!FPT)
1728     return;
1729 
1730   if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1731       FPT->isNothrow())
1732     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1733 }
1734 
1735 bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context,
1736                                           QualType ReturnType) {
1737   // We can't just discard the return value for a record type with a
1738   // complex destructor or a non-trivially copyable type.
1739   if (const RecordType *RT =
1740           ReturnType.getCanonicalType()->getAs<RecordType>()) {
1741     if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1742       return ClassDecl->hasTrivialDestructor();
1743   }
1744   return ReturnType.isTriviallyCopyableType(Context);
1745 }
1746 
1747 void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
1748                                                  bool HasOptnone,
1749                                                  bool AttrOnCallSite,
1750                                                llvm::AttrBuilder &FuncAttrs) {
1751   // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1752   if (!HasOptnone) {
1753     if (CodeGenOpts.OptimizeSize)
1754       FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1755     if (CodeGenOpts.OptimizeSize == 2)
1756       FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1757   }
1758 
1759   if (CodeGenOpts.DisableRedZone)
1760     FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1761   if (CodeGenOpts.IndirectTlsSegRefs)
1762     FuncAttrs.addAttribute("indirect-tls-seg-refs");
1763   if (CodeGenOpts.NoImplicitFloat)
1764     FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1765 
1766   if (AttrOnCallSite) {
1767     // Attributes that should go on the call site only.
1768     if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name))
1769       FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1770     if (!CodeGenOpts.TrapFuncName.empty())
1771       FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1772   } else {
1773     StringRef FpKind;
1774     switch (CodeGenOpts.getFramePointer()) {
1775     case CodeGenOptions::FramePointerKind::None:
1776       FpKind = "none";
1777       break;
1778     case CodeGenOptions::FramePointerKind::NonLeaf:
1779       FpKind = "non-leaf";
1780       break;
1781     case CodeGenOptions::FramePointerKind::All:
1782       FpKind = "all";
1783       break;
1784     }
1785     FuncAttrs.addAttribute("frame-pointer", FpKind);
1786 
1787     if (CodeGenOpts.LessPreciseFPMAD)
1788       FuncAttrs.addAttribute("less-precise-fpmad", "true");
1789 
1790     if (CodeGenOpts.NullPointerIsValid)
1791       FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
1792 
1793     if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
1794       FuncAttrs.addAttribute("denormal-fp-math",
1795                              CodeGenOpts.FPDenormalMode.str());
1796     if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
1797       FuncAttrs.addAttribute(
1798           "denormal-fp-math-f32",
1799           CodeGenOpts.FP32DenormalMode.str());
1800     }
1801 
1802     if (LangOpts.getFPExceptionMode() == LangOptions::FPE_Ignore)
1803       FuncAttrs.addAttribute("no-trapping-math", "true");
1804 
1805     // Strict (compliant) code is the default, so only add this attribute to
1806     // indicate that we are trying to workaround a problem case.
1807     if (!CodeGenOpts.StrictFloatCastOverflow)
1808       FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1809 
1810     // TODO: Are these all needed?
1811     // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1812     if (LangOpts.NoHonorInfs)
1813       FuncAttrs.addAttribute("no-infs-fp-math", "true");
1814     if (LangOpts.NoHonorNaNs)
1815       FuncAttrs.addAttribute("no-nans-fp-math", "true");
1816     if (LangOpts.UnsafeFPMath)
1817       FuncAttrs.addAttribute("unsafe-fp-math", "true");
1818     if (CodeGenOpts.SoftFloat)
1819       FuncAttrs.addAttribute("use-soft-float", "true");
1820     FuncAttrs.addAttribute("stack-protector-buffer-size",
1821                            llvm::utostr(CodeGenOpts.SSPBufferSize));
1822     if (LangOpts.NoSignedZero)
1823       FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");
1824 
1825     // TODO: Reciprocal estimate codegen options should apply to instructions?
1826     const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1827     if (!Recips.empty())
1828       FuncAttrs.addAttribute("reciprocal-estimates",
1829                              llvm::join(Recips, ","));
1830 
1831     if (!CodeGenOpts.PreferVectorWidth.empty() &&
1832         CodeGenOpts.PreferVectorWidth != "none")
1833       FuncAttrs.addAttribute("prefer-vector-width",
1834                              CodeGenOpts.PreferVectorWidth);
1835 
1836     if (CodeGenOpts.StackRealignment)
1837       FuncAttrs.addAttribute("stackrealign");
1838     if (CodeGenOpts.Backchain)
1839       FuncAttrs.addAttribute("backchain");
1840     if (CodeGenOpts.EnableSegmentedStacks)
1841       FuncAttrs.addAttribute("split-stack");
1842 
1843     if (CodeGenOpts.SpeculativeLoadHardening)
1844       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1845   }
1846 
1847   if (getLangOpts().assumeFunctionsAreConvergent()) {
1848     // Conservatively, mark all functions and calls in CUDA and OpenCL as
1849     // convergent (meaning, they may call an intrinsically convergent op, such
1850     // as __syncthreads() / barrier(), and so can't have certain optimizations
1851     // applied around them).  LLVM will remove this attribute where it safely
1852     // can.
1853     FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1854   }
1855 
1856   if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1857     // Exceptions aren't supported in CUDA device code.
1858     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1859   }
1860 
1861   for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1862     StringRef Var, Value;
1863     std::tie(Var, Value) = Attr.split('=');
1864     FuncAttrs.addAttribute(Var, Value);
1865   }
1866 }
1867 
1868 void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
1869   llvm::AttrBuilder FuncAttrs;
1870   getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
1871                                /* AttrOnCallSite = */ false, FuncAttrs);
1872   // TODO: call GetCPUAndFeaturesAttributes?
1873   F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1874 }
1875 
1876 void CodeGenModule::addDefaultFunctionDefinitionAttributes(
1877                                                    llvm::AttrBuilder &attrs) {
1878   getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
1879                                /*for call*/ false, attrs);
1880   GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
1881 }
1882 
1883 static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
1884                                    const LangOptions &LangOpts,
1885                                    const NoBuiltinAttr *NBA = nullptr) {
1886   auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
1887     SmallString<32> AttributeName;
1888     AttributeName += "no-builtin-";
1889     AttributeName += BuiltinName;
1890     FuncAttrs.addAttribute(AttributeName);
1891   };
1892 
1893   // First, handle the language options passed through -fno-builtin.
1894   if (LangOpts.NoBuiltin) {
1895     // -fno-builtin disables them all.
1896     FuncAttrs.addAttribute("no-builtins");
1897     return;
1898   }
1899 
1900   // Then, add attributes for builtins specified through -fno-builtin-<name>.
1901   llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
1902 
1903   // Now, let's check the __attribute__((no_builtin("...")) attribute added to
1904   // the source.
1905   if (!NBA)
1906     return;
1907 
1908   // If there is a wildcard in the builtin names specified through the
1909   // attribute, disable them all.
1910   if (llvm::is_contained(NBA->builtinNames(), "*")) {
1911     FuncAttrs.addAttribute("no-builtins");
1912     return;
1913   }
1914 
1915   // And last, add the rest of the builtin names.
1916   llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
1917 }
1918 
1919 static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
1920                              const llvm::DataLayout &DL, const ABIArgInfo &AI,
1921                              bool CheckCoerce = true) {
1922   llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
1923   if (AI.getKind() == ABIArgInfo::Indirect)
1924     return true;
1925   if (AI.getKind() == ABIArgInfo::Extend)
1926     return true;
1927   if (!DL.typeSizeEqualsStoreSize(Ty))
1928     // TODO: This will result in a modest amount of values not marked noundef
1929     // when they could be. We care about values that *invisibly* contain undef
1930     // bits from the perspective of LLVM IR.
1931     return false;
1932   if (CheckCoerce && AI.canHaveCoerceToType()) {
1933     llvm::Type *CoerceTy = AI.getCoerceToType();
1934     if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
1935                                   DL.getTypeSizeInBits(Ty)))
1936       // If we're coercing to a type with a greater size than the canonical one,
1937       // we're introducing new undef bits.
1938       // Coercing to a type of smaller or equal size is ok, as we know that
1939       // there's no internal padding (typeSizeEqualsStoreSize).
1940       return false;
1941   }
1942   if (QTy->isExtIntType())
1943     return true;
1944   if (QTy->isReferenceType())
1945     return true;
1946   if (QTy->isNullPtrType())
1947     return false;
1948   if (QTy->isMemberPointerType())
1949     // TODO: Some member pointers are `noundef`, but it depends on the ABI. For
1950     // now, never mark them.
1951     return false;
1952   if (QTy->isScalarType()) {
1953     if (const ComplexType *Complex = dyn_cast<ComplexType>(QTy))
1954       return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false);
1955     return true;
1956   }
1957   if (const VectorType *Vector = dyn_cast<VectorType>(QTy))
1958     return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false);
1959   if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
1960     return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false);
1961   if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))
1962     return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false);
1963 
1964   // TODO: Some structs may be `noundef`, in specific situations.
1965   return false;
1966 }
1967 
1968 /// Construct the IR attribute list of a function or call.
1969 ///
1970 /// When adding an attribute, please consider where it should be handled:
1971 ///
1972 ///   - getDefaultFunctionAttributes is for attributes that are essentially
1973 ///     part of the global target configuration (but perhaps can be
1974 ///     overridden on a per-function basis).  Adding attributes there
1975 ///     will cause them to also be set in frontends that build on Clang's
1976 ///     target-configuration logic, as well as for code defined in library
1977 ///     modules such as CUDA's libdevice.
1978 ///
1979 ///   - ConstructAttributeList builds on top of getDefaultFunctionAttributes
1980 ///     and adds declaration-specific, convention-specific, and
1981 ///     frontend-specific logic.  The last is of particular importance:
1982 ///     attributes that restrict how the frontend generates code must be
1983 ///     added here rather than getDefaultFunctionAttributes.
1984 ///
1985 void CodeGenModule::ConstructAttributeList(StringRef Name,
1986                                            const CGFunctionInfo &FI,
1987                                            CGCalleeInfo CalleeInfo,
1988                                            llvm::AttributeList &AttrList,
1989                                            unsigned &CallingConv,
1990                                            bool AttrOnCallSite, bool IsThunk) {
1991   llvm::AttrBuilder FuncAttrs;
1992   llvm::AttrBuilder RetAttrs;
1993 
1994   // Collect function IR attributes from the CC lowering.
1995   // We'll collect the paramete and result attributes later.
1996   CallingConv = FI.getEffectiveCallingConvention();
1997   if (FI.isNoReturn())
1998     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1999   if (FI.isCmseNSCall())
2000     FuncAttrs.addAttribute("cmse_nonsecure_call");
2001 
2002   // Collect function IR attributes from the callee prototype if we have one.
2003   AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
2004                                      CalleeInfo.getCalleeFunctionProtoType());
2005 
2006   const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
2007 
2008   bool HasOptnone = false;
2009   // The NoBuiltinAttr attached to the target FunctionDecl.
2010   const NoBuiltinAttr *NBA = nullptr;
2011 
2012   // Collect function IR attributes based on declaration-specific
2013   // information.
2014   // FIXME: handle sseregparm someday...
2015   if (TargetDecl) {
2016     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
2017       FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2018     if (TargetDecl->hasAttr<NoThrowAttr>())
2019       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2020     if (TargetDecl->hasAttr<NoReturnAttr>())
2021       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2022     if (TargetDecl->hasAttr<ColdAttr>())
2023       FuncAttrs.addAttribute(llvm::Attribute::Cold);
2024     if (TargetDecl->hasAttr<HotAttr>())
2025       FuncAttrs.addAttribute(llvm::Attribute::Hot);
2026     if (TargetDecl->hasAttr<NoDuplicateAttr>())
2027       FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2028     if (TargetDecl->hasAttr<ConvergentAttr>())
2029       FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2030 
2031     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2032       AddAttributesFromFunctionProtoType(
2033           getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
2034       if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2035         // A sane operator new returns a non-aliasing pointer.
2036         auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2037         if (getCodeGenOpts().AssumeSaneOperatorNew &&
2038             (Kind == OO_New || Kind == OO_Array_New))
2039           RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2040       }
2041       const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
2042       const bool IsVirtualCall = MD && MD->isVirtual();
2043       // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
2044       // virtual function. These attributes are not inherited by overloads.
2045       if (!(AttrOnCallSite && IsVirtualCall)) {
2046         if (Fn->isNoReturn())
2047           FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2048         NBA = Fn->getAttr<NoBuiltinAttr>();
2049       }
2050       // Only place nomerge attribute on call sites, never functions. This
2051       // allows it to work on indirect virtual function calls.
2052       if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
2053         FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2054 
2055       // Add known guaranteed alignment for allocation functions.
2056       if (unsigned BuiltinID = Fn->getBuiltinID()) {
2057         switch (BuiltinID) {
2058         case Builtin::BIaligned_alloc:
2059         case Builtin::BIcalloc:
2060         case Builtin::BImalloc:
2061         case Builtin::BImemalign:
2062         case Builtin::BIrealloc:
2063         case Builtin::BIstrdup:
2064         case Builtin::BIstrndup:
2065           RetAttrs.addAlignmentAttr(Context.getTargetInfo().getNewAlign() /
2066                                     Context.getTargetInfo().getCharWidth());
2067           break;
2068         default:
2069           break;
2070         }
2071       }
2072     }
2073 
2074     // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
2075     if (TargetDecl->hasAttr<ConstAttr>()) {
2076       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
2077       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2078       // gcc specifies that 'const' functions have greater restrictions than
2079       // 'pure' functions, so they also cannot have infinite loops.
2080       FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2081     } else if (TargetDecl->hasAttr<PureAttr>()) {
2082       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
2083       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2084       // gcc specifies that 'pure' functions cannot have infinite loops.
2085       FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2086     } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
2087       FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
2088       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2089     }
2090     if (TargetDecl->hasAttr<RestrictAttr>())
2091       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2092     if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
2093         !CodeGenOpts.NullPointerIsValid)
2094       RetAttrs.addAttribute(llvm::Attribute::NonNull);
2095     if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2096       FuncAttrs.addAttribute("no_caller_saved_registers");
2097     if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
2098       FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2099     if (TargetDecl->hasAttr<LeafAttr>())
2100       FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2101 
2102     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
2103     if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
2104       Optional<unsigned> NumElemsParam;
2105       if (AllocSize->getNumElemsParam().isValid())
2106         NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2107       FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2108                                  NumElemsParam);
2109     }
2110 
2111     if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
2112       if (getLangOpts().OpenCLVersion <= 120) {
2113         // OpenCL v1.2 Work groups are always uniform
2114         FuncAttrs.addAttribute("uniform-work-group-size", "true");
2115       } else {
2116         // OpenCL v2.0 Work groups may be whether uniform or not.
2117         // '-cl-uniform-work-group-size' compile option gets a hint
2118         // to the compiler that the global work-size be a multiple of
2119         // the work-group size specified to clEnqueueNDRangeKernel
2120         // (i.e. work groups are uniform).
2121         FuncAttrs.addAttribute("uniform-work-group-size",
2122                                llvm::toStringRef(CodeGenOpts.UniformWGSize));
2123       }
2124     }
2125 
2126     std::string AssumptionValueStr;
2127     for (AssumptionAttr *AssumptionA :
2128          TargetDecl->specific_attrs<AssumptionAttr>()) {
2129       std::string AS = AssumptionA->getAssumption().str();
2130       if (!AS.empty() && !AssumptionValueStr.empty())
2131         AssumptionValueStr += ",";
2132       AssumptionValueStr += AS;
2133     }
2134 
2135     if (!AssumptionValueStr.empty())
2136       FuncAttrs.addAttribute(llvm::AssumptionAttrKey, AssumptionValueStr);
2137   }
2138 
2139   // Attach "no-builtins" attributes to:
2140   // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
2141   // * definitions: "no-builtins" or "no-builtin-<name>" only.
2142   // The attributes can come from:
2143   // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
2144   // * FunctionDecl attributes: __attribute__((no_builtin(...)))
2145   addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
2146 
2147   // Collect function IR attributes based on global settiings.
2148   getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2149 
2150   // Override some default IR attributes based on declaration-specific
2151   // information.
2152   if (TargetDecl) {
2153     if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
2154       FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2155     if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
2156       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2157     if (TargetDecl->hasAttr<NoSplitStackAttr>())
2158       FuncAttrs.removeAttribute("split-stack");
2159 
2160     // Add NonLazyBind attribute to function declarations when -fno-plt
2161     // is used.
2162     // FIXME: what if we just haven't processed the function definition
2163     // yet, or if it's an external definition like C99 inline?
2164     if (CodeGenOpts.NoPLT) {
2165       if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2166         if (!Fn->isDefined() && !AttrOnCallSite) {
2167           FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2168         }
2169       }
2170     }
2171   }
2172 
2173   // Add "sample-profile-suffix-elision-policy" attribute for internal linkage
2174   // functions with -funique-internal-linkage-names.
2175   if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2176     if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2177       if (this->getFunctionLinkage(Fn) == llvm::GlobalValue::InternalLinkage)
2178         FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",
2179                                "selected");
2180     }
2181   }
2182 
2183   // Collect non-call-site function IR attributes from declaration-specific
2184   // information.
2185   if (!AttrOnCallSite) {
2186     if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
2187       FuncAttrs.addAttribute("cmse_nonsecure_entry");
2188 
2189     // Whether tail calls are enabled.
2190     auto shouldDisableTailCalls = [&] {
2191       // Should this be honored in getDefaultFunctionAttributes?
2192       if (CodeGenOpts.DisableTailCalls)
2193         return true;
2194 
2195       if (!TargetDecl)
2196         return false;
2197 
2198       if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
2199           TargetDecl->hasAttr<AnyX86InterruptAttr>())
2200         return true;
2201 
2202       if (CodeGenOpts.NoEscapingBlockTailCalls) {
2203         if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2204           if (!BD->doesNotEscape())
2205             return true;
2206       }
2207 
2208       return false;
2209     };
2210     if (shouldDisableTailCalls())
2211       FuncAttrs.addAttribute("disable-tail-calls", "true");
2212 
2213     // CPU/feature overrides.  addDefaultFunctionDefinitionAttributes
2214     // handles these separately to set them based on the global defaults.
2215     GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
2216   }
2217 
2218   // Collect attributes from arguments and return values.
2219   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
2220 
2221   QualType RetTy = FI.getReturnType();
2222   const ABIArgInfo &RetAI = FI.getReturnInfo();
2223   const llvm::DataLayout &DL = getDataLayout();
2224 
2225   // C++ explicitly makes returning undefined values UB. C's rule only applies
2226   // to used values, so we never mark them noundef for now.
2227   bool HasStrictReturn = getLangOpts().CPlusPlus;
2228   if (TargetDecl) {
2229     if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl))
2230       HasStrictReturn &= !FDecl->isExternC();
2231     else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl))
2232       // Function pointer
2233       HasStrictReturn &= !VDecl->isExternC();
2234   }
2235 
2236   // We don't want to be too aggressive with the return checking, unless
2237   // it's explicit in the code opts or we're using an appropriate sanitizer.
2238   // Try to respect what the programmer intended.
2239   HasStrictReturn &= getCodeGenOpts().StrictReturn ||
2240                      !MayDropFunctionReturn(getContext(), RetTy) ||
2241                      getLangOpts().Sanitize.has(SanitizerKind::Memory) ||
2242                      getLangOpts().Sanitize.has(SanitizerKind::Return);
2243 
2244   // Determine if the return type could be partially undef
2245   if (CodeGenOpts.EnableNoundefAttrs && HasStrictReturn) {
2246     if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
2247         DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
2248       RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2249   }
2250 
2251   switch (RetAI.getKind()) {
2252   case ABIArgInfo::Extend:
2253     if (RetAI.isSignExt())
2254       RetAttrs.addAttribute(llvm::Attribute::SExt);
2255     else
2256       RetAttrs.addAttribute(llvm::Attribute::ZExt);
2257     LLVM_FALLTHROUGH;
2258   case ABIArgInfo::Direct:
2259     if (RetAI.getInReg())
2260       RetAttrs.addAttribute(llvm::Attribute::InReg);
2261     break;
2262   case ABIArgInfo::Ignore:
2263     break;
2264 
2265   case ABIArgInfo::InAlloca:
2266   case ABIArgInfo::Indirect: {
2267     // inalloca and sret disable readnone and readonly
2268     FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2269       .removeAttribute(llvm::Attribute::ReadNone);
2270     break;
2271   }
2272 
2273   case ABIArgInfo::CoerceAndExpand:
2274     break;
2275 
2276   case ABIArgInfo::Expand:
2277   case ABIArgInfo::IndirectAliased:
2278     llvm_unreachable("Invalid ABI kind for return argument");
2279   }
2280 
2281   if (!IsThunk) {
2282     // FIXME: fix this properly, https://reviews.llvm.org/D100388
2283     if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2284       QualType PTy = RefTy->getPointeeType();
2285       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2286         RetAttrs.addDereferenceableAttr(
2287             getMinimumObjectSize(PTy).getQuantity());
2288       if (getContext().getTargetAddressSpace(PTy) == 0 &&
2289           !CodeGenOpts.NullPointerIsValid)
2290         RetAttrs.addAttribute(llvm::Attribute::NonNull);
2291       if (PTy->isObjectType()) {
2292         llvm::Align Alignment =
2293             getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
2294         RetAttrs.addAlignmentAttr(Alignment);
2295       }
2296     }
2297   }
2298 
2299   bool hasUsedSRet = false;
2300   SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2301 
2302   // Attach attributes to sret.
2303   if (IRFunctionArgs.hasSRetArg()) {
2304     llvm::AttrBuilder SRETAttrs;
2305     SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
2306     hasUsedSRet = true;
2307     if (RetAI.getInReg())
2308       SRETAttrs.addAttribute(llvm::Attribute::InReg);
2309     SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
2310     ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2311         llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2312   }
2313 
2314   // Attach attributes to inalloca argument.
2315   if (IRFunctionArgs.hasInallocaArg()) {
2316     llvm::AttrBuilder Attrs;
2317     Attrs.addInAllocaAttr(FI.getArgStruct());
2318     ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2319         llvm::AttributeSet::get(getLLVMContext(), Attrs);
2320   }
2321 
2322   // Apply `nonnull`, `dereferencable(N)` and `align N` to the `this` argument,
2323   // unless this is a thunk function.
2324   // FIXME: fix this properly, https://reviews.llvm.org/D100388
2325   if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() &&
2326       !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) {
2327     auto IRArgs = IRFunctionArgs.getIRArgs(0);
2328 
2329     assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");
2330 
2331     llvm::AttrBuilder Attrs;
2332 
2333     QualType ThisTy =
2334         FI.arg_begin()->type.castAs<PointerType>()->getPointeeType();
2335 
2336     if (!CodeGenOpts.NullPointerIsValid &&
2337         getContext().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
2338       Attrs.addAttribute(llvm::Attribute::NonNull);
2339       Attrs.addDereferenceableAttr(getMinimumObjectSize(ThisTy).getQuantity());
2340     } else {
2341       // FIXME dereferenceable should be correct here, regardless of
2342       // NullPointerIsValid. However, dereferenceable currently does not always
2343       // respect NullPointerIsValid and may imply nonnull and break the program.
2344       // See https://reviews.llvm.org/D66618 for discussions.
2345       Attrs.addDereferenceableOrNullAttr(
2346           getMinimumObjectSize(
2347               FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
2348               .getQuantity());
2349     }
2350 
2351     llvm::Align Alignment =
2352         getNaturalTypeAlignment(ThisTy, /*BaseInfo=*/nullptr,
2353                                 /*TBAAInfo=*/nullptr, /*forPointeeType=*/true)
2354             .getAsAlign();
2355     Attrs.addAlignmentAttr(Alignment);
2356 
2357     ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);
2358   }
2359 
2360   unsigned ArgNo = 0;
2361   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2362                                           E = FI.arg_end();
2363        I != E; ++I, ++ArgNo) {
2364     QualType ParamType = I->type;
2365     const ABIArgInfo &AI = I->info;
2366     llvm::AttrBuilder Attrs;
2367 
2368     // Add attribute for padding argument, if necessary.
2369     if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2370       if (AI.getPaddingInReg()) {
2371         ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2372             llvm::AttributeSet::get(
2373                 getLLVMContext(),
2374                 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2375       }
2376     }
2377 
2378     // Decide whether the argument we're handling could be partially undef
2379     bool ArgNoUndef = DetermineNoUndef(ParamType, getTypes(), DL, AI);
2380     if (CodeGenOpts.EnableNoundefAttrs && ArgNoUndef)
2381       Attrs.addAttribute(llvm::Attribute::NoUndef);
2382 
2383     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2384     // have the corresponding parameter variable.  It doesn't make
2385     // sense to do it here because parameters are so messed up.
2386     switch (AI.getKind()) {
2387     case ABIArgInfo::Extend:
2388       if (AI.isSignExt())
2389         Attrs.addAttribute(llvm::Attribute::SExt);
2390       else
2391         Attrs.addAttribute(llvm::Attribute::ZExt);
2392       LLVM_FALLTHROUGH;
2393     case ABIArgInfo::Direct:
2394       if (ArgNo == 0 && FI.isChainCall())
2395         Attrs.addAttribute(llvm::Attribute::Nest);
2396       else if (AI.getInReg())
2397         Attrs.addAttribute(llvm::Attribute::InReg);
2398       Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));
2399       break;
2400 
2401     case ABIArgInfo::Indirect: {
2402       if (AI.getInReg())
2403         Attrs.addAttribute(llvm::Attribute::InReg);
2404 
2405       if (AI.getIndirectByVal())
2406         Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
2407 
2408       auto *Decl = ParamType->getAsRecordDecl();
2409       if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
2410           Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs)
2411         // When calling the function, the pointer passed in will be the only
2412         // reference to the underlying object. Mark it accordingly.
2413         Attrs.addAttribute(llvm::Attribute::NoAlias);
2414 
2415       // TODO: We could add the byref attribute if not byval, but it would
2416       // require updating many testcases.
2417 
2418       CharUnits Align = AI.getIndirectAlign();
2419 
2420       // In a byval argument, it is important that the required
2421       // alignment of the type is honored, as LLVM might be creating a
2422       // *new* stack object, and needs to know what alignment to give
2423       // it. (Sometimes it can deduce a sensible alignment on its own,
2424       // but not if clang decides it must emit a packed struct, or the
2425       // user specifies increased alignment requirements.)
2426       //
2427       // This is different from indirect *not* byval, where the object
2428       // exists already, and the align attribute is purely
2429       // informative.
2430       assert(!Align.isZero());
2431 
2432       // For now, only add this when we have a byval argument.
2433       // TODO: be less lazy about updating test cases.
2434       if (AI.getIndirectByVal())
2435         Attrs.addAlignmentAttr(Align.getQuantity());
2436 
2437       // byval disables readnone and readonly.
2438       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2439         .removeAttribute(llvm::Attribute::ReadNone);
2440 
2441       break;
2442     }
2443     case ABIArgInfo::IndirectAliased: {
2444       CharUnits Align = AI.getIndirectAlign();
2445       Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));
2446       Attrs.addAlignmentAttr(Align.getQuantity());
2447       break;
2448     }
2449     case ABIArgInfo::Ignore:
2450     case ABIArgInfo::Expand:
2451     case ABIArgInfo::CoerceAndExpand:
2452       break;
2453 
2454     case ABIArgInfo::InAlloca:
2455       // inalloca disables readnone and readonly.
2456       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2457           .removeAttribute(llvm::Attribute::ReadNone);
2458       continue;
2459     }
2460 
2461     if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2462       QualType PTy = RefTy->getPointeeType();
2463       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2464         Attrs.addDereferenceableAttr(
2465             getMinimumObjectSize(PTy).getQuantity());
2466       if (getContext().getTargetAddressSpace(PTy) == 0 &&
2467           !CodeGenOpts.NullPointerIsValid)
2468         Attrs.addAttribute(llvm::Attribute::NonNull);
2469       if (PTy->isObjectType()) {
2470         llvm::Align Alignment =
2471             getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
2472         Attrs.addAlignmentAttr(Alignment);
2473       }
2474     }
2475 
2476     switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2477     case ParameterABI::Ordinary:
2478       break;
2479 
2480     case ParameterABI::SwiftIndirectResult: {
2481       // Add 'sret' if we haven't already used it for something, but
2482       // only if the result is void.
2483       if (!hasUsedSRet && RetTy->isVoidType()) {
2484         Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));
2485         hasUsedSRet = true;
2486       }
2487 
2488       // Add 'noalias' in either case.
2489       Attrs.addAttribute(llvm::Attribute::NoAlias);
2490 
2491       // Add 'dereferenceable' and 'alignment'.
2492       auto PTy = ParamType->getPointeeType();
2493       if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2494         auto info = getContext().getTypeInfoInChars(PTy);
2495         Attrs.addDereferenceableAttr(info.Width.getQuantity());
2496         Attrs.addAlignmentAttr(info.Align.getAsAlign());
2497       }
2498       break;
2499     }
2500 
2501     case ParameterABI::SwiftErrorResult:
2502       Attrs.addAttribute(llvm::Attribute::SwiftError);
2503       break;
2504 
2505     case ParameterABI::SwiftContext:
2506       Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2507       break;
2508 
2509     case ParameterABI::SwiftAsyncContext:
2510       Attrs.addAttribute(llvm::Attribute::SwiftAsync);
2511       break;
2512     }
2513 
2514     if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2515       Attrs.addAttribute(llvm::Attribute::NoCapture);
2516 
2517     if (Attrs.hasAttributes()) {
2518       unsigned FirstIRArg, NumIRArgs;
2519       std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2520       for (unsigned i = 0; i < NumIRArgs; i++)
2521         ArgAttrs[FirstIRArg + i] =
2522             llvm::AttributeSet::get(getLLVMContext(), Attrs);
2523     }
2524   }
2525   assert(ArgNo == FI.arg_size());
2526 
2527   AttrList = llvm::AttributeList::get(
2528       getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2529       llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2530 }
2531 
2532 /// An argument came in as a promoted argument; demote it back to its
2533 /// declared type.
2534 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2535                                          const VarDecl *var,
2536                                          llvm::Value *value) {
2537   llvm::Type *varType = CGF.ConvertType(var->getType());
2538 
2539   // This can happen with promotions that actually don't change the
2540   // underlying type, like the enum promotions.
2541   if (value->getType() == varType) return value;
2542 
2543   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2544          && "unexpected promotion type");
2545 
2546   if (isa<llvm::IntegerType>(varType))
2547     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2548 
2549   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2550 }
2551 
2552 /// Returns the attribute (either parameter attribute, or function
2553 /// attribute), which declares argument ArgNo to be non-null.
2554 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2555                                          QualType ArgType, unsigned ArgNo) {
2556   // FIXME: __attribute__((nonnull)) can also be applied to:
2557   //   - references to pointers, where the pointee is known to be
2558   //     nonnull (apparently a Clang extension)
2559   //   - transparent unions containing pointers
2560   // In the former case, LLVM IR cannot represent the constraint. In
2561   // the latter case, we have no guarantee that the transparent union
2562   // is in fact passed as a pointer.
2563   if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2564     return nullptr;
2565   // First, check attribute on parameter itself.
2566   if (PVD) {
2567     if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2568       return ParmNNAttr;
2569   }
2570   // Check function attributes.
2571   if (!FD)
2572     return nullptr;
2573   for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2574     if (NNAttr->isNonNull(ArgNo))
2575       return NNAttr;
2576   }
2577   return nullptr;
2578 }
2579 
2580 namespace {
2581   struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2582     Address Temp;
2583     Address Arg;
2584     CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2585     void Emit(CodeGenFunction &CGF, Flags flags) override {
2586       llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2587       CGF.Builder.CreateStore(errorValue, Arg);
2588     }
2589   };
2590 }
2591 
2592 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2593                                          llvm::Function *Fn,
2594                                          const FunctionArgList &Args) {
2595   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2596     // Naked functions don't have prologues.
2597     return;
2598 
2599   // If this is an implicit-return-zero function, go ahead and
2600   // initialize the return value.  TODO: it might be nice to have
2601   // a more general mechanism for this that didn't require synthesized
2602   // return statements.
2603   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2604     if (FD->hasImplicitReturnZero()) {
2605       QualType RetTy = FD->getReturnType().getUnqualifiedType();
2606       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2607       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2608       Builder.CreateStore(Zero, ReturnValue);
2609     }
2610   }
2611 
2612   // FIXME: We no longer need the types from FunctionArgList; lift up and
2613   // simplify.
2614 
2615   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2616   assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
2617 
2618   // If we're using inalloca, all the memory arguments are GEPs off of the last
2619   // parameter, which is a pointer to the complete memory area.
2620   Address ArgStruct = Address::invalid();
2621   if (IRFunctionArgs.hasInallocaArg()) {
2622     ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
2623                         FI.getArgStructAlignment());
2624 
2625     assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2626   }
2627 
2628   // Name the struct return parameter.
2629   if (IRFunctionArgs.hasSRetArg()) {
2630     auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
2631     AI->setName("agg.result");
2632     AI->addAttr(llvm::Attribute::NoAlias);
2633   }
2634 
2635   // Track if we received the parameter as a pointer (indirect, byval, or
2636   // inalloca).  If already have a pointer, EmitParmDecl doesn't need to copy it
2637   // into a local alloca for us.
2638   SmallVector<ParamValue, 16> ArgVals;
2639   ArgVals.reserve(Args.size());
2640 
2641   // Create a pointer value for every parameter declaration.  This usually
2642   // entails copying one or more LLVM IR arguments into an alloca.  Don't push
2643   // any cleanups or do anything that might unwind.  We do that separately, so
2644   // we can push the cleanups in the correct order for the ABI.
2645   assert(FI.arg_size() == Args.size() &&
2646          "Mismatch between function signature & arguments.");
2647   unsigned ArgNo = 0;
2648   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2649   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2650        i != e; ++i, ++info_it, ++ArgNo) {
2651     const VarDecl *Arg = *i;
2652     const ABIArgInfo &ArgI = info_it->info;
2653 
2654     bool isPromoted =
2655       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2656     // We are converting from ABIArgInfo type to VarDecl type directly, unless
2657     // the parameter is promoted. In this case we convert to
2658     // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2659     QualType Ty = isPromoted ? info_it->type : Arg->getType();
2660     assert(hasScalarEvaluationKind(Ty) ==
2661            hasScalarEvaluationKind(Arg->getType()));
2662 
2663     unsigned FirstIRArg, NumIRArgs;
2664     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2665 
2666     switch (ArgI.getKind()) {
2667     case ABIArgInfo::InAlloca: {
2668       assert(NumIRArgs == 0);
2669       auto FieldIndex = ArgI.getInAllocaFieldIndex();
2670       Address V =
2671           Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2672       if (ArgI.getInAllocaIndirect())
2673         V = Address(Builder.CreateLoad(V),
2674                     getContext().getTypeAlignInChars(Ty));
2675       ArgVals.push_back(ParamValue::forIndirect(V));
2676       break;
2677     }
2678 
2679     case ABIArgInfo::Indirect:
2680     case ABIArgInfo::IndirectAliased: {
2681       assert(NumIRArgs == 1);
2682       Address ParamAddr =
2683           Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());
2684 
2685       if (!hasScalarEvaluationKind(Ty)) {
2686         // Aggregates and complex variables are accessed by reference. All we
2687         // need to do is realign the value, if requested. Also, if the address
2688         // may be aliased, copy it to ensure that the parameter variable is
2689         // mutable and has a unique adress, as C requires.
2690         Address V = ParamAddr;
2691         if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
2692           Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2693 
2694           // Copy from the incoming argument pointer to the temporary with the
2695           // appropriate alignment.
2696           //
2697           // FIXME: We should have a common utility for generating an aggregate
2698           // copy.
2699           CharUnits Size = getContext().getTypeSizeInChars(Ty);
2700           Builder.CreateMemCpy(
2701               AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
2702               ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
2703               llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
2704           V = AlignedTemp;
2705         }
2706         ArgVals.push_back(ParamValue::forIndirect(V));
2707       } else {
2708         // Load scalar value from indirect argument.
2709         llvm::Value *V =
2710             EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2711 
2712         if (isPromoted)
2713           V = emitArgumentDemotion(*this, Arg, V);
2714         ArgVals.push_back(ParamValue::forDirect(V));
2715       }
2716       break;
2717     }
2718 
2719     case ABIArgInfo::Extend:
2720     case ABIArgInfo::Direct: {
2721       auto AI = Fn->getArg(FirstIRArg);
2722       llvm::Type *LTy = ConvertType(Arg->getType());
2723 
2724       // Prepare parameter attributes. So far, only attributes for pointer
2725       // parameters are prepared. See
2726       // http://llvm.org/docs/LangRef.html#paramattrs.
2727       if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
2728           ArgI.getCoerceToType()->isPointerTy()) {
2729         assert(NumIRArgs == 1);
2730 
2731         if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2732           // Set `nonnull` attribute if any.
2733           if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2734                              PVD->getFunctionScopeIndex()) &&
2735               !CGM.getCodeGenOpts().NullPointerIsValid)
2736             AI->addAttr(llvm::Attribute::NonNull);
2737 
2738           QualType OTy = PVD->getOriginalType();
2739           if (const auto *ArrTy =
2740               getContext().getAsConstantArrayType(OTy)) {
2741             // A C99 array parameter declaration with the static keyword also
2742             // indicates dereferenceability, and if the size is constant we can
2743             // use the dereferenceable attribute (which requires the size in
2744             // bytes).
2745             if (ArrTy->getSizeModifier() == ArrayType::Static) {
2746               QualType ETy = ArrTy->getElementType();
2747               llvm::Align Alignment =
2748                   CGM.getNaturalTypeAlignment(ETy).getAsAlign();
2749               AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2750               uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2751               if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2752                   ArrSize) {
2753                 llvm::AttrBuilder Attrs;
2754                 Attrs.addDereferenceableAttr(
2755                     getContext().getTypeSizeInChars(ETy).getQuantity() *
2756                     ArrSize);
2757                 AI->addAttrs(Attrs);
2758               } else if (getContext().getTargetInfo().getNullPointerValue(
2759                              ETy.getAddressSpace()) == 0 &&
2760                          !CGM.getCodeGenOpts().NullPointerIsValid) {
2761                 AI->addAttr(llvm::Attribute::NonNull);
2762               }
2763             }
2764           } else if (const auto *ArrTy =
2765                      getContext().getAsVariableArrayType(OTy)) {
2766             // For C99 VLAs with the static keyword, we don't know the size so
2767             // we can't use the dereferenceable attribute, but in addrspace(0)
2768             // we know that it must be nonnull.
2769             if (ArrTy->getSizeModifier() == VariableArrayType::Static) {
2770               QualType ETy = ArrTy->getElementType();
2771               llvm::Align Alignment =
2772                   CGM.getNaturalTypeAlignment(ETy).getAsAlign();
2773               AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2774               if (!getContext().getTargetAddressSpace(ETy) &&
2775                   !CGM.getCodeGenOpts().NullPointerIsValid)
2776                 AI->addAttr(llvm::Attribute::NonNull);
2777             }
2778           }
2779 
2780           // Set `align` attribute if any.
2781           const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2782           if (!AVAttr)
2783             if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2784               AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2785           if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2786             // If alignment-assumption sanitizer is enabled, we do *not* add
2787             // alignment attribute here, but emit normal alignment assumption,
2788             // so the UBSAN check could function.
2789             llvm::ConstantInt *AlignmentCI =
2790                 cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
2791             unsigned AlignmentInt =
2792                 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
2793             if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
2794               AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
2795               AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(
2796                   llvm::Align(AlignmentInt)));
2797             }
2798           }
2799         }
2800 
2801         // Set 'noalias' if an argument type has the `restrict` qualifier.
2802         if (Arg->getType().isRestrictQualified())
2803           AI->addAttr(llvm::Attribute::NoAlias);
2804       }
2805 
2806       // Prepare the argument value. If we have the trivial case, handle it
2807       // with no muss and fuss.
2808       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2809           ArgI.getCoerceToType() == ConvertType(Ty) &&
2810           ArgI.getDirectOffset() == 0) {
2811         assert(NumIRArgs == 1);
2812 
2813         // LLVM expects swifterror parameters to be used in very restricted
2814         // ways.  Copy the value into a less-restricted temporary.
2815         llvm::Value *V = AI;
2816         if (FI.getExtParameterInfo(ArgNo).getABI()
2817               == ParameterABI::SwiftErrorResult) {
2818           QualType pointeeTy = Ty->getPointeeType();
2819           assert(pointeeTy->isPointerType());
2820           Address temp =
2821             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2822           Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2823           llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2824           Builder.CreateStore(incomingErrorValue, temp);
2825           V = temp.getPointer();
2826 
2827           // Push a cleanup to copy the value back at the end of the function.
2828           // The convention does not guarantee that the value will be written
2829           // back if the function exits with an unwind exception.
2830           EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2831         }
2832 
2833         // Ensure the argument is the correct type.
2834         if (V->getType() != ArgI.getCoerceToType())
2835           V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2836 
2837         if (isPromoted)
2838           V = emitArgumentDemotion(*this, Arg, V);
2839 
2840         // Because of merging of function types from multiple decls it is
2841         // possible for the type of an argument to not match the corresponding
2842         // type in the function type. Since we are codegening the callee
2843         // in here, add a cast to the argument type.
2844         llvm::Type *LTy = ConvertType(Arg->getType());
2845         if (V->getType() != LTy)
2846           V = Builder.CreateBitCast(V, LTy);
2847 
2848         ArgVals.push_back(ParamValue::forDirect(V));
2849         break;
2850       }
2851 
2852       // VLST arguments are coerced to VLATs at the function boundary for
2853       // ABI consistency. If this is a VLST that was coerced to
2854       // a VLAT at the function boundary and the types match up, use
2855       // llvm.experimental.vector.extract to convert back to the original
2856       // VLST.
2857       if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
2858         auto *Coerced = Fn->getArg(FirstIRArg);
2859         if (auto *VecTyFrom =
2860                 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
2861           if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
2862             llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2863 
2864             assert(NumIRArgs == 1);
2865             Coerced->setName(Arg->getName() + ".coerce");
2866             ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector(
2867                 VecTyTo, Coerced, Zero, "castFixedSve")));
2868             break;
2869           }
2870         }
2871       }
2872 
2873       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2874                                      Arg->getName());
2875 
2876       // Pointer to store into.
2877       Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2878 
2879       // Fast-isel and the optimizer generally like scalar values better than
2880       // FCAs, so we flatten them if this is safe to do for this argument.
2881       llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2882       if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2883           STy->getNumElements() > 1) {
2884         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2885         llvm::Type *DstTy = Ptr.getElementType();
2886         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2887 
2888         Address AddrToStoreInto = Address::invalid();
2889         if (SrcSize <= DstSize) {
2890           AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2891         } else {
2892           AddrToStoreInto =
2893             CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2894         }
2895 
2896         assert(STy->getNumElements() == NumIRArgs);
2897         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2898           auto AI = Fn->getArg(FirstIRArg + i);
2899           AI->setName(Arg->getName() + ".coerce" + Twine(i));
2900           Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
2901           Builder.CreateStore(AI, EltPtr);
2902         }
2903 
2904         if (SrcSize > DstSize) {
2905           Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2906         }
2907 
2908       } else {
2909         // Simple case, just do a coerced store of the argument into the alloca.
2910         assert(NumIRArgs == 1);
2911         auto AI = Fn->getArg(FirstIRArg);
2912         AI->setName(Arg->getName() + ".coerce");
2913         CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
2914       }
2915 
2916       // Match to what EmitParmDecl is expecting for this type.
2917       if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2918         llvm::Value *V =
2919             EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2920         if (isPromoted)
2921           V = emitArgumentDemotion(*this, Arg, V);
2922         ArgVals.push_back(ParamValue::forDirect(V));
2923       } else {
2924         ArgVals.push_back(ParamValue::forIndirect(Alloca));
2925       }
2926       break;
2927     }
2928 
2929     case ABIArgInfo::CoerceAndExpand: {
2930       // Reconstruct into a temporary.
2931       Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2932       ArgVals.push_back(ParamValue::forIndirect(alloca));
2933 
2934       auto coercionType = ArgI.getCoerceAndExpandType();
2935       alloca = Builder.CreateElementBitCast(alloca, coercionType);
2936 
2937       unsigned argIndex = FirstIRArg;
2938       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2939         llvm::Type *eltType = coercionType->getElementType(i);
2940         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2941           continue;
2942 
2943         auto eltAddr = Builder.CreateStructGEP(alloca, i);
2944         auto elt = Fn->getArg(argIndex++);
2945         Builder.CreateStore(elt, eltAddr);
2946       }
2947       assert(argIndex == FirstIRArg + NumIRArgs);
2948       break;
2949     }
2950 
2951     case ABIArgInfo::Expand: {
2952       // If this structure was expanded into multiple arguments then
2953       // we need to create a temporary and reconstruct it from the
2954       // arguments.
2955       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2956       LValue LV = MakeAddrLValue(Alloca, Ty);
2957       ArgVals.push_back(ParamValue::forIndirect(Alloca));
2958 
2959       auto FnArgIter = Fn->arg_begin() + FirstIRArg;
2960       ExpandTypeFromArgs(Ty, LV, FnArgIter);
2961       assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
2962       for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2963         auto AI = Fn->getArg(FirstIRArg + i);
2964         AI->setName(Arg->getName() + "." + Twine(i));
2965       }
2966       break;
2967     }
2968 
2969     case ABIArgInfo::Ignore:
2970       assert(NumIRArgs == 0);
2971       // Initialize the local variable appropriately.
2972       if (!hasScalarEvaluationKind(Ty)) {
2973         ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2974       } else {
2975         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2976         ArgVals.push_back(ParamValue::forDirect(U));
2977       }
2978       break;
2979     }
2980   }
2981 
2982   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2983     for (int I = Args.size() - 1; I >= 0; --I)
2984       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2985   } else {
2986     for (unsigned I = 0, E = Args.size(); I != E; ++I)
2987       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2988   }
2989 }
2990 
2991 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2992   while (insn->use_empty()) {
2993     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2994     if (!bitcast) return;
2995 
2996     // This is "safe" because we would have used a ConstantExpr otherwise.
2997     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2998     bitcast->eraseFromParent();
2999   }
3000 }
3001 
3002 /// Try to emit a fused autorelease of a return result.
3003 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
3004                                                     llvm::Value *result) {
3005   // We must be immediately followed the cast.
3006   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
3007   if (BB->empty()) return nullptr;
3008   if (&BB->back() != result) return nullptr;
3009 
3010   llvm::Type *resultType = result->getType();
3011 
3012   // result is in a BasicBlock and is therefore an Instruction.
3013   llvm::Instruction *generator = cast<llvm::Instruction>(result);
3014 
3015   SmallVector<llvm::Instruction *, 4> InstsToKill;
3016 
3017   // Look for:
3018   //  %generator = bitcast %type1* %generator2 to %type2*
3019   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3020     // We would have emitted this as a constant if the operand weren't
3021     // an Instruction.
3022     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
3023 
3024     // Require the generator to be immediately followed by the cast.
3025     if (generator->getNextNode() != bitcast)
3026       return nullptr;
3027 
3028     InstsToKill.push_back(bitcast);
3029   }
3030 
3031   // Look for:
3032   //   %generator = call i8* @objc_retain(i8* %originalResult)
3033   // or
3034   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
3035   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3036   if (!call) return nullptr;
3037 
3038   bool doRetainAutorelease;
3039 
3040   if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
3041     doRetainAutorelease = true;
3042   } else if (call->getCalledOperand() ==
3043              CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
3044     doRetainAutorelease = false;
3045 
3046     // If we emitted an assembly marker for this call (and the
3047     // ARCEntrypoints field should have been set if so), go looking
3048     // for that call.  If we can't find it, we can't do this
3049     // optimization.  But it should always be the immediately previous
3050     // instruction, unless we needed bitcasts around the call.
3051     if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
3052       llvm::Instruction *prev = call->getPrevNode();
3053       assert(prev);
3054       if (isa<llvm::BitCastInst>(prev)) {
3055         prev = prev->getPrevNode();
3056         assert(prev);
3057       }
3058       assert(isa<llvm::CallInst>(prev));
3059       assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
3060              CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
3061       InstsToKill.push_back(prev);
3062     }
3063   } else {
3064     return nullptr;
3065   }
3066 
3067   result = call->getArgOperand(0);
3068   InstsToKill.push_back(call);
3069 
3070   // Keep killing bitcasts, for sanity.  Note that we no longer care
3071   // about precise ordering as long as there's exactly one use.
3072   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
3073     if (!bitcast->hasOneUse()) break;
3074     InstsToKill.push_back(bitcast);
3075     result = bitcast->getOperand(0);
3076   }
3077 
3078   // Delete all the unnecessary instructions, from latest to earliest.
3079   for (auto *I : InstsToKill)
3080     I->eraseFromParent();
3081 
3082   // Do the fused retain/autorelease if we were asked to.
3083   if (doRetainAutorelease)
3084     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
3085 
3086   // Cast back to the result type.
3087   return CGF.Builder.CreateBitCast(result, resultType);
3088 }
3089 
3090 /// If this is a +1 of the value of an immutable 'self', remove it.
3091 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
3092                                           llvm::Value *result) {
3093   // This is only applicable to a method with an immutable 'self'.
3094   const ObjCMethodDecl *method =
3095     dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
3096   if (!method) return nullptr;
3097   const VarDecl *self = method->getSelfDecl();
3098   if (!self->getType().isConstQualified()) return nullptr;
3099 
3100   // Look for a retain call.
3101   llvm::CallInst *retainCall =
3102     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
3103   if (!retainCall || retainCall->getCalledOperand() !=
3104                          CGF.CGM.getObjCEntrypoints().objc_retain)
3105     return nullptr;
3106 
3107   // Look for an ordinary load of 'self'.
3108   llvm::Value *retainedValue = retainCall->getArgOperand(0);
3109   llvm::LoadInst *load =
3110     dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
3111   if (!load || load->isAtomic() || load->isVolatile() ||
3112       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
3113     return nullptr;
3114 
3115   // Okay!  Burn it all down.  This relies for correctness on the
3116   // assumption that the retain is emitted as part of the return and
3117   // that thereafter everything is used "linearly".
3118   llvm::Type *resultType = result->getType();
3119   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
3120   assert(retainCall->use_empty());
3121   retainCall->eraseFromParent();
3122   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
3123 
3124   return CGF.Builder.CreateBitCast(load, resultType);
3125 }
3126 
3127 /// Emit an ARC autorelease of the result of a function.
3128 ///
3129 /// \return the value to actually return from the function
3130 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
3131                                             llvm::Value *result) {
3132   // If we're returning 'self', kill the initial retain.  This is a
3133   // heuristic attempt to "encourage correctness" in the really unfortunate
3134   // case where we have a return of self during a dealloc and we desperately
3135   // need to avoid the possible autorelease.
3136   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
3137     return self;
3138 
3139   // At -O0, try to emit a fused retain/autorelease.
3140   if (CGF.shouldUseFusedARCCalls())
3141     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
3142       return fused;
3143 
3144   return CGF.EmitARCAutoreleaseReturnValue(result);
3145 }
3146 
3147 /// Heuristically search for a dominating store to the return-value slot.
3148 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
3149   // Check if a User is a store which pointerOperand is the ReturnValue.
3150   // We are looking for stores to the ReturnValue, not for stores of the
3151   // ReturnValue to some other location.
3152   auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
3153     auto *SI = dyn_cast<llvm::StoreInst>(U);
3154     if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
3155       return nullptr;
3156     // These aren't actually possible for non-coerced returns, and we
3157     // only care about non-coerced returns on this code path.
3158     assert(!SI->isAtomic() && !SI->isVolatile());
3159     return SI;
3160   };
3161   // If there are multiple uses of the return-value slot, just check
3162   // for something immediately preceding the IP.  Sometimes this can
3163   // happen with how we generate implicit-returns; it can also happen
3164   // with noreturn cleanups.
3165   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
3166     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3167     if (IP->empty()) return nullptr;
3168     llvm::Instruction *I = &IP->back();
3169 
3170     // Skip lifetime markers
3171     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
3172                                             IE = IP->rend();
3173          II != IE; ++II) {
3174       if (llvm::IntrinsicInst *Intrinsic =
3175               dyn_cast<llvm::IntrinsicInst>(&*II)) {
3176         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
3177           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
3178           ++II;
3179           if (II == IE)
3180             break;
3181           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
3182             continue;
3183         }
3184       }
3185       I = &*II;
3186       break;
3187     }
3188 
3189     return GetStoreIfValid(I);
3190   }
3191 
3192   llvm::StoreInst *store =
3193       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
3194   if (!store) return nullptr;
3195 
3196   // Now do a first-and-dirty dominance check: just walk up the
3197   // single-predecessors chain from the current insertion point.
3198   llvm::BasicBlock *StoreBB = store->getParent();
3199   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3200   while (IP != StoreBB) {
3201     if (!(IP = IP->getSinglePredecessor()))
3202       return nullptr;
3203   }
3204 
3205   // Okay, the store's basic block dominates the insertion point; we
3206   // can do our thing.
3207   return store;
3208 }
3209 
3210 // Helper functions for EmitCMSEClearRecord
3211 
3212 // Set the bits corresponding to a field having width `BitWidth` and located at
3213 // offset `BitOffset` (from the least significant bit) within a storage unit of
3214 // `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
3215 // Use little-endian layout, i.e.`Bits[0]` is the LSB.
3216 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset,
3217                         int BitWidth, int CharWidth) {
3218   assert(CharWidth <= 64);
3219   assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3220 
3221   int Pos = 0;
3222   if (BitOffset >= CharWidth) {
3223     Pos += BitOffset / CharWidth;
3224     BitOffset = BitOffset % CharWidth;
3225   }
3226 
3227   const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
3228   if (BitOffset + BitWidth >= CharWidth) {
3229     Bits[Pos++] |= (Used << BitOffset) & Used;
3230     BitWidth -= CharWidth - BitOffset;
3231     BitOffset = 0;
3232   }
3233 
3234   while (BitWidth >= CharWidth) {
3235     Bits[Pos++] = Used;
3236     BitWidth -= CharWidth;
3237   }
3238 
3239   if (BitWidth > 0)
3240     Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
3241 }
3242 
3243 // Set the bits corresponding to a field having width `BitWidth` and located at
3244 // offset `BitOffset` (from the least significant bit) within a storage unit of
3245 // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of
3246 // `Bits` corresponds to one target byte. Use target endian layout.
3247 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset,
3248                         int StorageSize, int BitOffset, int BitWidth,
3249                         int CharWidth, bool BigEndian) {
3250 
3251   SmallVector<uint64_t, 8> TmpBits(StorageSize);
3252   setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3253 
3254   if (BigEndian)
3255     std::reverse(TmpBits.begin(), TmpBits.end());
3256 
3257   for (uint64_t V : TmpBits)
3258     Bits[StorageOffset++] |= V;
3259 }
3260 
3261 static void setUsedBits(CodeGenModule &, QualType, int,
3262                         SmallVectorImpl<uint64_t> &);
3263 
3264 // Set the bits in `Bits`, which correspond to the value representations of
3265 // the actual members of the record type `RTy`. Note that this function does
3266 // not handle base classes, virtual tables, etc, since they cannot happen in
3267 // CMSE function arguments or return. The bit mask corresponds to the target
3268 // memory layout, i.e. it's endian dependent.
3269 static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
3270                         SmallVectorImpl<uint64_t> &Bits) {
3271   ASTContext &Context = CGM.getContext();
3272   int CharWidth = Context.getCharWidth();
3273   const RecordDecl *RD = RTy->getDecl()->getDefinition();
3274   const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
3275   const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
3276 
3277   int Idx = 0;
3278   for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
3279     const FieldDecl *F = *I;
3280 
3281     if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
3282         F->getType()->isIncompleteArrayType())
3283       continue;
3284 
3285     if (F->isBitField()) {
3286       const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
3287       setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
3288                   BFI.StorageSize / CharWidth, BFI.Offset,
3289                   BFI.Size, CharWidth,
3290                   CGM.getDataLayout().isBigEndian());
3291       continue;
3292     }
3293 
3294     setUsedBits(CGM, F->getType(),
3295                 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
3296   }
3297 }
3298 
3299 // Set the bits in `Bits`, which correspond to the value representations of
3300 // the elements of an array type `ATy`.
3301 static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
3302                         int Offset, SmallVectorImpl<uint64_t> &Bits) {
3303   const ASTContext &Context = CGM.getContext();
3304 
3305   QualType ETy = Context.getBaseElementType(ATy);
3306   int Size = Context.getTypeSizeInChars(ETy).getQuantity();
3307   SmallVector<uint64_t, 4> TmpBits(Size);
3308   setUsedBits(CGM, ETy, 0, TmpBits);
3309 
3310   for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
3311     auto Src = TmpBits.begin();
3312     auto Dst = Bits.begin() + Offset + I * Size;
3313     for (int J = 0; J < Size; ++J)
3314       *Dst++ |= *Src++;
3315   }
3316 }
3317 
3318 // Set the bits in `Bits`, which correspond to the value representations of
3319 // the type `QTy`.
3320 static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
3321                         SmallVectorImpl<uint64_t> &Bits) {
3322   if (const auto *RTy = QTy->getAs<RecordType>())
3323     return setUsedBits(CGM, RTy, Offset, Bits);
3324 
3325   ASTContext &Context = CGM.getContext();
3326   if (const auto *ATy = Context.getAsConstantArrayType(QTy))
3327     return setUsedBits(CGM, ATy, Offset, Bits);
3328 
3329   int Size = Context.getTypeSizeInChars(QTy).getQuantity();
3330   if (Size <= 0)
3331     return;
3332 
3333   std::fill_n(Bits.begin() + Offset, Size,
3334               (uint64_t(1) << Context.getCharWidth()) - 1);
3335 }
3336 
3337 static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
3338                                    int Pos, int Size, int CharWidth,
3339                                    bool BigEndian) {
3340   assert(Size > 0);
3341   uint64_t Mask = 0;
3342   if (BigEndian) {
3343     for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
3344          ++P)
3345       Mask = (Mask << CharWidth) | *P;
3346   } else {
3347     auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3348     do
3349       Mask = (Mask << CharWidth) | *--P;
3350     while (P != End);
3351   }
3352   return Mask;
3353 }
3354 
3355 // Emit code to clear the bits in a record, which aren't a part of any user
3356 // declared member, when the record is a function return.
3357 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3358                                                   llvm::IntegerType *ITy,
3359                                                   QualType QTy) {
3360   assert(Src->getType() == ITy);
3361   assert(ITy->getScalarSizeInBits() <= 64);
3362 
3363   const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3364   int Size = DataLayout.getTypeStoreSize(ITy);
3365   SmallVector<uint64_t, 4> Bits(Size);
3366   setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3367 
3368   int CharWidth = CGM.getContext().getCharWidth();
3369   uint64_t Mask =
3370       buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());
3371 
3372   return Builder.CreateAnd(Src, Mask, "cmse.clear");
3373 }
3374 
3375 // Emit code to clear the bits in a record, which aren't a part of any user
3376 // declared member, when the record is a function argument.
3377 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3378                                                   llvm::ArrayType *ATy,
3379                                                   QualType QTy) {
3380   const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3381   int Size = DataLayout.getTypeStoreSize(ATy);
3382   SmallVector<uint64_t, 16> Bits(Size);
3383   setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3384 
3385   // Clear each element of the LLVM array.
3386   int CharWidth = CGM.getContext().getCharWidth();
3387   int CharsPerElt =
3388       ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3389   int MaskIndex = 0;
3390   llvm::Value *R = llvm::UndefValue::get(ATy);
3391   for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3392     uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
3393                                        DataLayout.isBigEndian());
3394     MaskIndex += CharsPerElt;
3395     llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3396     llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3397     R = Builder.CreateInsertValue(R, T1, I);
3398   }
3399 
3400   return R;
3401 }
3402 
3403 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
3404                                          bool EmitRetDbgLoc,
3405                                          SourceLocation EndLoc) {
3406   if (FI.isNoReturn()) {
3407     // Noreturn functions don't return.
3408     EmitUnreachable(EndLoc);
3409     return;
3410   }
3411 
3412   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3413     // Naked functions don't have epilogues.
3414     Builder.CreateUnreachable();
3415     return;
3416   }
3417 
3418   // Functions with no result always return void.
3419   if (!ReturnValue.isValid()) {
3420     Builder.CreateRetVoid();
3421     return;
3422   }
3423 
3424   llvm::DebugLoc RetDbgLoc;
3425   llvm::Value *RV = nullptr;
3426   QualType RetTy = FI.getReturnType();
3427   const ABIArgInfo &RetAI = FI.getReturnInfo();
3428 
3429   switch (RetAI.getKind()) {
3430   case ABIArgInfo::InAlloca:
3431     // Aggregrates get evaluated directly into the destination.  Sometimes we
3432     // need to return the sret value in a register, though.
3433     assert(hasAggregateEvaluationKind(RetTy));
3434     if (RetAI.getInAllocaSRet()) {
3435       llvm::Function::arg_iterator EI = CurFn->arg_end();
3436       --EI;
3437       llvm::Value *ArgStruct = &*EI;
3438       llvm::Value *SRet = Builder.CreateStructGEP(
3439           nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
3440       llvm::Type *Ty =
3441           cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
3442       RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret");
3443     }
3444     break;
3445 
3446   case ABIArgInfo::Indirect: {
3447     auto AI = CurFn->arg_begin();
3448     if (RetAI.isSRetAfterThis())
3449       ++AI;
3450     switch (getEvaluationKind(RetTy)) {
3451     case TEK_Complex: {
3452       ComplexPairTy RT =
3453         EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
3454       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
3455                          /*isInit*/ true);
3456       break;
3457     }
3458     case TEK_Aggregate:
3459       // Do nothing; aggregrates get evaluated directly into the destination.
3460       break;
3461     case TEK_Scalar:
3462       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
3463                         MakeNaturalAlignAddrLValue(&*AI, RetTy),
3464                         /*isInit*/ true);
3465       break;
3466     }
3467     break;
3468   }
3469 
3470   case ABIArgInfo::Extend:
3471   case ABIArgInfo::Direct:
3472     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
3473         RetAI.getDirectOffset() == 0) {
3474       // The internal return value temp always will have pointer-to-return-type
3475       // type, just do a load.
3476 
3477       // If there is a dominating store to ReturnValue, we can elide
3478       // the load, zap the store, and usually zap the alloca.
3479       if (llvm::StoreInst *SI =
3480               findDominatingStoreToReturnValue(*this)) {
3481         // Reuse the debug location from the store unless there is
3482         // cleanup code to be emitted between the store and return
3483         // instruction.
3484         if (EmitRetDbgLoc && !AutoreleaseResult)
3485           RetDbgLoc = SI->getDebugLoc();
3486         // Get the stored value and nuke the now-dead store.
3487         RV = SI->getValueOperand();
3488         SI->eraseFromParent();
3489 
3490       // Otherwise, we have to do a simple load.
3491       } else {
3492         RV = Builder.CreateLoad(ReturnValue);
3493       }
3494     } else {
3495       // If the value is offset in memory, apply the offset now.
3496       Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
3497 
3498       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
3499     }
3500 
3501     // In ARC, end functions that return a retainable type with a call
3502     // to objc_autoreleaseReturnValue.
3503     if (AutoreleaseResult) {
3504 #ifndef NDEBUG
3505       // Type::isObjCRetainabletype has to be called on a QualType that hasn't
3506       // been stripped of the typedefs, so we cannot use RetTy here. Get the
3507       // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
3508       // CurCodeDecl or BlockInfo.
3509       QualType RT;
3510 
3511       if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3512         RT = FD->getReturnType();
3513       else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3514         RT = MD->getReturnType();
3515       else if (isa<BlockDecl>(CurCodeDecl))
3516         RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3517       else
3518         llvm_unreachable("Unexpected function/method type");
3519 
3520       assert(getLangOpts().ObjCAutoRefCount &&
3521              !FI.isReturnsRetained() &&
3522              RT->isObjCRetainableType());
3523 #endif
3524       RV = emitAutoreleaseOfResult(*this, RV);
3525     }
3526 
3527     break;
3528 
3529   case ABIArgInfo::Ignore:
3530     break;
3531 
3532   case ABIArgInfo::CoerceAndExpand: {
3533     auto coercionType = RetAI.getCoerceAndExpandType();
3534 
3535     // Load all of the coerced elements out into results.
3536     llvm::SmallVector<llvm::Value*, 4> results;
3537     Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
3538     for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3539       auto coercedEltType = coercionType->getElementType(i);
3540       if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3541         continue;
3542 
3543       auto eltAddr = Builder.CreateStructGEP(addr, i);
3544       auto elt = Builder.CreateLoad(eltAddr);
3545       results.push_back(elt);
3546     }
3547 
3548     // If we have one result, it's the single direct result type.
3549     if (results.size() == 1) {
3550       RV = results[0];
3551 
3552     // Otherwise, we need to make a first-class aggregate.
3553     } else {
3554       // Construct a return type that lacks padding elements.
3555       llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3556 
3557       RV = llvm::UndefValue::get(returnType);
3558       for (unsigned i = 0, e = results.size(); i != e; ++i) {
3559         RV = Builder.CreateInsertValue(RV, results[i], i);
3560       }
3561     }
3562     break;
3563   }
3564   case ABIArgInfo::Expand:
3565   case ABIArgInfo::IndirectAliased:
3566     llvm_unreachable("Invalid ABI kind for return argument");
3567   }
3568 
3569   llvm::Instruction *Ret;
3570   if (RV) {
3571     if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
3572       // For certain return types, clear padding bits, as they may reveal
3573       // sensitive information.
3574       // Small struct/union types are passed as integers.
3575       auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
3576       if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType()))
3577         RV = EmitCMSEClearRecord(RV, ITy, RetTy);
3578     }
3579     EmitReturnValueCheck(RV);
3580     Ret = Builder.CreateRet(RV);
3581   } else {
3582     Ret = Builder.CreateRetVoid();
3583   }
3584 
3585   if (RetDbgLoc)
3586     Ret->setDebugLoc(std::move(RetDbgLoc));
3587 }
3588 
3589 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
3590   // A current decl may not be available when emitting vtable thunks.
3591   if (!CurCodeDecl)
3592     return;
3593 
3594   // If the return block isn't reachable, neither is this check, so don't emit
3595   // it.
3596   if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
3597     return;
3598 
3599   ReturnsNonNullAttr *RetNNAttr = nullptr;
3600   if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
3601     RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3602 
3603   if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3604     return;
3605 
3606   // Prefer the returns_nonnull attribute if it's present.
3607   SourceLocation AttrLoc;
3608   SanitizerMask CheckKind;
3609   SanitizerHandler Handler;
3610   if (RetNNAttr) {
3611     assert(!requiresReturnValueNullabilityCheck() &&
3612            "Cannot check nullability and the nonnull attribute");
3613     AttrLoc = RetNNAttr->getLocation();
3614     CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3615     Handler = SanitizerHandler::NonnullReturn;
3616   } else {
3617     if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3618       if (auto *TSI = DD->getTypeSourceInfo())
3619         if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
3620           AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3621     CheckKind = SanitizerKind::NullabilityReturn;
3622     Handler = SanitizerHandler::NullabilityReturn;
3623   }
3624 
3625   SanitizerScope SanScope(this);
3626 
3627   // Make sure the "return" source location is valid. If we're checking a
3628   // nullability annotation, make sure the preconditions for the check are met.
3629   llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3630   llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3631   llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3632   llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3633   if (requiresReturnValueNullabilityCheck())
3634     CanNullCheck =
3635         Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3636   Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3637   EmitBlock(Check);
3638 
3639   // Now do the null check.
3640   llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3641   llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3642   llvm::Value *DynamicData[] = {SLocPtr};
3643   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3644 
3645   EmitBlock(NoCheck);
3646 
3647 #ifndef NDEBUG
3648   // The return location should not be used after the check has been emitted.
3649   ReturnLocation = Address::invalid();
3650 #endif
3651 }
3652 
3653 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3654   const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3655   return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3656 }
3657 
3658 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3659                                           QualType Ty) {
3660   // FIXME: Generate IR in one pass, rather than going back and fixing up these
3661   // placeholders.
3662   llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3663   llvm::Type *IRPtrTy = IRTy->getPointerTo();
3664   llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3665 
3666   // FIXME: When we generate this IR in one pass, we shouldn't need
3667   // this win32-specific alignment hack.
3668   CharUnits Align = CharUnits::fromQuantity(4);
3669   Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3670 
3671   return AggValueSlot::forAddr(Address(Placeholder, Align),
3672                                Ty.getQualifiers(),
3673                                AggValueSlot::IsNotDestructed,
3674                                AggValueSlot::DoesNotNeedGCBarriers,
3675                                AggValueSlot::IsNotAliased,
3676                                AggValueSlot::DoesNotOverlap);
3677 }
3678 
3679 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3680                                           const VarDecl *param,
3681                                           SourceLocation loc) {
3682   // StartFunction converted the ABI-lowered parameter(s) into a
3683   // local alloca.  We need to turn that into an r-value suitable
3684   // for EmitCall.
3685   Address local = GetAddrOfLocalVar(param);
3686 
3687   QualType type = param->getType();
3688 
3689   if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3690     CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3691   }
3692 
3693   // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3694   // but the argument needs to be the original pointer.
3695   if (type->isReferenceType()) {
3696     args.add(RValue::get(Builder.CreateLoad(local)), type);
3697 
3698   // In ARC, move out of consumed arguments so that the release cleanup
3699   // entered by StartFunction doesn't cause an over-release.  This isn't
3700   // optimal -O0 code generation, but it should get cleaned up when
3701   // optimization is enabled.  This also assumes that delegate calls are
3702   // performed exactly once for a set of arguments, but that should be safe.
3703   } else if (getLangOpts().ObjCAutoRefCount &&
3704              param->hasAttr<NSConsumedAttr>() &&
3705              type->isObjCRetainableType()) {
3706     llvm::Value *ptr = Builder.CreateLoad(local);
3707     auto null =
3708       llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3709     Builder.CreateStore(null, local);
3710     args.add(RValue::get(ptr), type);
3711 
3712   // For the most part, we just need to load the alloca, except that
3713   // aggregate r-values are actually pointers to temporaries.
3714   } else {
3715     args.add(convertTempToRValue(local, type, loc), type);
3716   }
3717 
3718   // Deactivate the cleanup for the callee-destructed param that was pushed.
3719   if (type->isRecordType() && !CurFuncIsThunk &&
3720       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3721       param->needsDestruction(getContext())) {
3722     EHScopeStack::stable_iterator cleanup =
3723         CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3724     assert(cleanup.isValid() &&
3725            "cleanup for callee-destructed param not recorded");
3726     // This unreachable is a temporary marker which will be removed later.
3727     llvm::Instruction *isActive = Builder.CreateUnreachable();
3728     args.addArgCleanupDeactivation(cleanup, isActive);
3729   }
3730 }
3731 
3732 static bool isProvablyNull(llvm::Value *addr) {
3733   return isa<llvm::ConstantPointerNull>(addr);
3734 }
3735 
3736 /// Emit the actual writing-back of a writeback.
3737 static void emitWriteback(CodeGenFunction &CGF,
3738                           const CallArgList::Writeback &writeback) {
3739   const LValue &srcLV = writeback.Source;
3740   Address srcAddr = srcLV.getAddress(CGF);
3741   assert(!isProvablyNull(srcAddr.getPointer()) &&
3742          "shouldn't have writeback for provably null argument");
3743 
3744   llvm::BasicBlock *contBB = nullptr;
3745 
3746   // If the argument wasn't provably non-null, we need to null check
3747   // before doing the store.
3748   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3749                                               CGF.CGM.getDataLayout());
3750   if (!provablyNonNull) {
3751     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3752     contBB = CGF.createBasicBlock("icr.done");
3753 
3754     llvm::Value *isNull =
3755       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3756     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3757     CGF.EmitBlock(writebackBB);
3758   }
3759 
3760   // Load the value to writeback.
3761   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3762 
3763   // Cast it back, in case we're writing an id to a Foo* or something.
3764   value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3765                                     "icr.writeback-cast");
3766 
3767   // Perform the writeback.
3768 
3769   // If we have a "to use" value, it's something we need to emit a use
3770   // of.  This has to be carefully threaded in: if it's done after the
3771   // release it's potentially undefined behavior (and the optimizer
3772   // will ignore it), and if it happens before the retain then the
3773   // optimizer could move the release there.
3774   if (writeback.ToUse) {
3775     assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3776 
3777     // Retain the new value.  No need to block-copy here:  the block's
3778     // being passed up the stack.
3779     value = CGF.EmitARCRetainNonBlock(value);
3780 
3781     // Emit the intrinsic use here.
3782     CGF.EmitARCIntrinsicUse(writeback.ToUse);
3783 
3784     // Load the old value (primitively).
3785     llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3786 
3787     // Put the new value in place (primitively).
3788     CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3789 
3790     // Release the old value.
3791     CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3792 
3793   // Otherwise, we can just do a normal lvalue store.
3794   } else {
3795     CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3796   }
3797 
3798   // Jump to the continuation block.
3799   if (!provablyNonNull)
3800     CGF.EmitBlock(contBB);
3801 }
3802 
3803 static void emitWritebacks(CodeGenFunction &CGF,
3804                            const CallArgList &args) {
3805   for (const auto &I : args.writebacks())
3806     emitWriteback(CGF, I);
3807 }
3808 
3809 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3810                                             const CallArgList &CallArgs) {
3811   ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3812     CallArgs.getCleanupsToDeactivate();
3813   // Iterate in reverse to increase the likelihood of popping the cleanup.
3814   for (const auto &I : llvm::reverse(Cleanups)) {
3815     CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3816     I.IsActiveIP->eraseFromParent();
3817   }
3818 }
3819 
3820 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3821   if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3822     if (uop->getOpcode() == UO_AddrOf)
3823       return uop->getSubExpr();
3824   return nullptr;
3825 }
3826 
3827 /// Emit an argument that's being passed call-by-writeback.  That is,
3828 /// we are passing the address of an __autoreleased temporary; it
3829 /// might be copy-initialized with the current value of the given
3830 /// address, but it will definitely be copied out of after the call.
3831 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3832                              const ObjCIndirectCopyRestoreExpr *CRE) {
3833   LValue srcLV;
3834 
3835   // Make an optimistic effort to emit the address as an l-value.
3836   // This can fail if the argument expression is more complicated.
3837   if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3838     srcLV = CGF.EmitLValue(lvExpr);
3839 
3840   // Otherwise, just emit it as a scalar.
3841   } else {
3842     Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3843 
3844     QualType srcAddrType =
3845       CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3846     srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3847   }
3848   Address srcAddr = srcLV.getAddress(CGF);
3849 
3850   // The dest and src types don't necessarily match in LLVM terms
3851   // because of the crazy ObjC compatibility rules.
3852 
3853   llvm::PointerType *destType =
3854     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3855 
3856   // If the address is a constant null, just pass the appropriate null.
3857   if (isProvablyNull(srcAddr.getPointer())) {
3858     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3859              CRE->getType());
3860     return;
3861   }
3862 
3863   // Create the temporary.
3864   Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3865                                       CGF.getPointerAlign(),
3866                                       "icr.temp");
3867   // Loading an l-value can introduce a cleanup if the l-value is __weak,
3868   // and that cleanup will be conditional if we can't prove that the l-value
3869   // isn't null, so we need to register a dominating point so that the cleanups
3870   // system will make valid IR.
3871   CodeGenFunction::ConditionalEvaluation condEval(CGF);
3872 
3873   // Zero-initialize it if we're not doing a copy-initialization.
3874   bool shouldCopy = CRE->shouldCopy();
3875   if (!shouldCopy) {
3876     llvm::Value *null =
3877       llvm::ConstantPointerNull::get(
3878         cast<llvm::PointerType>(destType->getElementType()));
3879     CGF.Builder.CreateStore(null, temp);
3880   }
3881 
3882   llvm::BasicBlock *contBB = nullptr;
3883   llvm::BasicBlock *originBB = nullptr;
3884 
3885   // If the address is *not* known to be non-null, we need to switch.
3886   llvm::Value *finalArgument;
3887 
3888   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3889                                               CGF.CGM.getDataLayout());
3890   if (provablyNonNull) {
3891     finalArgument = temp.getPointer();
3892   } else {
3893     llvm::Value *isNull =
3894       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3895 
3896     finalArgument = CGF.Builder.CreateSelect(isNull,
3897                                    llvm::ConstantPointerNull::get(destType),
3898                                              temp.getPointer(), "icr.argument");
3899 
3900     // If we need to copy, then the load has to be conditional, which
3901     // means we need control flow.
3902     if (shouldCopy) {
3903       originBB = CGF.Builder.GetInsertBlock();
3904       contBB = CGF.createBasicBlock("icr.cont");
3905       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3906       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3907       CGF.EmitBlock(copyBB);
3908       condEval.begin(CGF);
3909     }
3910   }
3911 
3912   llvm::Value *valueToUse = nullptr;
3913 
3914   // Perform a copy if necessary.
3915   if (shouldCopy) {
3916     RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3917     assert(srcRV.isScalar());
3918 
3919     llvm::Value *src = srcRV.getScalarVal();
3920     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3921                                     "icr.cast");
3922 
3923     // Use an ordinary store, not a store-to-lvalue.
3924     CGF.Builder.CreateStore(src, temp);
3925 
3926     // If optimization is enabled, and the value was held in a
3927     // __strong variable, we need to tell the optimizer that this
3928     // value has to stay alive until we're doing the store back.
3929     // This is because the temporary is effectively unretained,
3930     // and so otherwise we can violate the high-level semantics.
3931     if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3932         srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3933       valueToUse = src;
3934     }
3935   }
3936 
3937   // Finish the control flow if we needed it.
3938   if (shouldCopy && !provablyNonNull) {
3939     llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3940     CGF.EmitBlock(contBB);
3941 
3942     // Make a phi for the value to intrinsically use.
3943     if (valueToUse) {
3944       llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3945                                                       "icr.to-use");
3946       phiToUse->addIncoming(valueToUse, copyBB);
3947       phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3948                             originBB);
3949       valueToUse = phiToUse;
3950     }
3951 
3952     condEval.end(CGF);
3953   }
3954 
3955   args.addWriteback(srcLV, temp, valueToUse);
3956   args.add(RValue::get(finalArgument), CRE->getType());
3957 }
3958 
3959 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3960   assert(!StackBase);
3961 
3962   // Save the stack.
3963   llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3964   StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3965 }
3966 
3967 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3968   if (StackBase) {
3969     // Restore the stack after the call.
3970     llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3971     CGF.Builder.CreateCall(F, StackBase);
3972   }
3973 }
3974 
3975 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3976                                           SourceLocation ArgLoc,
3977                                           AbstractCallee AC,
3978                                           unsigned ParmNum) {
3979   if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3980                          SanOpts.has(SanitizerKind::NullabilityArg)))
3981     return;
3982 
3983   // The param decl may be missing in a variadic function.
3984   auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3985   unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3986 
3987   // Prefer the nonnull attribute if it's present.
3988   const NonNullAttr *NNAttr = nullptr;
3989   if (SanOpts.has(SanitizerKind::NonnullAttribute))
3990     NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3991 
3992   bool CanCheckNullability = false;
3993   if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3994     auto Nullability = PVD->getType()->getNullability(getContext());
3995     CanCheckNullability = Nullability &&
3996                           *Nullability == NullabilityKind::NonNull &&
3997                           PVD->getTypeSourceInfo();
3998   }
3999 
4000   if (!NNAttr && !CanCheckNullability)
4001     return;
4002 
4003   SourceLocation AttrLoc;
4004   SanitizerMask CheckKind;
4005   SanitizerHandler Handler;
4006   if (NNAttr) {
4007     AttrLoc = NNAttr->getLocation();
4008     CheckKind = SanitizerKind::NonnullAttribute;
4009     Handler = SanitizerHandler::NonnullArg;
4010   } else {
4011     AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4012     CheckKind = SanitizerKind::NullabilityArg;
4013     Handler = SanitizerHandler::NullabilityArg;
4014   }
4015 
4016   SanitizerScope SanScope(this);
4017   llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType);
4018   llvm::Constant *StaticData[] = {
4019       EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
4020       llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
4021   };
4022   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
4023 }
4024 
4025 // Check if the call is going to use the inalloca convention. This needs to
4026 // agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged
4027 // later, so we can't check it directly.
4028 static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC,
4029                             ArrayRef<QualType> ArgTypes) {
4030   // The Swift calling convention doesn't go through the target-specific
4031   // argument classification, so it never uses inalloca.
4032   // TODO: Consider limiting inalloca use to only calling conventions supported
4033   // by MSVC.
4034   if (ExplicitCC == CC_Swift)
4035     return false;
4036   if (!CGM.getTarget().getCXXABI().isMicrosoft())
4037     return false;
4038   return llvm::any_of(ArgTypes, [&](QualType Ty) {
4039     return isInAllocaArgument(CGM.getCXXABI(), Ty);
4040   });
4041 }
4042 
4043 #ifndef NDEBUG
4044 // Determine whether the given argument is an Objective-C method
4045 // that may have type parameters in its signature.
4046 static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
4047   const DeclContext *dc = method->getDeclContext();
4048   if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) {
4049     return classDecl->getTypeParamListAsWritten();
4050   }
4051 
4052   if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
4053     return catDecl->getTypeParamList();
4054   }
4055 
4056   return false;
4057 }
4058 #endif
4059 
4060 /// EmitCallArgs - Emit call arguments for a function.
4061 void CodeGenFunction::EmitCallArgs(
4062     CallArgList &Args, PrototypeWrapper Prototype,
4063     llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4064     AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
4065   SmallVector<QualType, 16> ArgTypes;
4066 
4067   assert((ParamsToSkip == 0 || Prototype.P) &&
4068          "Can't skip parameters if type info is not provided");
4069 
4070   // This variable only captures *explicitly* written conventions, not those
4071   // applied by default via command line flags or target defaults, such as
4072   // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would
4073   // require knowing if this is a C++ instance method or being able to see
4074   // unprototyped FunctionTypes.
4075   CallingConv ExplicitCC = CC_C;
4076 
4077   // First, if a prototype was provided, use those argument types.
4078   bool IsVariadic = false;
4079   if (Prototype.P) {
4080     const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>();
4081     if (MD) {
4082       IsVariadic = MD->isVariadic();
4083       ExplicitCC = getCallingConventionForDecl(
4084           MD, CGM.getTarget().getTriple().isOSWindows());
4085       ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4086                       MD->param_type_end());
4087     } else {
4088       const auto *FPT = Prototype.P.get<const FunctionProtoType *>();
4089       IsVariadic = FPT->isVariadic();
4090       ExplicitCC = FPT->getExtInfo().getCC();
4091       ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4092                       FPT->param_type_end());
4093     }
4094 
4095 #ifndef NDEBUG
4096     // Check that the prototyped types match the argument expression types.
4097     bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD);
4098     CallExpr::const_arg_iterator Arg = ArgRange.begin();
4099     for (QualType Ty : ArgTypes) {
4100       assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4101       assert(
4102           (isGenericMethod || Ty->isVariablyModifiedType() ||
4103            Ty.getNonReferenceType()->isObjCRetainableType() ||
4104            getContext()
4105                    .getCanonicalType(Ty.getNonReferenceType())
4106                    .getTypePtr() ==
4107                getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4108           "type mismatch in call argument!");
4109       ++Arg;
4110     }
4111 
4112     // Either we've emitted all the call args, or we have a call to variadic
4113     // function.
4114     assert((Arg == ArgRange.end() || IsVariadic) &&
4115            "Extra arguments in non-variadic function!");
4116 #endif
4117   }
4118 
4119   // If we still have any arguments, emit them using the type of the argument.
4120   for (auto *A : llvm::make_range(std::next(ArgRange.begin(), ArgTypes.size()),
4121                                   ArgRange.end()))
4122     ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4123   assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4124 
4125   // We must evaluate arguments from right to left in the MS C++ ABI,
4126   // because arguments are destroyed left to right in the callee. As a special
4127   // case, there are certain language constructs that require left-to-right
4128   // evaluation, and in those cases we consider the evaluation order requirement
4129   // to trump the "destruction order is reverse construction order" guarantee.
4130   bool LeftToRight =
4131       CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
4132           ? Order == EvaluationOrder::ForceLeftToRight
4133           : Order != EvaluationOrder::ForceRightToLeft;
4134 
4135   auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
4136                                          RValue EmittedArg) {
4137     if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
4138       return;
4139     auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
4140     if (PS == nullptr)
4141       return;
4142 
4143     const auto &Context = getContext();
4144     auto SizeTy = Context.getSizeType();
4145     auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
4146     assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
4147     llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
4148                                                      EmittedArg.getScalarVal(),
4149                                                      PS->isDynamic());
4150     Args.add(RValue::get(V), SizeTy);
4151     // If we're emitting args in reverse, be sure to do so with
4152     // pass_object_size, as well.
4153     if (!LeftToRight)
4154       std::swap(Args.back(), *(&Args.back() - 1));
4155   };
4156 
4157   // Insert a stack save if we're going to need any inalloca args.
4158   if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
4159     assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4160            "inalloca only supported on x86");
4161     Args.allocateArgumentMemory(*this);
4162   }
4163 
4164   // Evaluate each argument in the appropriate order.
4165   size_t CallArgsStart = Args.size();
4166   for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4167     unsigned Idx = LeftToRight ? I : E - I - 1;
4168     CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
4169     unsigned InitialArgSize = Args.size();
4170     // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
4171     // the argument and parameter match or the objc method is parameterized.
4172     assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
4173             getContext().hasSameUnqualifiedType((*Arg)->getType(),
4174                                                 ArgTypes[Idx]) ||
4175             (isa<ObjCMethodDecl>(AC.getDecl()) &&
4176              isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
4177            "Argument and parameter types don't match");
4178     EmitCallArg(Args, *Arg, ArgTypes[Idx]);
4179     // In particular, we depend on it being the last arg in Args, and the
4180     // objectsize bits depend on there only being one arg if !LeftToRight.
4181     assert(InitialArgSize + 1 == Args.size() &&
4182            "The code below depends on only adding one arg per EmitCallArg");
4183     (void)InitialArgSize;
4184     // Since pointer argument are never emitted as LValue, it is safe to emit
4185     // non-null argument check for r-value only.
4186     if (!Args.back().hasLValue()) {
4187       RValue RVArg = Args.back().getKnownRValue();
4188       EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
4189                           ParamsToSkip + Idx);
4190       // @llvm.objectsize should never have side-effects and shouldn't need
4191       // destruction/cleanups, so we can safely "emit" it after its arg,
4192       // regardless of right-to-leftness
4193       MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4194     }
4195   }
4196 
4197   if (!LeftToRight) {
4198     // Un-reverse the arguments we just evaluated so they match up with the LLVM
4199     // IR function.
4200     std::reverse(Args.begin() + CallArgsStart, Args.end());
4201   }
4202 }
4203 
4204 namespace {
4205 
4206 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
4207   DestroyUnpassedArg(Address Addr, QualType Ty)
4208       : Addr(Addr), Ty(Ty) {}
4209 
4210   Address Addr;
4211   QualType Ty;
4212 
4213   void Emit(CodeGenFunction &CGF, Flags flags) override {
4214     QualType::DestructionKind DtorKind = Ty.isDestructedType();
4215     if (DtorKind == QualType::DK_cxx_destructor) {
4216       const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
4217       assert(!Dtor->isTrivial());
4218       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
4219                                 /*Delegating=*/false, Addr, Ty);
4220     } else {
4221       CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
4222     }
4223   }
4224 };
4225 
4226 struct DisableDebugLocationUpdates {
4227   CodeGenFunction &CGF;
4228   bool disabledDebugInfo;
4229   DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
4230     if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
4231       CGF.disableDebugInfo();
4232   }
4233   ~DisableDebugLocationUpdates() {
4234     if (disabledDebugInfo)
4235       CGF.enableDebugInfo();
4236   }
4237 };
4238 
4239 } // end anonymous namespace
4240 
4241 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
4242   if (!HasLV)
4243     return RV;
4244   LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
4245   CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
4246                         LV.isVolatile());
4247   IsUsed = true;
4248   return RValue::getAggregate(Copy.getAddress(CGF));
4249 }
4250 
4251 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
4252   LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
4253   if (!HasLV && RV.isScalar())
4254     CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
4255   else if (!HasLV && RV.isComplex())
4256     CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
4257   else {
4258     auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
4259     LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
4260     // We assume that call args are never copied into subobjects.
4261     CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
4262                           HasLV ? LV.isVolatileQualified()
4263                                 : RV.isVolatileQualified());
4264   }
4265   IsUsed = true;
4266 }
4267 
4268 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
4269                                   QualType type) {
4270   DisableDebugLocationUpdates Dis(*this, E);
4271   if (const ObjCIndirectCopyRestoreExpr *CRE
4272         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
4273     assert(getLangOpts().ObjCAutoRefCount);
4274     return emitWritebackArg(*this, args, CRE);
4275   }
4276 
4277   assert(type->isReferenceType() == E->isGLValue() &&
4278          "reference binding to unmaterialized r-value!");
4279 
4280   if (E->isGLValue()) {
4281     assert(E->getObjectKind() == OK_Ordinary);
4282     return args.add(EmitReferenceBindingToExpr(E), type);
4283   }
4284 
4285   bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
4286 
4287   // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
4288   // However, we still have to push an EH-only cleanup in case we unwind before
4289   // we make it to the call.
4290   if (type->isRecordType() &&
4291       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
4292     // If we're using inalloca, use the argument memory.  Otherwise, use a
4293     // temporary.
4294     AggValueSlot Slot;
4295     if (args.isUsingInAlloca())
4296       Slot = createPlaceholderSlot(*this, type);
4297     else
4298       Slot = CreateAggTemp(type, "agg.tmp");
4299 
4300     bool DestroyedInCallee = true, NeedsEHCleanup = true;
4301     if (const auto *RD = type->getAsCXXRecordDecl())
4302       DestroyedInCallee = RD->hasNonTrivialDestructor();
4303     else
4304       NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
4305 
4306     if (DestroyedInCallee)
4307       Slot.setExternallyDestructed();
4308 
4309     EmitAggExpr(E, Slot);
4310     RValue RV = Slot.asRValue();
4311     args.add(RV, type);
4312 
4313     if (DestroyedInCallee && NeedsEHCleanup) {
4314       // Create a no-op GEP between the placeholder and the cleanup so we can
4315       // RAUW it successfully.  It also serves as a marker of the first
4316       // instruction where the cleanup is active.
4317       pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
4318                                               type);
4319       // This unreachable is a temporary marker which will be removed later.
4320       llvm::Instruction *IsActive = Builder.CreateUnreachable();
4321       args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
4322     }
4323     return;
4324   }
4325 
4326   if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
4327       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
4328     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
4329     assert(L.isSimple());
4330     args.addUncopiedAggregate(L, type);
4331     return;
4332   }
4333 
4334   args.add(EmitAnyExprToTemp(E), type);
4335 }
4336 
4337 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
4338   // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
4339   // implicitly widens null pointer constants that are arguments to varargs
4340   // functions to pointer-sized ints.
4341   if (!getTarget().getTriple().isOSWindows())
4342     return Arg->getType();
4343 
4344   if (Arg->getType()->isIntegerType() &&
4345       getContext().getTypeSize(Arg->getType()) <
4346           getContext().getTargetInfo().getPointerWidth(0) &&
4347       Arg->isNullPointerConstant(getContext(),
4348                                  Expr::NPC_ValueDependentIsNotNull)) {
4349     return getContext().getIntPtrType();
4350   }
4351 
4352   return Arg->getType();
4353 }
4354 
4355 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4356 // optimizer it can aggressively ignore unwind edges.
4357 void
4358 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
4359   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
4360       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
4361     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
4362                       CGM.getNoObjCARCExceptionsMetadata());
4363 }
4364 
4365 /// Emits a call to the given no-arguments nounwind runtime function.
4366 llvm::CallInst *
4367 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4368                                          const llvm::Twine &name) {
4369   return EmitNounwindRuntimeCall(callee, None, name);
4370 }
4371 
4372 /// Emits a call to the given nounwind runtime function.
4373 llvm::CallInst *
4374 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4375                                          ArrayRef<llvm::Value *> args,
4376                                          const llvm::Twine &name) {
4377   llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
4378   call->setDoesNotThrow();
4379   return call;
4380 }
4381 
4382 /// Emits a simple call (never an invoke) to the given no-arguments
4383 /// runtime function.
4384 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4385                                                  const llvm::Twine &name) {
4386   return EmitRuntimeCall(callee, None, name);
4387 }
4388 
4389 // Calls which may throw must have operand bundles indicating which funclet
4390 // they are nested within.
4391 SmallVector<llvm::OperandBundleDef, 1>
4392 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
4393   SmallVector<llvm::OperandBundleDef, 1> BundleList;
4394   // There is no need for a funclet operand bundle if we aren't inside a
4395   // funclet.
4396   if (!CurrentFuncletPad)
4397     return BundleList;
4398 
4399   // Skip intrinsics which cannot throw.
4400   auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
4401   if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
4402     return BundleList;
4403 
4404   BundleList.emplace_back("funclet", CurrentFuncletPad);
4405   return BundleList;
4406 }
4407 
4408 /// Emits a simple call (never an invoke) to the given runtime function.
4409 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4410                                                  ArrayRef<llvm::Value *> args,
4411                                                  const llvm::Twine &name) {
4412   llvm::CallInst *call = Builder.CreateCall(
4413       callee, args, getBundlesForFunclet(callee.getCallee()), name);
4414   call->setCallingConv(getRuntimeCC());
4415   return call;
4416 }
4417 
4418 /// Emits a call or invoke to the given noreturn runtime function.
4419 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
4420     llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
4421   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4422       getBundlesForFunclet(callee.getCallee());
4423 
4424   if (getInvokeDest()) {
4425     llvm::InvokeInst *invoke =
4426       Builder.CreateInvoke(callee,
4427                            getUnreachableBlock(),
4428                            getInvokeDest(),
4429                            args,
4430                            BundleList);
4431     invoke->setDoesNotReturn();
4432     invoke->setCallingConv(getRuntimeCC());
4433   } else {
4434     llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
4435     call->setDoesNotReturn();
4436     call->setCallingConv(getRuntimeCC());
4437     Builder.CreateUnreachable();
4438   }
4439 }
4440 
4441 /// Emits a call or invoke instruction to the given nullary runtime function.
4442 llvm::CallBase *
4443 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4444                                          const Twine &name) {
4445   return EmitRuntimeCallOrInvoke(callee, None, name);
4446 }
4447 
4448 /// Emits a call or invoke instruction to the given runtime function.
4449 llvm::CallBase *
4450 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4451                                          ArrayRef<llvm::Value *> args,
4452                                          const Twine &name) {
4453   llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
4454   call->setCallingConv(getRuntimeCC());
4455   return call;
4456 }
4457 
4458 /// Emits a call or invoke instruction to the given function, depending
4459 /// on the current state of the EH stack.
4460 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
4461                                                   ArrayRef<llvm::Value *> Args,
4462                                                   const Twine &Name) {
4463   llvm::BasicBlock *InvokeDest = getInvokeDest();
4464   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4465       getBundlesForFunclet(Callee.getCallee());
4466 
4467   llvm::CallBase *Inst;
4468   if (!InvokeDest)
4469     Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
4470   else {
4471     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
4472     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4473                                 Name);
4474     EmitBlock(ContBB);
4475   }
4476 
4477   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4478   // optimizer it can aggressively ignore unwind edges.
4479   if (CGM.getLangOpts().ObjCAutoRefCount)
4480     AddObjCARCExceptionMetadata(Inst);
4481 
4482   return Inst;
4483 }
4484 
4485 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4486                                                   llvm::Value *New) {
4487   DeferredReplacements.push_back(
4488       std::make_pair(llvm::WeakTrackingVH(Old), New));
4489 }
4490 
4491 namespace {
4492 
4493 /// Specify given \p NewAlign as the alignment of return value attribute. If
4494 /// such attribute already exists, re-set it to the maximal one of two options.
4495 LLVM_NODISCARD llvm::AttributeList
4496 maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4497                                 const llvm::AttributeList &Attrs,
4498                                 llvm::Align NewAlign) {
4499   llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4500   if (CurAlign >= NewAlign)
4501     return Attrs;
4502   llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4503   return Attrs
4504       .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex,
4505                        llvm::Attribute::AttrKind::Alignment)
4506       .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr);
4507 }
4508 
4509 template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
4510 protected:
4511   CodeGenFunction &CGF;
4512 
4513   /// We do nothing if this is, or becomes, nullptr.
4514   const AlignedAttrTy *AA = nullptr;
4515 
4516   llvm::Value *Alignment = nullptr;      // May or may not be a constant.
4517   llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.
4518 
4519   AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4520       : CGF(CGF_) {
4521     if (!FuncDecl)
4522       return;
4523     AA = FuncDecl->getAttr<AlignedAttrTy>();
4524   }
4525 
4526 public:
4527   /// If we can, materialize the alignment as an attribute on return value.
4528   LLVM_NODISCARD llvm::AttributeList
4529   TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
4530     if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
4531       return Attrs;
4532     const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
4533     if (!AlignmentCI)
4534       return Attrs;
4535     // We may legitimately have non-power-of-2 alignment here.
4536     // If so, this is UB land, emit it via `@llvm.assume` instead.
4537     if (!AlignmentCI->getValue().isPowerOf2())
4538       return Attrs;
4539     llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
4540         CGF.getLLVMContext(), Attrs,
4541         llvm::Align(
4542             AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
4543     AA = nullptr; // We're done. Disallow doing anything else.
4544     return NewAttrs;
4545   }
4546 
4547   /// Emit alignment assumption.
4548   /// This is a general fallback that we take if either there is an offset,
4549   /// or the alignment is variable or we are sanitizing for alignment.
4550   void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
4551     if (!AA)
4552       return;
4553     CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
4554                                 AA->getLocation(), Alignment, OffsetCI);
4555     AA = nullptr; // We're done. Disallow doing anything else.
4556   }
4557 };
4558 
4559 /// Helper data structure to emit `AssumeAlignedAttr`.
4560 class AssumeAlignedAttrEmitter final
4561     : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
4562 public:
4563   AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4564       : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4565     if (!AA)
4566       return;
4567     // It is guaranteed that the alignment/offset are constants.
4568     Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
4569     if (Expr *Offset = AA->getOffset()) {
4570       OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
4571       if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
4572         OffsetCI = nullptr;
4573     }
4574   }
4575 };
4576 
4577 /// Helper data structure to emit `AllocAlignAttr`.
4578 class AllocAlignAttrEmitter final
4579     : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
4580 public:
4581   AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
4582                         const CallArgList &CallArgs)
4583       : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4584     if (!AA)
4585       return;
4586     // Alignment may or may not be a constant, and that is okay.
4587     Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
4588                     .getRValue(CGF)
4589                     .getScalarVal();
4590   }
4591 };
4592 
4593 } // namespace
4594 
4595 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
4596                                  const CGCallee &Callee,
4597                                  ReturnValueSlot ReturnValue,
4598                                  const CallArgList &CallArgs,
4599                                  llvm::CallBase **callOrInvoke, bool IsMustTail,
4600                                  SourceLocation Loc) {
4601   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
4602 
4603   assert(Callee.isOrdinary() || Callee.isVirtual());
4604 
4605   // Handle struct-return functions by passing a pointer to the
4606   // location that we would like to return into.
4607   QualType RetTy = CallInfo.getReturnType();
4608   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
4609 
4610   llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
4611 
4612   const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4613   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
4614     // We can only guarantee that a function is called from the correct
4615     // context/function based on the appropriate target attributes,
4616     // so only check in the case where we have both always_inline and target
4617     // since otherwise we could be making a conditional call after a check for
4618     // the proper cpu features (and it won't cause code generation issues due to
4619     // function based code generation).
4620     if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
4621         TargetDecl->hasAttr<TargetAttr>())
4622       checkTargetFeatures(Loc, FD);
4623 
4624     // Some architectures (such as x86-64) have the ABI changed based on
4625     // attribute-target/features. Give them a chance to diagnose.
4626     CGM.getTargetCodeGenInfo().checkFunctionCallABI(
4627         CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
4628   }
4629 
4630 #ifndef NDEBUG
4631   if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
4632     // For an inalloca varargs function, we don't expect CallInfo to match the
4633     // function pointer's type, because the inalloca struct a will have extra
4634     // fields in it for the varargs parameters.  Code later in this function
4635     // bitcasts the function pointer to the type derived from CallInfo.
4636     //
4637     // In other cases, we assert that the types match up (until pointers stop
4638     // having pointee types).
4639     llvm::Type *TypeFromVal;
4640     if (Callee.isVirtual())
4641       TypeFromVal = Callee.getVirtualFunctionType();
4642     else
4643       TypeFromVal =
4644           Callee.getFunctionPointer()->getType()->getPointerElementType();
4645     assert(IRFuncTy == TypeFromVal);
4646   }
4647 #endif
4648 
4649   // 1. Set up the arguments.
4650 
4651   // If we're using inalloca, insert the allocation after the stack save.
4652   // FIXME: Do this earlier rather than hacking it in here!
4653   Address ArgMemory = Address::invalid();
4654   if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
4655     const llvm::DataLayout &DL = CGM.getDataLayout();
4656     llvm::Instruction *IP = CallArgs.getStackBase();
4657     llvm::AllocaInst *AI;
4658     if (IP) {
4659       IP = IP->getNextNode();
4660       AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
4661                                 "argmem", IP);
4662     } else {
4663       AI = CreateTempAlloca(ArgStruct, "argmem");
4664     }
4665     auto Align = CallInfo.getArgStructAlignment();
4666     AI->setAlignment(Align.getAsAlign());
4667     AI->setUsedWithInAlloca(true);
4668     assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
4669     ArgMemory = Address(AI, Align);
4670   }
4671 
4672   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
4673   SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
4674 
4675   // If the call returns a temporary with struct return, create a temporary
4676   // alloca to hold the result, unless one is given to us.
4677   Address SRetPtr = Address::invalid();
4678   Address SRetAlloca = Address::invalid();
4679   llvm::Value *UnusedReturnSizePtr = nullptr;
4680   if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
4681     if (!ReturnValue.isNull()) {
4682       SRetPtr = ReturnValue.getValue();
4683     } else {
4684       SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
4685       if (HaveInsertPoint() && ReturnValue.isUnused()) {
4686         llvm::TypeSize size =
4687             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
4688         UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
4689       }
4690     }
4691     if (IRFunctionArgs.hasSRetArg()) {
4692       IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
4693     } else if (RetAI.isInAlloca()) {
4694       Address Addr =
4695           Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
4696       Builder.CreateStore(SRetPtr.getPointer(), Addr);
4697     }
4698   }
4699 
4700   Address swiftErrorTemp = Address::invalid();
4701   Address swiftErrorArg = Address::invalid();
4702 
4703   // When passing arguments using temporary allocas, we need to add the
4704   // appropriate lifetime markers. This vector keeps track of all the lifetime
4705   // markers that need to be ended right after the call.
4706   SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;
4707 
4708   // Translate all of the arguments as necessary to match the IR lowering.
4709   assert(CallInfo.arg_size() == CallArgs.size() &&
4710          "Mismatch between function signature & arguments.");
4711   unsigned ArgNo = 0;
4712   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
4713   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
4714        I != E; ++I, ++info_it, ++ArgNo) {
4715     const ABIArgInfo &ArgInfo = info_it->info;
4716 
4717     // Insert a padding argument to ensure proper alignment.
4718     if (IRFunctionArgs.hasPaddingArg(ArgNo))
4719       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
4720           llvm::UndefValue::get(ArgInfo.getPaddingType());
4721 
4722     unsigned FirstIRArg, NumIRArgs;
4723     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
4724 
4725     switch (ArgInfo.getKind()) {
4726     case ABIArgInfo::InAlloca: {
4727       assert(NumIRArgs == 0);
4728       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
4729       if (I->isAggregate()) {
4730         Address Addr = I->hasLValue()
4731                            ? I->getKnownLValue().getAddress(*this)
4732                            : I->getKnownRValue().getAggregateAddress();
4733         llvm::Instruction *Placeholder =
4734             cast<llvm::Instruction>(Addr.getPointer());
4735 
4736         if (!ArgInfo.getInAllocaIndirect()) {
4737           // Replace the placeholder with the appropriate argument slot GEP.
4738           CGBuilderTy::InsertPoint IP = Builder.saveIP();
4739           Builder.SetInsertPoint(Placeholder);
4740           Addr = Builder.CreateStructGEP(ArgMemory,
4741                                          ArgInfo.getInAllocaFieldIndex());
4742           Builder.restoreIP(IP);
4743         } else {
4744           // For indirect things such as overaligned structs, replace the
4745           // placeholder with a regular aggregate temporary alloca. Store the
4746           // address of this alloca into the struct.
4747           Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
4748           Address ArgSlot = Builder.CreateStructGEP(
4749               ArgMemory, ArgInfo.getInAllocaFieldIndex());
4750           Builder.CreateStore(Addr.getPointer(), ArgSlot);
4751         }
4752         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
4753       } else if (ArgInfo.getInAllocaIndirect()) {
4754         // Make a temporary alloca and store the address of it into the argument
4755         // struct.
4756         Address Addr = CreateMemTempWithoutCast(
4757             I->Ty, getContext().getTypeAlignInChars(I->Ty),
4758             "indirect-arg-temp");
4759         I->copyInto(*this, Addr);
4760         Address ArgSlot =
4761             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4762         Builder.CreateStore(Addr.getPointer(), ArgSlot);
4763       } else {
4764         // Store the RValue into the argument struct.
4765         Address Addr =
4766             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4767         unsigned AS = Addr.getType()->getPointerAddressSpace();
4768         llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
4769         // There are some cases where a trivial bitcast is not avoidable.  The
4770         // definition of a type later in a translation unit may change it's type
4771         // from {}* to (%struct.foo*)*.
4772         if (Addr.getType() != MemType)
4773           Addr = Builder.CreateBitCast(Addr, MemType);
4774         I->copyInto(*this, Addr);
4775       }
4776       break;
4777     }
4778 
4779     case ABIArgInfo::Indirect:
4780     case ABIArgInfo::IndirectAliased: {
4781       assert(NumIRArgs == 1);
4782       if (!I->isAggregate()) {
4783         // Make a temporary alloca to pass the argument.
4784         Address Addr = CreateMemTempWithoutCast(
4785             I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
4786         IRCallArgs[FirstIRArg] = Addr.getPointer();
4787 
4788         I->copyInto(*this, Addr);
4789       } else {
4790         // We want to avoid creating an unnecessary temporary+copy here;
4791         // however, we need one in three cases:
4792         // 1. If the argument is not byval, and we are required to copy the
4793         //    source.  (This case doesn't occur on any common architecture.)
4794         // 2. If the argument is byval, RV is not sufficiently aligned, and
4795         //    we cannot force it to be sufficiently aligned.
4796         // 3. If the argument is byval, but RV is not located in default
4797         //    or alloca address space.
4798         Address Addr = I->hasLValue()
4799                            ? I->getKnownLValue().getAddress(*this)
4800                            : I->getKnownRValue().getAggregateAddress();
4801         llvm::Value *V = Addr.getPointer();
4802         CharUnits Align = ArgInfo.getIndirectAlign();
4803         const llvm::DataLayout *TD = &CGM.getDataLayout();
4804 
4805         assert((FirstIRArg >= IRFuncTy->getNumParams() ||
4806                 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
4807                     TD->getAllocaAddrSpace()) &&
4808                "indirect argument must be in alloca address space");
4809 
4810         bool NeedCopy = false;
4811 
4812         if (Addr.getAlignment() < Align &&
4813             llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
4814                 Align.getAsAlign()) {
4815           NeedCopy = true;
4816         } else if (I->hasLValue()) {
4817           auto LV = I->getKnownLValue();
4818           auto AS = LV.getAddressSpace();
4819 
4820           if (!ArgInfo.getIndirectByVal() ||
4821               (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
4822             NeedCopy = true;
4823           }
4824           if (!getLangOpts().OpenCL) {
4825             if ((ArgInfo.getIndirectByVal() &&
4826                 (AS != LangAS::Default &&
4827                  AS != CGM.getASTAllocaAddressSpace()))) {
4828               NeedCopy = true;
4829             }
4830           }
4831           // For OpenCL even if RV is located in default or alloca address space
4832           // we don't want to perform address space cast for it.
4833           else if ((ArgInfo.getIndirectByVal() &&
4834                     Addr.getType()->getAddressSpace() != IRFuncTy->
4835                       getParamType(FirstIRArg)->getPointerAddressSpace())) {
4836             NeedCopy = true;
4837           }
4838         }
4839 
4840         if (NeedCopy) {
4841           // Create an aligned temporary, and copy to it.
4842           Address AI = CreateMemTempWithoutCast(
4843               I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
4844           IRCallArgs[FirstIRArg] = AI.getPointer();
4845 
4846           // Emit lifetime markers for the temporary alloca.
4847           llvm::TypeSize ByvalTempElementSize =
4848               CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
4849           llvm::Value *LifetimeSize =
4850               EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
4851 
4852           // Add cleanup code to emit the end lifetime marker after the call.
4853           if (LifetimeSize) // In case we disabled lifetime markers.
4854             CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
4855 
4856           // Generate the copy.
4857           I->copyInto(*this, AI);
4858         } else {
4859           // Skip the extra memcpy call.
4860           auto *T = V->getType()->getPointerElementType()->getPointerTo(
4861               CGM.getDataLayout().getAllocaAddrSpace());
4862           IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
4863               *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
4864               true);
4865         }
4866       }
4867       break;
4868     }
4869 
4870     case ABIArgInfo::Ignore:
4871       assert(NumIRArgs == 0);
4872       break;
4873 
4874     case ABIArgInfo::Extend:
4875     case ABIArgInfo::Direct: {
4876       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4877           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4878           ArgInfo.getDirectOffset() == 0) {
4879         assert(NumIRArgs == 1);
4880         llvm::Value *V;
4881         if (!I->isAggregate())
4882           V = I->getKnownRValue().getScalarVal();
4883         else
4884           V = Builder.CreateLoad(
4885               I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4886                              : I->getKnownRValue().getAggregateAddress());
4887 
4888         // Implement swifterror by copying into a new swifterror argument.
4889         // We'll write back in the normal path out of the call.
4890         if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4891               == ParameterABI::SwiftErrorResult) {
4892           assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4893 
4894           QualType pointeeTy = I->Ty->getPointeeType();
4895           swiftErrorArg =
4896             Address(V, getContext().getTypeAlignInChars(pointeeTy));
4897 
4898           swiftErrorTemp =
4899             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4900           V = swiftErrorTemp.getPointer();
4901           cast<llvm::AllocaInst>(V)->setSwiftError(true);
4902 
4903           llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4904           Builder.CreateStore(errorValue, swiftErrorTemp);
4905         }
4906 
4907         // We might have to widen integers, but we should never truncate.
4908         if (ArgInfo.getCoerceToType() != V->getType() &&
4909             V->getType()->isIntegerTy())
4910           V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4911 
4912         // If the argument doesn't match, perform a bitcast to coerce it.  This
4913         // can happen due to trivial type mismatches.
4914         if (FirstIRArg < IRFuncTy->getNumParams() &&
4915             V->getType() != IRFuncTy->getParamType(FirstIRArg))
4916           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4917 
4918         IRCallArgs[FirstIRArg] = V;
4919         break;
4920       }
4921 
4922       // FIXME: Avoid the conversion through memory if possible.
4923       Address Src = Address::invalid();
4924       if (!I->isAggregate()) {
4925         Src = CreateMemTemp(I->Ty, "coerce");
4926         I->copyInto(*this, Src);
4927       } else {
4928         Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4929                              : I->getKnownRValue().getAggregateAddress();
4930       }
4931 
4932       // If the value is offset in memory, apply the offset now.
4933       Src = emitAddressAtOffset(*this, Src, ArgInfo);
4934 
4935       // Fast-isel and the optimizer generally like scalar values better than
4936       // FCAs, so we flatten them if this is safe to do for this argument.
4937       llvm::StructType *STy =
4938             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4939       if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4940         llvm::Type *SrcTy = Src.getElementType();
4941         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4942         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4943 
4944         // If the source type is smaller than the destination type of the
4945         // coerce-to logic, copy the source value into a temp alloca the size
4946         // of the destination type to allow loading all of it. The bits past
4947         // the source value are left undef.
4948         if (SrcSize < DstSize) {
4949           Address TempAlloca
4950             = CreateTempAlloca(STy, Src.getAlignment(),
4951                                Src.getName() + ".coerce");
4952           Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4953           Src = TempAlloca;
4954         } else {
4955           Src = Builder.CreateBitCast(Src,
4956                                       STy->getPointerTo(Src.getAddressSpace()));
4957         }
4958 
4959         assert(NumIRArgs == STy->getNumElements());
4960         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4961           Address EltPtr = Builder.CreateStructGEP(Src, i);
4962           llvm::Value *LI = Builder.CreateLoad(EltPtr);
4963           IRCallArgs[FirstIRArg + i] = LI;
4964         }
4965       } else {
4966         // In the simple case, just pass the coerced loaded value.
4967         assert(NumIRArgs == 1);
4968         llvm::Value *Load =
4969             CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4970 
4971         if (CallInfo.isCmseNSCall()) {
4972           // For certain parameter types, clear padding bits, as they may reveal
4973           // sensitive information.
4974           // Small struct/union types are passed as integer arrays.
4975           auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
4976           if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
4977             Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
4978         }
4979         IRCallArgs[FirstIRArg] = Load;
4980       }
4981 
4982       break;
4983     }
4984 
4985     case ABIArgInfo::CoerceAndExpand: {
4986       auto coercionType = ArgInfo.getCoerceAndExpandType();
4987       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4988 
4989       llvm::Value *tempSize = nullptr;
4990       Address addr = Address::invalid();
4991       Address AllocaAddr = Address::invalid();
4992       if (I->isAggregate()) {
4993         addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4994                               : I->getKnownRValue().getAggregateAddress();
4995 
4996       } else {
4997         RValue RV = I->getKnownRValue();
4998         assert(RV.isScalar()); // complex should always just be direct
4999 
5000         llvm::Type *scalarType = RV.getScalarVal()->getType();
5001         auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
5002         auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
5003 
5004         // Materialize to a temporary.
5005         addr = CreateTempAlloca(
5006             RV.getScalarVal()->getType(),
5007             CharUnits::fromQuantity(std::max(
5008                 (unsigned)layout->getAlignment().value(), scalarAlign)),
5009             "tmp",
5010             /*ArraySize=*/nullptr, &AllocaAddr);
5011         tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
5012 
5013         Builder.CreateStore(RV.getScalarVal(), addr);
5014       }
5015 
5016       addr = Builder.CreateElementBitCast(addr, coercionType);
5017 
5018       unsigned IRArgPos = FirstIRArg;
5019       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5020         llvm::Type *eltType = coercionType->getElementType(i);
5021         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
5022         Address eltAddr = Builder.CreateStructGEP(addr, i);
5023         llvm::Value *elt = Builder.CreateLoad(eltAddr);
5024         IRCallArgs[IRArgPos++] = elt;
5025       }
5026       assert(IRArgPos == FirstIRArg + NumIRArgs);
5027 
5028       if (tempSize) {
5029         EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
5030       }
5031 
5032       break;
5033     }
5034 
5035     case ABIArgInfo::Expand: {
5036       unsigned IRArgPos = FirstIRArg;
5037       ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
5038       assert(IRArgPos == FirstIRArg + NumIRArgs);
5039       break;
5040     }
5041     }
5042   }
5043 
5044   const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
5045   llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
5046 
5047   // If we're using inalloca, set up that argument.
5048   if (ArgMemory.isValid()) {
5049     llvm::Value *Arg = ArgMemory.getPointer();
5050     if (CallInfo.isVariadic()) {
5051       // When passing non-POD arguments by value to variadic functions, we will
5052       // end up with a variadic prototype and an inalloca call site.  In such
5053       // cases, we can't do any parameter mismatch checks.  Give up and bitcast
5054       // the callee.
5055       unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
5056       CalleePtr =
5057           Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
5058     } else {
5059       llvm::Type *LastParamTy =
5060           IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
5061       if (Arg->getType() != LastParamTy) {
5062 #ifndef NDEBUG
5063         // Assert that these structs have equivalent element types.
5064         llvm::StructType *FullTy = CallInfo.getArgStruct();
5065         llvm::StructType *DeclaredTy = cast<llvm::StructType>(
5066             cast<llvm::PointerType>(LastParamTy)->getElementType());
5067         assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
5068         for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
5069                                                 DE = DeclaredTy->element_end(),
5070                                                 FI = FullTy->element_begin();
5071              DI != DE; ++DI, ++FI)
5072           assert(*DI == *FI);
5073 #endif
5074         Arg = Builder.CreateBitCast(Arg, LastParamTy);
5075       }
5076     }
5077     assert(IRFunctionArgs.hasInallocaArg());
5078     IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
5079   }
5080 
5081   // 2. Prepare the function pointer.
5082 
5083   // If the callee is a bitcast of a non-variadic function to have a
5084   // variadic function pointer type, check to see if we can remove the
5085   // bitcast.  This comes up with unprototyped functions.
5086   //
5087   // This makes the IR nicer, but more importantly it ensures that we
5088   // can inline the function at -O0 if it is marked always_inline.
5089   auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
5090                                    llvm::Value *Ptr) -> llvm::Function * {
5091     if (!CalleeFT->isVarArg())
5092       return nullptr;
5093 
5094     // Get underlying value if it's a bitcast
5095     if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
5096       if (CE->getOpcode() == llvm::Instruction::BitCast)
5097         Ptr = CE->getOperand(0);
5098     }
5099 
5100     llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
5101     if (!OrigFn)
5102       return nullptr;
5103 
5104     llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
5105 
5106     // If the original type is variadic, or if any of the component types
5107     // disagree, we cannot remove the cast.
5108     if (OrigFT->isVarArg() ||
5109         OrigFT->getNumParams() != CalleeFT->getNumParams() ||
5110         OrigFT->getReturnType() != CalleeFT->getReturnType())
5111       return nullptr;
5112 
5113     for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
5114       if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
5115         return nullptr;
5116 
5117     return OrigFn;
5118   };
5119 
5120   if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
5121     CalleePtr = OrigFn;
5122     IRFuncTy = OrigFn->getFunctionType();
5123   }
5124 
5125   // 3. Perform the actual call.
5126 
5127   // Deactivate any cleanups that we're supposed to do immediately before
5128   // the call.
5129   if (!CallArgs.getCleanupsToDeactivate().empty())
5130     deactivateArgCleanupsBeforeCall(*this, CallArgs);
5131 
5132   // Assert that the arguments we computed match up.  The IR verifier
5133   // will catch this, but this is a common enough source of problems
5134   // during IRGen changes that it's way better for debugging to catch
5135   // it ourselves here.
5136 #ifndef NDEBUG
5137   assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
5138   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
5139     // Inalloca argument can have different type.
5140     if (IRFunctionArgs.hasInallocaArg() &&
5141         i == IRFunctionArgs.getInallocaArgNo())
5142       continue;
5143     if (i < IRFuncTy->getNumParams())
5144       assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
5145   }
5146 #endif
5147 
5148   // Update the largest vector width if any arguments have vector types.
5149   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
5150     if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
5151       LargestVectorWidth =
5152           std::max((uint64_t)LargestVectorWidth,
5153                    VT->getPrimitiveSizeInBits().getKnownMinSize());
5154   }
5155 
5156   // Compute the calling convention and attributes.
5157   unsigned CallingConv;
5158   llvm::AttributeList Attrs;
5159   CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
5160                              Callee.getAbstractInfo(), Attrs, CallingConv,
5161                              /*AttrOnCallSite=*/true,
5162                              /*IsThunk=*/false);
5163 
5164   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
5165     if (FD->hasAttr<StrictFPAttr>())
5166       // All calls within a strictfp function are marked strictfp
5167       Attrs =
5168         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5169                            llvm::Attribute::StrictFP);
5170 
5171   // Add call-site nomerge attribute if exists.
5172   if (InNoMergeAttributedStmt)
5173     Attrs =
5174         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5175                            llvm::Attribute::NoMerge);
5176 
5177   // Apply some call-site-specific attributes.
5178   // TODO: work this into building the attribute set.
5179 
5180   // Apply always_inline to all calls within flatten functions.
5181   // FIXME: should this really take priority over __try, below?
5182   if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
5183       !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
5184     Attrs =
5185         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5186                            llvm::Attribute::AlwaysInline);
5187   }
5188 
5189   // Disable inlining inside SEH __try blocks.
5190   if (isSEHTryScope()) {
5191     Attrs =
5192         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5193                            llvm::Attribute::NoInline);
5194   }
5195 
5196   // Decide whether to use a call or an invoke.
5197   bool CannotThrow;
5198   if (currentFunctionUsesSEHTry()) {
5199     // SEH cares about asynchronous exceptions, so everything can "throw."
5200     CannotThrow = false;
5201   } else if (isCleanupPadScope() &&
5202              EHPersonality::get(*this).isMSVCXXPersonality()) {
5203     // The MSVC++ personality will implicitly terminate the program if an
5204     // exception is thrown during a cleanup outside of a try/catch.
5205     // We don't need to model anything in IR to get this behavior.
5206     CannotThrow = true;
5207   } else {
5208     // Otherwise, nounwind call sites will never throw.
5209     CannotThrow = Attrs.hasFnAttribute(llvm::Attribute::NoUnwind);
5210 
5211     if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5212       if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5213         CannotThrow = true;
5214   }
5215 
5216   // If we made a temporary, be sure to clean up after ourselves. Note that we
5217   // can't depend on being inside of an ExprWithCleanups, so we need to manually
5218   // pop this cleanup later on. Being eager about this is OK, since this
5219   // temporary is 'invisible' outside of the callee.
5220   if (UnusedReturnSizePtr)
5221     pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
5222                                          UnusedReturnSizePtr);
5223 
5224   llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
5225 
5226   SmallVector<llvm::OperandBundleDef, 1> BundleList =
5227       getBundlesForFunclet(CalleePtr);
5228 
5229   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
5230     if (FD->hasAttr<StrictFPAttr>())
5231       // All calls within a strictfp function are marked strictfp
5232       Attrs =
5233         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5234                            llvm::Attribute::StrictFP);
5235 
5236   AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
5237   Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5238 
5239   AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
5240   Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5241 
5242   // Emit the actual call/invoke instruction.
5243   llvm::CallBase *CI;
5244   if (!InvokeDest) {
5245     CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
5246   } else {
5247     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
5248     CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
5249                               BundleList);
5250     EmitBlock(Cont);
5251   }
5252   if (callOrInvoke)
5253     *callOrInvoke = CI;
5254 
5255   // If this is within a function that has the guard(nocf) attribute and is an
5256   // indirect call, add the "guard_nocf" attribute to this call to indicate that
5257   // Control Flow Guard checks should not be added, even if the call is inlined.
5258   if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
5259     if (const auto *A = FD->getAttr<CFGuardAttr>()) {
5260       if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
5261         Attrs = Attrs.addAttribute(
5262             getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
5263     }
5264   }
5265 
5266   // Apply the attributes and calling convention.
5267   CI->setAttributes(Attrs);
5268   CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
5269 
5270   // Apply various metadata.
5271 
5272   if (!CI->getType()->isVoidTy())
5273     CI->setName("call");
5274 
5275   // Update largest vector width from the return type.
5276   if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
5277     LargestVectorWidth =
5278         std::max((uint64_t)LargestVectorWidth,
5279                  VT->getPrimitiveSizeInBits().getKnownMinSize());
5280 
5281   // Insert instrumentation or attach profile metadata at indirect call sites.
5282   // For more details, see the comment before the definition of
5283   // IPVK_IndirectCallTarget in InstrProfData.inc.
5284   if (!CI->getCalledFunction())
5285     PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
5286                      CI, CalleePtr);
5287 
5288   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
5289   // optimizer it can aggressively ignore unwind edges.
5290   if (CGM.getLangOpts().ObjCAutoRefCount)
5291     AddObjCARCExceptionMetadata(CI);
5292 
5293   // Set tail call kind if necessary.
5294   if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
5295     if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
5296       Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
5297     else if (IsMustTail)
5298       Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
5299   }
5300 
5301   // Add metadata for calls to MSAllocator functions
5302   if (getDebugInfo() && TargetDecl &&
5303       TargetDecl->hasAttr<MSAllocatorAttr>())
5304     getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);
5305 
5306   // 4. Finish the call.
5307 
5308   // If the call doesn't return, finish the basic block and clear the
5309   // insertion point; this allows the rest of IRGen to discard
5310   // unreachable code.
5311   if (CI->doesNotReturn()) {
5312     if (UnusedReturnSizePtr)
5313       PopCleanupBlock();
5314 
5315     // Strip away the noreturn attribute to better diagnose unreachable UB.
5316     if (SanOpts.has(SanitizerKind::Unreachable)) {
5317       // Also remove from function since CallBase::hasFnAttr additionally checks
5318       // attributes of the called function.
5319       if (auto *F = CI->getCalledFunction())
5320         F->removeFnAttr(llvm::Attribute::NoReturn);
5321       CI->removeAttribute(llvm::AttributeList::FunctionIndex,
5322                           llvm::Attribute::NoReturn);
5323 
5324       // Avoid incompatibility with ASan which relies on the `noreturn`
5325       // attribute to insert handler calls.
5326       if (SanOpts.hasOneOf(SanitizerKind::Address |
5327                            SanitizerKind::KernelAddress)) {
5328         SanitizerScope SanScope(this);
5329         llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
5330         Builder.SetInsertPoint(CI);
5331         auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
5332         llvm::FunctionCallee Fn =
5333             CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
5334         EmitNounwindRuntimeCall(Fn);
5335       }
5336     }
5337 
5338     EmitUnreachable(Loc);
5339     Builder.ClearInsertionPoint();
5340 
5341     // FIXME: For now, emit a dummy basic block because expr emitters in
5342     // generally are not ready to handle emitting expressions at unreachable
5343     // points.
5344     EnsureInsertPoint();
5345 
5346     // Return a reasonable RValue.
5347     return GetUndefRValue(RetTy);
5348   }
5349 
5350   // If this is a musttail call, return immediately. We do not branch to the
5351   // epilogue in this case.
5352   if (IsMustTail) {
5353     for (auto it = EHStack.find(CurrentCleanupScopeDepth); it != EHStack.end();
5354          ++it) {
5355       EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(&*it);
5356       if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn()))
5357         CGM.ErrorUnsupported(MustTailCall, "tail call skipping over cleanups");
5358     }
5359     if (CI->getType()->isVoidTy())
5360       Builder.CreateRetVoid();
5361     else
5362       Builder.CreateRet(CI);
5363     Builder.ClearInsertionPoint();
5364     EnsureInsertPoint();
5365     return GetUndefRValue(RetTy);
5366   }
5367 
5368   // Perform the swifterror writeback.
5369   if (swiftErrorTemp.isValid()) {
5370     llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
5371     Builder.CreateStore(errorResult, swiftErrorArg);
5372   }
5373 
5374   // Emit any call-associated writebacks immediately.  Arguably this
5375   // should happen after any return-value munging.
5376   if (CallArgs.hasWritebacks())
5377     emitWritebacks(*this, CallArgs);
5378 
5379   // The stack cleanup for inalloca arguments has to run out of the normal
5380   // lexical order, so deactivate it and run it manually here.
5381   CallArgs.freeArgumentMemory(*this);
5382 
5383   // Extract the return value.
5384   RValue Ret = [&] {
5385     switch (RetAI.getKind()) {
5386     case ABIArgInfo::CoerceAndExpand: {
5387       auto coercionType = RetAI.getCoerceAndExpandType();
5388 
5389       Address addr = SRetPtr;
5390       addr = Builder.CreateElementBitCast(addr, coercionType);
5391 
5392       assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
5393       bool requiresExtract = isa<llvm::StructType>(CI->getType());
5394 
5395       unsigned unpaddedIndex = 0;
5396       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5397         llvm::Type *eltType = coercionType->getElementType(i);
5398         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
5399         Address eltAddr = Builder.CreateStructGEP(addr, i);
5400         llvm::Value *elt = CI;
5401         if (requiresExtract)
5402           elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
5403         else
5404           assert(unpaddedIndex == 0);
5405         Builder.CreateStore(elt, eltAddr);
5406       }
5407       // FALLTHROUGH
5408       LLVM_FALLTHROUGH;
5409     }
5410 
5411     case ABIArgInfo::InAlloca:
5412     case ABIArgInfo::Indirect: {
5413       RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
5414       if (UnusedReturnSizePtr)
5415         PopCleanupBlock();
5416       return ret;
5417     }
5418 
5419     case ABIArgInfo::Ignore:
5420       // If we are ignoring an argument that had a result, make sure to
5421       // construct the appropriate return value for our caller.
5422       return GetUndefRValue(RetTy);
5423 
5424     case ABIArgInfo::Extend:
5425     case ABIArgInfo::Direct: {
5426       llvm::Type *RetIRTy = ConvertType(RetTy);
5427       if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
5428         switch (getEvaluationKind(RetTy)) {
5429         case TEK_Complex: {
5430           llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
5431           llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
5432           return RValue::getComplex(std::make_pair(Real, Imag));
5433         }
5434         case TEK_Aggregate: {
5435           Address DestPtr = ReturnValue.getValue();
5436           bool DestIsVolatile = ReturnValue.isVolatile();
5437 
5438           if (!DestPtr.isValid()) {
5439             DestPtr = CreateMemTemp(RetTy, "agg.tmp");
5440             DestIsVolatile = false;
5441           }
5442           EmitAggregateStore(CI, DestPtr, DestIsVolatile);
5443           return RValue::getAggregate(DestPtr);
5444         }
5445         case TEK_Scalar: {
5446           // If the argument doesn't match, perform a bitcast to coerce it.  This
5447           // can happen due to trivial type mismatches.
5448           llvm::Value *V = CI;
5449           if (V->getType() != RetIRTy)
5450             V = Builder.CreateBitCast(V, RetIRTy);
5451           return RValue::get(V);
5452         }
5453         }
5454         llvm_unreachable("bad evaluation kind");
5455       }
5456 
5457       Address DestPtr = ReturnValue.getValue();
5458       bool DestIsVolatile = ReturnValue.isVolatile();
5459 
5460       if (!DestPtr.isValid()) {
5461         DestPtr = CreateMemTemp(RetTy, "coerce");
5462         DestIsVolatile = false;
5463       }
5464 
5465       // If the value is offset in memory, apply the offset now.
5466       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
5467       CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
5468 
5469       return convertTempToRValue(DestPtr, RetTy, SourceLocation());
5470     }
5471 
5472     case ABIArgInfo::Expand:
5473     case ABIArgInfo::IndirectAliased:
5474       llvm_unreachable("Invalid ABI kind for return argument");
5475     }
5476 
5477     llvm_unreachable("Unhandled ABIArgInfo::Kind");
5478   } ();
5479 
5480   // Emit the assume_aligned check on the return value.
5481   if (Ret.isScalar() && TargetDecl) {
5482     AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5483     AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5484   }
5485 
5486   // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
5487   // we can't use the full cleanup mechanism.
5488   for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
5489     LifetimeEnd.Emit(*this, /*Flags=*/{});
5490 
5491   if (!ReturnValue.isExternallyDestructed() &&
5492       RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
5493     pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
5494                 RetTy);
5495 
5496   return Ret;
5497 }
5498 
5499 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
5500   if (isVirtual()) {
5501     const CallExpr *CE = getVirtualCallExpr();
5502     return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
5503         CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
5504         CE ? CE->getBeginLoc() : SourceLocation());
5505   }
5506 
5507   return *this;
5508 }
5509 
5510 /* VarArg handling */
5511 
5512 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
5513   VAListAddr = VE->isMicrosoftABI()
5514                  ? EmitMSVAListRef(VE->getSubExpr())
5515                  : EmitVAListRef(VE->getSubExpr());
5516   QualType Ty = VE->getType();
5517   if (VE->isMicrosoftABI())
5518     return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
5519   return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
5520 }
5521