1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // These classes wrap the information about a call or function
10 // definition used to handle ABI compliancy.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGCall.h"
15 #include "ABIInfo.h"
16 #include "CGBlocks.h"
17 #include "CGCXXABI.h"
18 #include "CGCleanup.h"
19 #include "CGRecordLayout.h"
20 #include "CodeGenFunction.h"
21 #include "CodeGenModule.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/Attr.h"
24 #include "clang/AST/Decl.h"
25 #include "clang/AST/DeclCXX.h"
26 #include "clang/AST/DeclObjC.h"
27 #include "clang/Basic/CodeGenOptions.h"
28 #include "clang/Basic/TargetBuiltins.h"
29 #include "clang/Basic/TargetInfo.h"
30 #include "clang/CodeGen/CGFunctionInfo.h"
31 #include "clang/CodeGen/SwiftCallingConv.h"
32 #include "llvm/ADT/StringExtras.h"
33 #include "llvm/Analysis/ValueTracking.h"
34 #include "llvm/IR/Assumptions.h"
35 #include "llvm/IR/Attributes.h"
36 #include "llvm/IR/CallingConv.h"
37 #include "llvm/IR/DataLayout.h"
38 #include "llvm/IR/InlineAsm.h"
39 #include "llvm/IR/IntrinsicInst.h"
40 #include "llvm/IR/Intrinsics.h"
41 #include "llvm/Transforms/Utils/Local.h"
42 using namespace clang;
43 using namespace CodeGen;
44 
45 /***/
46 
47 unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
48   switch (CC) {
49   default: return llvm::CallingConv::C;
50   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
51   case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
52   case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
53   case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
54   case CC_Win64: return llvm::CallingConv::Win64;
55   case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
56   case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
57   case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
58   case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
59   // TODO: Add support for __pascal to LLVM.
60   case CC_X86Pascal: return llvm::CallingConv::C;
61   // TODO: Add support for __vectorcall to LLVM.
62   case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
63   case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
64   case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
65   case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
66   case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
67   case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
68   case CC_Swift: return llvm::CallingConv::Swift;
69   }
70 }
71 
72 /// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
73 /// qualification. Either or both of RD and MD may be null. A null RD indicates
74 /// that there is no meaningful 'this' type, and a null MD can occur when
75 /// calling a method pointer.
76 CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
77                                          const CXXMethodDecl *MD) {
78   QualType RecTy;
79   if (RD)
80     RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
81   else
82     RecTy = Context.VoidTy;
83 
84   if (MD)
85     RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace());
86   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
87 }
88 
89 /// Returns the canonical formal type of the given C++ method.
90 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
91   return MD->getType()->getCanonicalTypeUnqualified()
92            .getAs<FunctionProtoType>();
93 }
94 
95 /// Returns the "extra-canonicalized" return type, which discards
96 /// qualifiers on the return type.  Codegen doesn't care about them,
97 /// and it makes ABI code a little easier to be able to assume that
98 /// all parameter and return types are top-level unqualified.
99 static CanQualType GetReturnType(QualType RetTy) {
100   return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
101 }
102 
103 /// Arrange the argument and result information for a value of the given
104 /// unprototyped freestanding function type.
105 const CGFunctionInfo &
106 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
107   // When translating an unprototyped function type, always use a
108   // variadic type.
109   return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
110                                  /*instanceMethod=*/false,
111                                  /*chainCall=*/false, None,
112                                  FTNP->getExtInfo(), {}, RequiredArgs(0));
113 }
114 
115 static void addExtParameterInfosForCall(
116          llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
117                                         const FunctionProtoType *proto,
118                                         unsigned prefixArgs,
119                                         unsigned totalArgs) {
120   assert(proto->hasExtParameterInfos());
121   assert(paramInfos.size() <= prefixArgs);
122   assert(proto->getNumParams() + prefixArgs <= totalArgs);
123 
124   paramInfos.reserve(totalArgs);
125 
126   // Add default infos for any prefix args that don't already have infos.
127   paramInfos.resize(prefixArgs);
128 
129   // Add infos for the prototype.
130   for (const auto &ParamInfo : proto->getExtParameterInfos()) {
131     paramInfos.push_back(ParamInfo);
132     // pass_object_size params have no parameter info.
133     if (ParamInfo.hasPassObjectSize())
134       paramInfos.emplace_back();
135   }
136 
137   assert(paramInfos.size() <= totalArgs &&
138          "Did we forget to insert pass_object_size args?");
139   // Add default infos for the variadic and/or suffix arguments.
140   paramInfos.resize(totalArgs);
141 }
142 
143 /// Adds the formal parameters in FPT to the given prefix. If any parameter in
144 /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
145 static void appendParameterTypes(const CodeGenTypes &CGT,
146                                  SmallVectorImpl<CanQualType> &prefix,
147               SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
148                                  CanQual<FunctionProtoType> FPT) {
149   // Fast path: don't touch param info if we don't need to.
150   if (!FPT->hasExtParameterInfos()) {
151     assert(paramInfos.empty() &&
152            "We have paramInfos, but the prototype doesn't?");
153     prefix.append(FPT->param_type_begin(), FPT->param_type_end());
154     return;
155   }
156 
157   unsigned PrefixSize = prefix.size();
158   // In the vast majority of cases, we'll have precisely FPT->getNumParams()
159   // parameters; the only thing that can change this is the presence of
160   // pass_object_size. So, we preallocate for the common case.
161   prefix.reserve(prefix.size() + FPT->getNumParams());
162 
163   auto ExtInfos = FPT->getExtParameterInfos();
164   assert(ExtInfos.size() == FPT->getNumParams());
165   for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
166     prefix.push_back(FPT->getParamType(I));
167     if (ExtInfos[I].hasPassObjectSize())
168       prefix.push_back(CGT.getContext().getSizeType());
169   }
170 
171   addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
172                               prefix.size());
173 }
174 
175 /// Arrange the LLVM function layout for a value of the given function
176 /// type, on top of any implicit parameters already stored.
177 static const CGFunctionInfo &
178 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
179                         SmallVectorImpl<CanQualType> &prefix,
180                         CanQual<FunctionProtoType> FTP) {
181   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
182   RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
183   // FIXME: Kill copy.
184   appendParameterTypes(CGT, prefix, paramInfos, FTP);
185   CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
186 
187   return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
188                                      /*chainCall=*/false, prefix,
189                                      FTP->getExtInfo(), paramInfos,
190                                      Required);
191 }
192 
193 /// Arrange the argument and result information for a value of the
194 /// given freestanding function type.
195 const CGFunctionInfo &
196 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
197   SmallVector<CanQualType, 16> argTypes;
198   return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
199                                    FTP);
200 }
201 
202 static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
203                                                bool IsWindows) {
204   // Set the appropriate calling convention for the Function.
205   if (D->hasAttr<StdCallAttr>())
206     return CC_X86StdCall;
207 
208   if (D->hasAttr<FastCallAttr>())
209     return CC_X86FastCall;
210 
211   if (D->hasAttr<RegCallAttr>())
212     return CC_X86RegCall;
213 
214   if (D->hasAttr<ThisCallAttr>())
215     return CC_X86ThisCall;
216 
217   if (D->hasAttr<VectorCallAttr>())
218     return CC_X86VectorCall;
219 
220   if (D->hasAttr<PascalAttr>())
221     return CC_X86Pascal;
222 
223   if (PcsAttr *PCS = D->getAttr<PcsAttr>())
224     return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
225 
226   if (D->hasAttr<AArch64VectorPcsAttr>())
227     return CC_AArch64VectorCall;
228 
229   if (D->hasAttr<IntelOclBiccAttr>())
230     return CC_IntelOclBicc;
231 
232   if (D->hasAttr<MSABIAttr>())
233     return IsWindows ? CC_C : CC_Win64;
234 
235   if (D->hasAttr<SysVABIAttr>())
236     return IsWindows ? CC_X86_64SysV : CC_C;
237 
238   if (D->hasAttr<PreserveMostAttr>())
239     return CC_PreserveMost;
240 
241   if (D->hasAttr<PreserveAllAttr>())
242     return CC_PreserveAll;
243 
244   return CC_C;
245 }
246 
247 /// Arrange the argument and result information for a call to an
248 /// unknown C++ non-static member function of the given abstract type.
249 /// (A null RD means we don't have any meaningful "this" argument type,
250 ///  so fall back to a generic pointer type).
251 /// The member function must be an ordinary function, i.e. not a
252 /// constructor or destructor.
253 const CGFunctionInfo &
254 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
255                                    const FunctionProtoType *FTP,
256                                    const CXXMethodDecl *MD) {
257   SmallVector<CanQualType, 16> argTypes;
258 
259   // Add the 'this' pointer.
260   argTypes.push_back(DeriveThisType(RD, MD));
261 
262   return ::arrangeLLVMFunctionInfo(
263       *this, true, argTypes,
264       FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
265 }
266 
267 /// Set calling convention for CUDA/HIP kernel.
268 static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
269                                            const FunctionDecl *FD) {
270   if (FD->hasAttr<CUDAGlobalAttr>()) {
271     const FunctionType *FT = FTy->getAs<FunctionType>();
272     CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
273     FTy = FT->getCanonicalTypeUnqualified();
274   }
275 }
276 
277 /// Arrange the argument and result information for a declaration or
278 /// definition of the given C++ non-static member function.  The
279 /// member function must be an ordinary function, i.e. not a
280 /// constructor or destructor.
281 const CGFunctionInfo &
282 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
283   assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
284   assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
285 
286   CanQualType FT = GetFormalType(MD).getAs<Type>();
287   setCUDAKernelCallingConvention(FT, CGM, MD);
288   auto prototype = FT.getAs<FunctionProtoType>();
289 
290   if (MD->isInstance()) {
291     // The abstract case is perfectly fine.
292     const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
293     return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
294   }
295 
296   return arrangeFreeFunctionType(prototype);
297 }
298 
299 bool CodeGenTypes::inheritingCtorHasParams(
300     const InheritedConstructor &Inherited, CXXCtorType Type) {
301   // Parameters are unnecessary if we're constructing a base class subobject
302   // and the inherited constructor lives in a virtual base.
303   return Type == Ctor_Complete ||
304          !Inherited.getShadowDecl()->constructsVirtualBase() ||
305          !Target.getCXXABI().hasConstructorVariants();
306 }
307 
308 const CGFunctionInfo &
309 CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
310   auto *MD = cast<CXXMethodDecl>(GD.getDecl());
311 
312   SmallVector<CanQualType, 16> argTypes;
313   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
314   argTypes.push_back(DeriveThisType(MD->getParent(), MD));
315 
316   bool PassParams = true;
317 
318   if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
319     // A base class inheriting constructor doesn't get forwarded arguments
320     // needed to construct a virtual base (or base class thereof).
321     if (auto Inherited = CD->getInheritedConstructor())
322       PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
323   }
324 
325   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
326 
327   // Add the formal parameters.
328   if (PassParams)
329     appendParameterTypes(*this, argTypes, paramInfos, FTP);
330 
331   CGCXXABI::AddedStructorArgCounts AddedArgs =
332       TheCXXABI.buildStructorSignature(GD, argTypes);
333   if (!paramInfos.empty()) {
334     // Note: prefix implies after the first param.
335     if (AddedArgs.Prefix)
336       paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
337                         FunctionProtoType::ExtParameterInfo{});
338     if (AddedArgs.Suffix)
339       paramInfos.append(AddedArgs.Suffix,
340                         FunctionProtoType::ExtParameterInfo{});
341   }
342 
343   RequiredArgs required =
344       (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
345                                       : RequiredArgs::All);
346 
347   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
348   CanQualType resultType = TheCXXABI.HasThisReturn(GD)
349                                ? argTypes.front()
350                                : TheCXXABI.hasMostDerivedReturn(GD)
351                                      ? CGM.getContext().VoidPtrTy
352                                      : Context.VoidTy;
353   return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
354                                  /*chainCall=*/false, argTypes, extInfo,
355                                  paramInfos, required);
356 }
357 
358 static SmallVector<CanQualType, 16>
359 getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
360   SmallVector<CanQualType, 16> argTypes;
361   for (auto &arg : args)
362     argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
363   return argTypes;
364 }
365 
366 static SmallVector<CanQualType, 16>
367 getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
368   SmallVector<CanQualType, 16> argTypes;
369   for (auto &arg : args)
370     argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
371   return argTypes;
372 }
373 
374 static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
375 getExtParameterInfosForCall(const FunctionProtoType *proto,
376                             unsigned prefixArgs, unsigned totalArgs) {
377   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
378   if (proto->hasExtParameterInfos()) {
379     addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
380   }
381   return result;
382 }
383 
384 /// Arrange a call to a C++ method, passing the given arguments.
385 ///
386 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
387 /// parameter.
388 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
389 /// args.
390 /// PassProtoArgs indicates whether `args` has args for the parameters in the
391 /// given CXXConstructorDecl.
392 const CGFunctionInfo &
393 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
394                                         const CXXConstructorDecl *D,
395                                         CXXCtorType CtorKind,
396                                         unsigned ExtraPrefixArgs,
397                                         unsigned ExtraSuffixArgs,
398                                         bool PassProtoArgs) {
399   // FIXME: Kill copy.
400   SmallVector<CanQualType, 16> ArgTypes;
401   for (const auto &Arg : args)
402     ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
403 
404   // +1 for implicit this, which should always be args[0].
405   unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
406 
407   CanQual<FunctionProtoType> FPT = GetFormalType(D);
408   RequiredArgs Required = PassProtoArgs
409                               ? RequiredArgs::forPrototypePlus(
410                                     FPT, TotalPrefixArgs + ExtraSuffixArgs)
411                               : RequiredArgs::All;
412 
413   GlobalDecl GD(D, CtorKind);
414   CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
415                                ? ArgTypes.front()
416                                : TheCXXABI.hasMostDerivedReturn(GD)
417                                      ? CGM.getContext().VoidPtrTy
418                                      : Context.VoidTy;
419 
420   FunctionType::ExtInfo Info = FPT->getExtInfo();
421   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
422   // If the prototype args are elided, we should only have ABI-specific args,
423   // which never have param info.
424   if (PassProtoArgs && FPT->hasExtParameterInfos()) {
425     // ABI-specific suffix arguments are treated the same as variadic arguments.
426     addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
427                                 ArgTypes.size());
428   }
429   return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
430                                  /*chainCall=*/false, ArgTypes, Info,
431                                  ParamInfos, Required);
432 }
433 
434 /// Arrange the argument and result information for the declaration or
435 /// definition of the given function.
436 const CGFunctionInfo &
437 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
438   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
439     if (MD->isInstance())
440       return arrangeCXXMethodDeclaration(MD);
441 
442   CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
443 
444   assert(isa<FunctionType>(FTy));
445   setCUDAKernelCallingConvention(FTy, CGM, FD);
446 
447   // When declaring a function without a prototype, always use a
448   // non-variadic type.
449   if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
450     return arrangeLLVMFunctionInfo(
451         noProto->getReturnType(), /*instanceMethod=*/false,
452         /*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All);
453   }
454 
455   return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
456 }
457 
458 /// Arrange the argument and result information for the declaration or
459 /// definition of an Objective-C method.
460 const CGFunctionInfo &
461 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
462   // It happens that this is the same as a call with no optional
463   // arguments, except also using the formal 'self' type.
464   return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
465 }
466 
467 /// Arrange the argument and result information for the function type
468 /// through which to perform a send to the given Objective-C method,
469 /// using the given receiver type.  The receiver type is not always
470 /// the 'self' type of the method or even an Objective-C pointer type.
471 /// This is *not* the right method for actually performing such a
472 /// message send, due to the possibility of optional arguments.
473 const CGFunctionInfo &
474 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
475                                               QualType receiverType) {
476   SmallVector<CanQualType, 16> argTys;
477   SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
478   argTys.push_back(Context.getCanonicalParamType(receiverType));
479   argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
480   // FIXME: Kill copy?
481   for (const auto *I : MD->parameters()) {
482     argTys.push_back(Context.getCanonicalParamType(I->getType()));
483     auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
484         I->hasAttr<NoEscapeAttr>());
485     extParamInfos.push_back(extParamInfo);
486   }
487 
488   FunctionType::ExtInfo einfo;
489   bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
490   einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
491 
492   if (getContext().getLangOpts().ObjCAutoRefCount &&
493       MD->hasAttr<NSReturnsRetainedAttr>())
494     einfo = einfo.withProducesResult(true);
495 
496   RequiredArgs required =
497     (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
498 
499   return arrangeLLVMFunctionInfo(
500       GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
501       /*chainCall=*/false, argTys, einfo, extParamInfos, required);
502 }
503 
504 const CGFunctionInfo &
505 CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
506                                                  const CallArgList &args) {
507   auto argTypes = getArgTypesForCall(Context, args);
508   FunctionType::ExtInfo einfo;
509 
510   return arrangeLLVMFunctionInfo(
511       GetReturnType(returnType), /*instanceMethod=*/false,
512       /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
513 }
514 
515 const CGFunctionInfo &
516 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
517   // FIXME: Do we need to handle ObjCMethodDecl?
518   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
519 
520   if (isa<CXXConstructorDecl>(GD.getDecl()) ||
521       isa<CXXDestructorDecl>(GD.getDecl()))
522     return arrangeCXXStructorDeclaration(GD);
523 
524   return arrangeFunctionDeclaration(FD);
525 }
526 
527 /// Arrange a thunk that takes 'this' as the first parameter followed by
528 /// varargs.  Return a void pointer, regardless of the actual return type.
529 /// The body of the thunk will end in a musttail call to a function of the
530 /// correct type, and the caller will bitcast the function to the correct
531 /// prototype.
532 const CGFunctionInfo &
533 CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
534   assert(MD->isVirtual() && "only methods have thunks");
535   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
536   CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
537   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
538                                  /*chainCall=*/false, ArgTys,
539                                  FTP->getExtInfo(), {}, RequiredArgs(1));
540 }
541 
542 const CGFunctionInfo &
543 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
544                                    CXXCtorType CT) {
545   assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
546 
547   CanQual<FunctionProtoType> FTP = GetFormalType(CD);
548   SmallVector<CanQualType, 2> ArgTys;
549   const CXXRecordDecl *RD = CD->getParent();
550   ArgTys.push_back(DeriveThisType(RD, CD));
551   if (CT == Ctor_CopyingClosure)
552     ArgTys.push_back(*FTP->param_type_begin());
553   if (RD->getNumVBases() > 0)
554     ArgTys.push_back(Context.IntTy);
555   CallingConv CC = Context.getDefaultCallingConvention(
556       /*IsVariadic=*/false, /*IsCXXMethod=*/true);
557   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
558                                  /*chainCall=*/false, ArgTys,
559                                  FunctionType::ExtInfo(CC), {},
560                                  RequiredArgs::All);
561 }
562 
563 /// Arrange a call as unto a free function, except possibly with an
564 /// additional number of formal parameters considered required.
565 static const CGFunctionInfo &
566 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
567                             CodeGenModule &CGM,
568                             const CallArgList &args,
569                             const FunctionType *fnType,
570                             unsigned numExtraRequiredArgs,
571                             bool chainCall) {
572   assert(args.size() >= numExtraRequiredArgs);
573 
574   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
575 
576   // In most cases, there are no optional arguments.
577   RequiredArgs required = RequiredArgs::All;
578 
579   // If we have a variadic prototype, the required arguments are the
580   // extra prefix plus the arguments in the prototype.
581   if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
582     if (proto->isVariadic())
583       required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);
584 
585     if (proto->hasExtParameterInfos())
586       addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
587                                   args.size());
588 
589   // If we don't have a prototype at all, but we're supposed to
590   // explicitly use the variadic convention for unprototyped calls,
591   // treat all of the arguments as required but preserve the nominal
592   // possibility of variadics.
593   } else if (CGM.getTargetCodeGenInfo()
594                 .isNoProtoCallVariadic(args,
595                                        cast<FunctionNoProtoType>(fnType))) {
596     required = RequiredArgs(args.size());
597   }
598 
599   // FIXME: Kill copy.
600   SmallVector<CanQualType, 16> argTypes;
601   for (const auto &arg : args)
602     argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
603   return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
604                                      /*instanceMethod=*/false, chainCall,
605                                      argTypes, fnType->getExtInfo(), paramInfos,
606                                      required);
607 }
608 
609 /// Figure out the rules for calling a function with the given formal
610 /// type using the given arguments.  The arguments are necessary
611 /// because the function might be unprototyped, in which case it's
612 /// target-dependent in crazy ways.
613 const CGFunctionInfo &
614 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
615                                       const FunctionType *fnType,
616                                       bool chainCall) {
617   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
618                                      chainCall ? 1 : 0, chainCall);
619 }
620 
621 /// A block function is essentially a free function with an
622 /// extra implicit argument.
623 const CGFunctionInfo &
624 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
625                                        const FunctionType *fnType) {
626   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
627                                      /*chainCall=*/false);
628 }
629 
630 const CGFunctionInfo &
631 CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
632                                               const FunctionArgList &params) {
633   auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
634   auto argTypes = getArgTypesForDeclaration(Context, params);
635 
636   return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
637                                  /*instanceMethod*/ false, /*chainCall*/ false,
638                                  argTypes, proto->getExtInfo(), paramInfos,
639                                  RequiredArgs::forPrototypePlus(proto, 1));
640 }
641 
642 const CGFunctionInfo &
643 CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
644                                          const CallArgList &args) {
645   // FIXME: Kill copy.
646   SmallVector<CanQualType, 16> argTypes;
647   for (const auto &Arg : args)
648     argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
649   return arrangeLLVMFunctionInfo(
650       GetReturnType(resultType), /*instanceMethod=*/false,
651       /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
652       /*paramInfos=*/ {}, RequiredArgs::All);
653 }
654 
655 const CGFunctionInfo &
656 CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
657                                                 const FunctionArgList &args) {
658   auto argTypes = getArgTypesForDeclaration(Context, args);
659 
660   return arrangeLLVMFunctionInfo(
661       GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
662       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
663 }
664 
665 const CGFunctionInfo &
666 CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
667                                               ArrayRef<CanQualType> argTypes) {
668   return arrangeLLVMFunctionInfo(
669       resultType, /*instanceMethod=*/false, /*chainCall=*/false,
670       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
671 }
672 
673 /// Arrange a call to a C++ method, passing the given arguments.
674 ///
675 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
676 /// does not count `this`.
677 const CGFunctionInfo &
678 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
679                                    const FunctionProtoType *proto,
680                                    RequiredArgs required,
681                                    unsigned numPrefixArgs) {
682   assert(numPrefixArgs + 1 <= args.size() &&
683          "Emitting a call with less args than the required prefix?");
684   // Add one to account for `this`. It's a bit awkward here, but we don't count
685   // `this` in similar places elsewhere.
686   auto paramInfos =
687     getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
688 
689   // FIXME: Kill copy.
690   auto argTypes = getArgTypesForCall(Context, args);
691 
692   FunctionType::ExtInfo info = proto->getExtInfo();
693   return arrangeLLVMFunctionInfo(
694       GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
695       /*chainCall=*/false, argTypes, info, paramInfos, required);
696 }
697 
698 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
699   return arrangeLLVMFunctionInfo(
700       getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
701       None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
702 }
703 
704 const CGFunctionInfo &
705 CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
706                           const CallArgList &args) {
707   assert(signature.arg_size() <= args.size());
708   if (signature.arg_size() == args.size())
709     return signature;
710 
711   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
712   auto sigParamInfos = signature.getExtParameterInfos();
713   if (!sigParamInfos.empty()) {
714     paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
715     paramInfos.resize(args.size());
716   }
717 
718   auto argTypes = getArgTypesForCall(Context, args);
719 
720   assert(signature.getRequiredArgs().allowsOptionalArgs());
721   return arrangeLLVMFunctionInfo(signature.getReturnType(),
722                                  signature.isInstanceMethod(),
723                                  signature.isChainCall(),
724                                  argTypes,
725                                  signature.getExtInfo(),
726                                  paramInfos,
727                                  signature.getRequiredArgs());
728 }
729 
730 namespace clang {
731 namespace CodeGen {
732 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
733 }
734 }
735 
736 /// Arrange the argument and result information for an abstract value
737 /// of a given function type.  This is the method which all of the
738 /// above functions ultimately defer to.
739 const CGFunctionInfo &
740 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
741                                       bool instanceMethod,
742                                       bool chainCall,
743                                       ArrayRef<CanQualType> argTypes,
744                                       FunctionType::ExtInfo info,
745                      ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
746                                       RequiredArgs required) {
747   assert(llvm::all_of(argTypes,
748                       [](CanQualType T) { return T.isCanonicalAsParam(); }));
749 
750   // Lookup or create unique function info.
751   llvm::FoldingSetNodeID ID;
752   CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
753                           required, resultType, argTypes);
754 
755   void *insertPos = nullptr;
756   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
757   if (FI)
758     return *FI;
759 
760   unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
761 
762   // Construct the function info.  We co-allocate the ArgInfos.
763   FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
764                               paramInfos, resultType, argTypes, required);
765   FunctionInfos.InsertNode(FI, insertPos);
766 
767   bool inserted = FunctionsBeingProcessed.insert(FI).second;
768   (void)inserted;
769   assert(inserted && "Recursively being processed?");
770 
771   // Compute ABI information.
772   if (CC == llvm::CallingConv::SPIR_KERNEL) {
773     // Force target independent argument handling for the host visible
774     // kernel functions.
775     computeSPIRKernelABIInfo(CGM, *FI);
776   } else if (info.getCC() == CC_Swift) {
777     swiftcall::computeABIInfo(CGM, *FI);
778   } else {
779     getABIInfo().computeInfo(*FI);
780   }
781 
782   // Loop over all of the computed argument and return value info.  If any of
783   // them are direct or extend without a specified coerce type, specify the
784   // default now.
785   ABIArgInfo &retInfo = FI->getReturnInfo();
786   if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
787     retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
788 
789   for (auto &I : FI->arguments())
790     if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
791       I.info.setCoerceToType(ConvertType(I.type));
792 
793   bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
794   assert(erased && "Not in set?");
795 
796   return *FI;
797 }
798 
799 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
800                                        bool instanceMethod,
801                                        bool chainCall,
802                                        const FunctionType::ExtInfo &info,
803                                        ArrayRef<ExtParameterInfo> paramInfos,
804                                        CanQualType resultType,
805                                        ArrayRef<CanQualType> argTypes,
806                                        RequiredArgs required) {
807   assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
808   assert(!required.allowsOptionalArgs() ||
809          required.getNumRequiredArgs() <= argTypes.size());
810 
811   void *buffer =
812     operator new(totalSizeToAlloc<ArgInfo,             ExtParameterInfo>(
813                                   argTypes.size() + 1, paramInfos.size()));
814 
815   CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
816   FI->CallingConvention = llvmCC;
817   FI->EffectiveCallingConvention = llvmCC;
818   FI->ASTCallingConvention = info.getCC();
819   FI->InstanceMethod = instanceMethod;
820   FI->ChainCall = chainCall;
821   FI->CmseNSCall = info.getCmseNSCall();
822   FI->NoReturn = info.getNoReturn();
823   FI->ReturnsRetained = info.getProducesResult();
824   FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
825   FI->NoCfCheck = info.getNoCfCheck();
826   FI->Required = required;
827   FI->HasRegParm = info.getHasRegParm();
828   FI->RegParm = info.getRegParm();
829   FI->ArgStruct = nullptr;
830   FI->ArgStructAlign = 0;
831   FI->NumArgs = argTypes.size();
832   FI->HasExtParameterInfos = !paramInfos.empty();
833   FI->getArgsBuffer()[0].type = resultType;
834   for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
835     FI->getArgsBuffer()[i + 1].type = argTypes[i];
836   for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
837     FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
838   return FI;
839 }
840 
841 /***/
842 
843 namespace {
844 // ABIArgInfo::Expand implementation.
845 
846 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
847 struct TypeExpansion {
848   enum TypeExpansionKind {
849     // Elements of constant arrays are expanded recursively.
850     TEK_ConstantArray,
851     // Record fields are expanded recursively (but if record is a union, only
852     // the field with the largest size is expanded).
853     TEK_Record,
854     // For complex types, real and imaginary parts are expanded recursively.
855     TEK_Complex,
856     // All other types are not expandable.
857     TEK_None
858   };
859 
860   const TypeExpansionKind Kind;
861 
862   TypeExpansion(TypeExpansionKind K) : Kind(K) {}
863   virtual ~TypeExpansion() {}
864 };
865 
866 struct ConstantArrayExpansion : TypeExpansion {
867   QualType EltTy;
868   uint64_t NumElts;
869 
870   ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
871       : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
872   static bool classof(const TypeExpansion *TE) {
873     return TE->Kind == TEK_ConstantArray;
874   }
875 };
876 
877 struct RecordExpansion : TypeExpansion {
878   SmallVector<const CXXBaseSpecifier *, 1> Bases;
879 
880   SmallVector<const FieldDecl *, 1> Fields;
881 
882   RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
883                   SmallVector<const FieldDecl *, 1> &&Fields)
884       : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
885         Fields(std::move(Fields)) {}
886   static bool classof(const TypeExpansion *TE) {
887     return TE->Kind == TEK_Record;
888   }
889 };
890 
891 struct ComplexExpansion : TypeExpansion {
892   QualType EltTy;
893 
894   ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
895   static bool classof(const TypeExpansion *TE) {
896     return TE->Kind == TEK_Complex;
897   }
898 };
899 
900 struct NoExpansion : TypeExpansion {
901   NoExpansion() : TypeExpansion(TEK_None) {}
902   static bool classof(const TypeExpansion *TE) {
903     return TE->Kind == TEK_None;
904   }
905 };
906 }  // namespace
907 
908 static std::unique_ptr<TypeExpansion>
909 getTypeExpansion(QualType Ty, const ASTContext &Context) {
910   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
911     return std::make_unique<ConstantArrayExpansion>(
912         AT->getElementType(), AT->getSize().getZExtValue());
913   }
914   if (const RecordType *RT = Ty->getAs<RecordType>()) {
915     SmallVector<const CXXBaseSpecifier *, 1> Bases;
916     SmallVector<const FieldDecl *, 1> Fields;
917     const RecordDecl *RD = RT->getDecl();
918     assert(!RD->hasFlexibleArrayMember() &&
919            "Cannot expand structure with flexible array.");
920     if (RD->isUnion()) {
921       // Unions can be here only in degenerative cases - all the fields are same
922       // after flattening. Thus we have to use the "largest" field.
923       const FieldDecl *LargestFD = nullptr;
924       CharUnits UnionSize = CharUnits::Zero();
925 
926       for (const auto *FD : RD->fields()) {
927         if (FD->isZeroLengthBitField(Context))
928           continue;
929         assert(!FD->isBitField() &&
930                "Cannot expand structure with bit-field members.");
931         CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
932         if (UnionSize < FieldSize) {
933           UnionSize = FieldSize;
934           LargestFD = FD;
935         }
936       }
937       if (LargestFD)
938         Fields.push_back(LargestFD);
939     } else {
940       if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
941         assert(!CXXRD->isDynamicClass() &&
942                "cannot expand vtable pointers in dynamic classes");
943         for (const CXXBaseSpecifier &BS : CXXRD->bases())
944           Bases.push_back(&BS);
945       }
946 
947       for (const auto *FD : RD->fields()) {
948         if (FD->isZeroLengthBitField(Context))
949           continue;
950         assert(!FD->isBitField() &&
951                "Cannot expand structure with bit-field members.");
952         Fields.push_back(FD);
953       }
954     }
955     return std::make_unique<RecordExpansion>(std::move(Bases),
956                                               std::move(Fields));
957   }
958   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
959     return std::make_unique<ComplexExpansion>(CT->getElementType());
960   }
961   return std::make_unique<NoExpansion>();
962 }
963 
964 static int getExpansionSize(QualType Ty, const ASTContext &Context) {
965   auto Exp = getTypeExpansion(Ty, Context);
966   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
967     return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
968   }
969   if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
970     int Res = 0;
971     for (auto BS : RExp->Bases)
972       Res += getExpansionSize(BS->getType(), Context);
973     for (auto FD : RExp->Fields)
974       Res += getExpansionSize(FD->getType(), Context);
975     return Res;
976   }
977   if (isa<ComplexExpansion>(Exp.get()))
978     return 2;
979   assert(isa<NoExpansion>(Exp.get()));
980   return 1;
981 }
982 
983 void
984 CodeGenTypes::getExpandedTypes(QualType Ty,
985                                SmallVectorImpl<llvm::Type *>::iterator &TI) {
986   auto Exp = getTypeExpansion(Ty, Context);
987   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
988     for (int i = 0, n = CAExp->NumElts; i < n; i++) {
989       getExpandedTypes(CAExp->EltTy, TI);
990     }
991   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
992     for (auto BS : RExp->Bases)
993       getExpandedTypes(BS->getType(), TI);
994     for (auto FD : RExp->Fields)
995       getExpandedTypes(FD->getType(), TI);
996   } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
997     llvm::Type *EltTy = ConvertType(CExp->EltTy);
998     *TI++ = EltTy;
999     *TI++ = EltTy;
1000   } else {
1001     assert(isa<NoExpansion>(Exp.get()));
1002     *TI++ = ConvertType(Ty);
1003   }
1004 }
1005 
1006 static void forConstantArrayExpansion(CodeGenFunction &CGF,
1007                                       ConstantArrayExpansion *CAE,
1008                                       Address BaseAddr,
1009                                       llvm::function_ref<void(Address)> Fn) {
1010   CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1011   CharUnits EltAlign =
1012     BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1013 
1014   for (int i = 0, n = CAE->NumElts; i < n; i++) {
1015     llvm::Value *EltAddr =
1016       CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
1017     Fn(Address(EltAddr, EltAlign));
1018   }
1019 }
1020 
1021 void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
1022                                          llvm::Function::arg_iterator &AI) {
1023   assert(LV.isSimple() &&
1024          "Unexpected non-simple lvalue during struct expansion.");
1025 
1026   auto Exp = getTypeExpansion(Ty, getContext());
1027   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1028     forConstantArrayExpansion(
1029         *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
1030           LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1031           ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1032         });
1033   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1034     Address This = LV.getAddress(*this);
1035     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1036       // Perform a single step derived-to-base conversion.
1037       Address Base =
1038           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1039                                 /*NullCheckValue=*/false, SourceLocation());
1040       LValue SubLV = MakeAddrLValue(Base, BS->getType());
1041 
1042       // Recurse onto bases.
1043       ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1044     }
1045     for (auto FD : RExp->Fields) {
1046       // FIXME: What are the right qualifiers here?
1047       LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1048       ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1049     }
1050   } else if (isa<ComplexExpansion>(Exp.get())) {
1051     auto realValue = &*AI++;
1052     auto imagValue = &*AI++;
1053     EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1054   } else {
1055     // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
1056     // primitive store.
1057     assert(isa<NoExpansion>(Exp.get()));
1058     if (LV.isBitField())
1059       EmitStoreThroughLValue(RValue::get(&*AI++), LV);
1060     else
1061       EmitStoreOfScalar(&*AI++, LV);
1062   }
1063 }
1064 
1065 void CodeGenFunction::ExpandTypeToArgs(
1066     QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1067     SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1068   auto Exp = getTypeExpansion(Ty, getContext());
1069   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1070     Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
1071                                    : Arg.getKnownRValue().getAggregateAddress();
1072     forConstantArrayExpansion(
1073         *this, CAExp, Addr, [&](Address EltAddr) {
1074           CallArg EltArg = CallArg(
1075               convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1076               CAExp->EltTy);
1077           ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1078                            IRCallArgPos);
1079         });
1080   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1081     Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
1082                                    : Arg.getKnownRValue().getAggregateAddress();
1083     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1084       // Perform a single step derived-to-base conversion.
1085       Address Base =
1086           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1087                                 /*NullCheckValue=*/false, SourceLocation());
1088       CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1089 
1090       // Recurse onto bases.
1091       ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1092                        IRCallArgPos);
1093     }
1094 
1095     LValue LV = MakeAddrLValue(This, Ty);
1096     for (auto FD : RExp->Fields) {
1097       CallArg FldArg =
1098           CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1099       ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1100                        IRCallArgPos);
1101     }
1102   } else if (isa<ComplexExpansion>(Exp.get())) {
1103     ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1104     IRCallArgs[IRCallArgPos++] = CV.first;
1105     IRCallArgs[IRCallArgPos++] = CV.second;
1106   } else {
1107     assert(isa<NoExpansion>(Exp.get()));
1108     auto RV = Arg.getKnownRValue();
1109     assert(RV.isScalar() &&
1110            "Unexpected non-scalar rvalue during struct expansion.");
1111 
1112     // Insert a bitcast as needed.
1113     llvm::Value *V = RV.getScalarVal();
1114     if (IRCallArgPos < IRFuncTy->getNumParams() &&
1115         V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1116       V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1117 
1118     IRCallArgs[IRCallArgPos++] = V;
1119   }
1120 }
1121 
1122 /// Create a temporary allocation for the purposes of coercion.
1123 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1124                                            CharUnits MinAlign,
1125                                            const Twine &Name = "tmp") {
1126   // Don't use an alignment that's worse than what LLVM would prefer.
1127   auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1128   CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1129 
1130   return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
1131 }
1132 
1133 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1134 /// accessing some number of bytes out of it, try to gep into the struct to get
1135 /// at its inner goodness.  Dive as deep as possible without entering an element
1136 /// with an in-memory size smaller than DstSize.
1137 static Address
1138 EnterStructPointerForCoercedAccess(Address SrcPtr,
1139                                    llvm::StructType *SrcSTy,
1140                                    uint64_t DstSize, CodeGenFunction &CGF) {
1141   // We can't dive into a zero-element struct.
1142   if (SrcSTy->getNumElements() == 0) return SrcPtr;
1143 
1144   llvm::Type *FirstElt = SrcSTy->getElementType(0);
1145 
1146   // If the first elt is at least as large as what we're looking for, or if the
1147   // first element is the same size as the whole struct, we can enter it. The
1148   // comparison must be made on the store size and not the alloca size. Using
1149   // the alloca size may overstate the size of the load.
1150   uint64_t FirstEltSize =
1151     CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1152   if (FirstEltSize < DstSize &&
1153       FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1154     return SrcPtr;
1155 
1156   // GEP into the first element.
1157   SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");
1158 
1159   // If the first element is a struct, recurse.
1160   llvm::Type *SrcTy = SrcPtr.getElementType();
1161   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1162     return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1163 
1164   return SrcPtr;
1165 }
1166 
1167 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1168 /// are either integers or pointers.  This does a truncation of the value if it
1169 /// is too large or a zero extension if it is too small.
1170 ///
1171 /// This behaves as if the value were coerced through memory, so on big-endian
1172 /// targets the high bits are preserved in a truncation, while little-endian
1173 /// targets preserve the low bits.
1174 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1175                                              llvm::Type *Ty,
1176                                              CodeGenFunction &CGF) {
1177   if (Val->getType() == Ty)
1178     return Val;
1179 
1180   if (isa<llvm::PointerType>(Val->getType())) {
1181     // If this is Pointer->Pointer avoid conversion to and from int.
1182     if (isa<llvm::PointerType>(Ty))
1183       return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1184 
1185     // Convert the pointer to an integer so we can play with its width.
1186     Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1187   }
1188 
1189   llvm::Type *DestIntTy = Ty;
1190   if (isa<llvm::PointerType>(DestIntTy))
1191     DestIntTy = CGF.IntPtrTy;
1192 
1193   if (Val->getType() != DestIntTy) {
1194     const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1195     if (DL.isBigEndian()) {
1196       // Preserve the high bits on big-endian targets.
1197       // That is what memory coercion does.
1198       uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1199       uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1200 
1201       if (SrcSize > DstSize) {
1202         Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1203         Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1204       } else {
1205         Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1206         Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1207       }
1208     } else {
1209       // Little-endian targets preserve the low bits. No shifts required.
1210       Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1211     }
1212   }
1213 
1214   if (isa<llvm::PointerType>(Ty))
1215     Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1216   return Val;
1217 }
1218 
1219 
1220 
1221 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1222 /// a pointer to an object of type \arg Ty, known to be aligned to
1223 /// \arg SrcAlign bytes.
1224 ///
1225 /// This safely handles the case when the src type is smaller than the
1226 /// destination type; in this situation the values of bits which not
1227 /// present in the src are undefined.
1228 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1229                                       CodeGenFunction &CGF) {
1230   llvm::Type *SrcTy = Src.getElementType();
1231 
1232   // If SrcTy and Ty are the same, just do a load.
1233   if (SrcTy == Ty)
1234     return CGF.Builder.CreateLoad(Src);
1235 
1236   llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1237 
1238   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1239     Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
1240                                              DstSize.getFixedSize(), CGF);
1241     SrcTy = Src.getElementType();
1242   }
1243 
1244   llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1245 
1246   // If the source and destination are integer or pointer types, just do an
1247   // extension or truncation to the desired type.
1248   if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1249       (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1250     llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1251     return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1252   }
1253 
1254   // If load is legal, just bitcast the src pointer.
1255   if (!SrcSize.isScalable() && !DstSize.isScalable() &&
1256       SrcSize.getFixedSize() >= DstSize.getFixedSize()) {
1257     // Generally SrcSize is never greater than DstSize, since this means we are
1258     // losing bits. However, this can happen in cases where the structure has
1259     // additional padding, for example due to a user specified alignment.
1260     //
1261     // FIXME: Assert that we aren't truncating non-padding bits when have access
1262     // to that information.
1263     Src = CGF.Builder.CreateBitCast(Src,
1264                                     Ty->getPointerTo(Src.getAddressSpace()));
1265     return CGF.Builder.CreateLoad(Src);
1266   }
1267 
1268   // If coercing a fixed vector to a scalable vector for ABI compatibility, and
1269   // the types match, use the llvm.experimental.vector.insert intrinsic to
1270   // perform the conversion.
1271   if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
1272     if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
1273       if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
1274         auto *Load = CGF.Builder.CreateLoad(Src);
1275         auto *UndefVec = llvm::UndefValue::get(ScalableDst);
1276         auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
1277         return CGF.Builder.CreateInsertVector(ScalableDst, UndefVec, Load, Zero,
1278                                               "castScalableSve");
1279       }
1280     }
1281   }
1282 
1283   // Otherwise do coercion through memory. This is stupid, but simple.
1284   Address Tmp =
1285       CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
1286   CGF.Builder.CreateMemCpy(
1287       Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
1288       Src.getAlignment().getAsAlign(),
1289       llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
1290   return CGF.Builder.CreateLoad(Tmp);
1291 }
1292 
1293 // Function to store a first-class aggregate into memory.  We prefer to
1294 // store the elements rather than the aggregate to be more friendly to
1295 // fast-isel.
1296 // FIXME: Do we need to recurse here?
1297 void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
1298                                          bool DestIsVolatile) {
1299   // Prefer scalar stores to first-class aggregate stores.
1300   if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
1301     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1302       Address EltPtr = Builder.CreateStructGEP(Dest, i);
1303       llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
1304       Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1305     }
1306   } else {
1307     Builder.CreateStore(Val, Dest, DestIsVolatile);
1308   }
1309 }
1310 
1311 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1312 /// where the source and destination may have different types.  The
1313 /// destination is known to be aligned to \arg DstAlign bytes.
1314 ///
1315 /// This safely handles the case when the src type is larger than the
1316 /// destination type; the upper bits of the src will be lost.
1317 static void CreateCoercedStore(llvm::Value *Src,
1318                                Address Dst,
1319                                bool DstIsVolatile,
1320                                CodeGenFunction &CGF) {
1321   llvm::Type *SrcTy = Src->getType();
1322   llvm::Type *DstTy = Dst.getElementType();
1323   if (SrcTy == DstTy) {
1324     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1325     return;
1326   }
1327 
1328   llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1329 
1330   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1331     Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
1332                                              SrcSize.getFixedSize(), CGF);
1333     DstTy = Dst.getElementType();
1334   }
1335 
1336   llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
1337   llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
1338   if (SrcPtrTy && DstPtrTy &&
1339       SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
1340     Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
1341     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1342     return;
1343   }
1344 
1345   // If the source and destination are integer or pointer types, just do an
1346   // extension or truncation to the desired type.
1347   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1348       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1349     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1350     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1351     return;
1352   }
1353 
1354   llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1355 
1356   // If store is legal, just bitcast the src pointer.
1357   if (isa<llvm::ScalableVectorType>(SrcTy) ||
1358       isa<llvm::ScalableVectorType>(DstTy) ||
1359       SrcSize.getFixedSize() <= DstSize.getFixedSize()) {
1360     Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1361     CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
1362   } else {
1363     // Otherwise do coercion through memory. This is stupid, but
1364     // simple.
1365 
1366     // Generally SrcSize is never greater than DstSize, since this means we are
1367     // losing bits. However, this can happen in cases where the structure has
1368     // additional padding, for example due to a user specified alignment.
1369     //
1370     // FIXME: Assert that we aren't truncating non-padding bits when have access
1371     // to that information.
1372     Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1373     CGF.Builder.CreateStore(Src, Tmp);
1374     CGF.Builder.CreateMemCpy(
1375         Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
1376         Tmp.getAlignment().getAsAlign(),
1377         llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
1378   }
1379 }
1380 
1381 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1382                                    const ABIArgInfo &info) {
1383   if (unsigned offset = info.getDirectOffset()) {
1384     addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1385     addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1386                                              CharUnits::fromQuantity(offset));
1387     addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1388   }
1389   return addr;
1390 }
1391 
1392 namespace {
1393 
1394 /// Encapsulates information about the way function arguments from
1395 /// CGFunctionInfo should be passed to actual LLVM IR function.
1396 class ClangToLLVMArgMapping {
1397   static const unsigned InvalidIndex = ~0U;
1398   unsigned InallocaArgNo;
1399   unsigned SRetArgNo;
1400   unsigned TotalIRArgs;
1401 
1402   /// Arguments of LLVM IR function corresponding to single Clang argument.
1403   struct IRArgs {
1404     unsigned PaddingArgIndex;
1405     // Argument is expanded to IR arguments at positions
1406     // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1407     unsigned FirstArgIndex;
1408     unsigned NumberOfArgs;
1409 
1410     IRArgs()
1411         : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1412           NumberOfArgs(0) {}
1413   };
1414 
1415   SmallVector<IRArgs, 8> ArgInfo;
1416 
1417 public:
1418   ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1419                         bool OnlyRequiredArgs = false)
1420       : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1421         ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1422     construct(Context, FI, OnlyRequiredArgs);
1423   }
1424 
1425   bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1426   unsigned getInallocaArgNo() const {
1427     assert(hasInallocaArg());
1428     return InallocaArgNo;
1429   }
1430 
1431   bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1432   unsigned getSRetArgNo() const {
1433     assert(hasSRetArg());
1434     return SRetArgNo;
1435   }
1436 
1437   unsigned totalIRArgs() const { return TotalIRArgs; }
1438 
1439   bool hasPaddingArg(unsigned ArgNo) const {
1440     assert(ArgNo < ArgInfo.size());
1441     return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1442   }
1443   unsigned getPaddingArgNo(unsigned ArgNo) const {
1444     assert(hasPaddingArg(ArgNo));
1445     return ArgInfo[ArgNo].PaddingArgIndex;
1446   }
1447 
1448   /// Returns index of first IR argument corresponding to ArgNo, and their
1449   /// quantity.
1450   std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1451     assert(ArgNo < ArgInfo.size());
1452     return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1453                           ArgInfo[ArgNo].NumberOfArgs);
1454   }
1455 
1456 private:
1457   void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1458                  bool OnlyRequiredArgs);
1459 };
1460 
1461 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1462                                       const CGFunctionInfo &FI,
1463                                       bool OnlyRequiredArgs) {
1464   unsigned IRArgNo = 0;
1465   bool SwapThisWithSRet = false;
1466   const ABIArgInfo &RetAI = FI.getReturnInfo();
1467 
1468   if (RetAI.getKind() == ABIArgInfo::Indirect) {
1469     SwapThisWithSRet = RetAI.isSRetAfterThis();
1470     SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1471   }
1472 
1473   unsigned ArgNo = 0;
1474   unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1475   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1476        ++I, ++ArgNo) {
1477     assert(I != FI.arg_end());
1478     QualType ArgType = I->type;
1479     const ABIArgInfo &AI = I->info;
1480     // Collect data about IR arguments corresponding to Clang argument ArgNo.
1481     auto &IRArgs = ArgInfo[ArgNo];
1482 
1483     if (AI.getPaddingType())
1484       IRArgs.PaddingArgIndex = IRArgNo++;
1485 
1486     switch (AI.getKind()) {
1487     case ABIArgInfo::Extend:
1488     case ABIArgInfo::Direct: {
1489       // FIXME: handle sseregparm someday...
1490       llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1491       if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1492         IRArgs.NumberOfArgs = STy->getNumElements();
1493       } else {
1494         IRArgs.NumberOfArgs = 1;
1495       }
1496       break;
1497     }
1498     case ABIArgInfo::Indirect:
1499     case ABIArgInfo::IndirectAliased:
1500       IRArgs.NumberOfArgs = 1;
1501       break;
1502     case ABIArgInfo::Ignore:
1503     case ABIArgInfo::InAlloca:
1504       // ignore and inalloca doesn't have matching LLVM parameters.
1505       IRArgs.NumberOfArgs = 0;
1506       break;
1507     case ABIArgInfo::CoerceAndExpand:
1508       IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1509       break;
1510     case ABIArgInfo::Expand:
1511       IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1512       break;
1513     }
1514 
1515     if (IRArgs.NumberOfArgs > 0) {
1516       IRArgs.FirstArgIndex = IRArgNo;
1517       IRArgNo += IRArgs.NumberOfArgs;
1518     }
1519 
1520     // Skip over the sret parameter when it comes second.  We already handled it
1521     // above.
1522     if (IRArgNo == 1 && SwapThisWithSRet)
1523       IRArgNo++;
1524   }
1525   assert(ArgNo == ArgInfo.size());
1526 
1527   if (FI.usesInAlloca())
1528     InallocaArgNo = IRArgNo++;
1529 
1530   TotalIRArgs = IRArgNo;
1531 }
1532 }  // namespace
1533 
1534 /***/
1535 
1536 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1537   const auto &RI = FI.getReturnInfo();
1538   return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1539 }
1540 
1541 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1542   return ReturnTypeUsesSRet(FI) &&
1543          getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1544 }
1545 
1546 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1547   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1548     switch (BT->getKind()) {
1549     default:
1550       return false;
1551     case BuiltinType::Float:
1552       return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1553     case BuiltinType::Double:
1554       return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1555     case BuiltinType::LongDouble:
1556       return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1557     }
1558   }
1559 
1560   return false;
1561 }
1562 
1563 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1564   if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1565     if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1566       if (BT->getKind() == BuiltinType::LongDouble)
1567         return getTarget().useObjCFP2RetForComplexLongDouble();
1568     }
1569   }
1570 
1571   return false;
1572 }
1573 
1574 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1575   const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1576   return GetFunctionType(FI);
1577 }
1578 
1579 llvm::FunctionType *
1580 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1581 
1582   bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1583   (void)Inserted;
1584   assert(Inserted && "Recursively being processed?");
1585 
1586   llvm::Type *resultType = nullptr;
1587   const ABIArgInfo &retAI = FI.getReturnInfo();
1588   switch (retAI.getKind()) {
1589   case ABIArgInfo::Expand:
1590   case ABIArgInfo::IndirectAliased:
1591     llvm_unreachable("Invalid ABI kind for return argument");
1592 
1593   case ABIArgInfo::Extend:
1594   case ABIArgInfo::Direct:
1595     resultType = retAI.getCoerceToType();
1596     break;
1597 
1598   case ABIArgInfo::InAlloca:
1599     if (retAI.getInAllocaSRet()) {
1600       // sret things on win32 aren't void, they return the sret pointer.
1601       QualType ret = FI.getReturnType();
1602       llvm::Type *ty = ConvertType(ret);
1603       unsigned addressSpace = Context.getTargetAddressSpace(ret);
1604       resultType = llvm::PointerType::get(ty, addressSpace);
1605     } else {
1606       resultType = llvm::Type::getVoidTy(getLLVMContext());
1607     }
1608     break;
1609 
1610   case ABIArgInfo::Indirect:
1611   case ABIArgInfo::Ignore:
1612     resultType = llvm::Type::getVoidTy(getLLVMContext());
1613     break;
1614 
1615   case ABIArgInfo::CoerceAndExpand:
1616     resultType = retAI.getUnpaddedCoerceAndExpandType();
1617     break;
1618   }
1619 
1620   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1621   SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1622 
1623   // Add type for sret argument.
1624   if (IRFunctionArgs.hasSRetArg()) {
1625     QualType Ret = FI.getReturnType();
1626     llvm::Type *Ty = ConvertType(Ret);
1627     unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1628     ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1629         llvm::PointerType::get(Ty, AddressSpace);
1630   }
1631 
1632   // Add type for inalloca argument.
1633   if (IRFunctionArgs.hasInallocaArg()) {
1634     auto ArgStruct = FI.getArgStruct();
1635     assert(ArgStruct);
1636     ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1637   }
1638 
1639   // Add in all of the required arguments.
1640   unsigned ArgNo = 0;
1641   CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1642                                      ie = it + FI.getNumRequiredArgs();
1643   for (; it != ie; ++it, ++ArgNo) {
1644     const ABIArgInfo &ArgInfo = it->info;
1645 
1646     // Insert a padding type to ensure proper alignment.
1647     if (IRFunctionArgs.hasPaddingArg(ArgNo))
1648       ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1649           ArgInfo.getPaddingType();
1650 
1651     unsigned FirstIRArg, NumIRArgs;
1652     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1653 
1654     switch (ArgInfo.getKind()) {
1655     case ABIArgInfo::Ignore:
1656     case ABIArgInfo::InAlloca:
1657       assert(NumIRArgs == 0);
1658       break;
1659 
1660     case ABIArgInfo::Indirect: {
1661       assert(NumIRArgs == 1);
1662       // indirect arguments are always on the stack, which is alloca addr space.
1663       llvm::Type *LTy = ConvertTypeForMem(it->type);
1664       ArgTypes[FirstIRArg] = LTy->getPointerTo(
1665           CGM.getDataLayout().getAllocaAddrSpace());
1666       break;
1667     }
1668     case ABIArgInfo::IndirectAliased: {
1669       assert(NumIRArgs == 1);
1670       llvm::Type *LTy = ConvertTypeForMem(it->type);
1671       ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace());
1672       break;
1673     }
1674     case ABIArgInfo::Extend:
1675     case ABIArgInfo::Direct: {
1676       // Fast-isel and the optimizer generally like scalar values better than
1677       // FCAs, so we flatten them if this is safe to do for this argument.
1678       llvm::Type *argType = ArgInfo.getCoerceToType();
1679       llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1680       if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1681         assert(NumIRArgs == st->getNumElements());
1682         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1683           ArgTypes[FirstIRArg + i] = st->getElementType(i);
1684       } else {
1685         assert(NumIRArgs == 1);
1686         ArgTypes[FirstIRArg] = argType;
1687       }
1688       break;
1689     }
1690 
1691     case ABIArgInfo::CoerceAndExpand: {
1692       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1693       for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1694         *ArgTypesIter++ = EltTy;
1695       }
1696       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1697       break;
1698     }
1699 
1700     case ABIArgInfo::Expand:
1701       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1702       getExpandedTypes(it->type, ArgTypesIter);
1703       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1704       break;
1705     }
1706   }
1707 
1708   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1709   assert(Erased && "Not in set?");
1710 
1711   return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1712 }
1713 
1714 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1715   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1716   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1717 
1718   if (!isFuncTypeConvertible(FPT))
1719     return llvm::StructType::get(getLLVMContext());
1720 
1721   return GetFunctionType(GD);
1722 }
1723 
1724 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1725                                                llvm::AttrBuilder &FuncAttrs,
1726                                                const FunctionProtoType *FPT) {
1727   if (!FPT)
1728     return;
1729 
1730   if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1731       FPT->isNothrow())
1732     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1733 }
1734 
1735 void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
1736                                                  bool HasOptnone,
1737                                                  bool AttrOnCallSite,
1738                                                llvm::AttrBuilder &FuncAttrs) {
1739   // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1740   if (!HasOptnone) {
1741     if (CodeGenOpts.OptimizeSize)
1742       FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1743     if (CodeGenOpts.OptimizeSize == 2)
1744       FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1745   }
1746 
1747   if (CodeGenOpts.DisableRedZone)
1748     FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1749   if (CodeGenOpts.IndirectTlsSegRefs)
1750     FuncAttrs.addAttribute("indirect-tls-seg-refs");
1751   if (CodeGenOpts.NoImplicitFloat)
1752     FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1753 
1754   if (AttrOnCallSite) {
1755     // Attributes that should go on the call site only.
1756     if (!CodeGenOpts.SimplifyLibCalls ||
1757         CodeGenOpts.isNoBuiltinFunc(Name.data()))
1758       FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1759     if (!CodeGenOpts.TrapFuncName.empty())
1760       FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1761   } else {
1762     StringRef FpKind;
1763     switch (CodeGenOpts.getFramePointer()) {
1764     case CodeGenOptions::FramePointerKind::None:
1765       FpKind = "none";
1766       break;
1767     case CodeGenOptions::FramePointerKind::NonLeaf:
1768       FpKind = "non-leaf";
1769       break;
1770     case CodeGenOptions::FramePointerKind::All:
1771       FpKind = "all";
1772       break;
1773     }
1774     FuncAttrs.addAttribute("frame-pointer", FpKind);
1775 
1776     FuncAttrs.addAttribute("less-precise-fpmad",
1777                            llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1778 
1779     if (CodeGenOpts.NullPointerIsValid)
1780       FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
1781 
1782     if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
1783       FuncAttrs.addAttribute("denormal-fp-math",
1784                              CodeGenOpts.FPDenormalMode.str());
1785     if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
1786       FuncAttrs.addAttribute(
1787           "denormal-fp-math-f32",
1788           CodeGenOpts.FP32DenormalMode.str());
1789     }
1790 
1791     FuncAttrs.addAttribute("no-trapping-math",
1792                            llvm::toStringRef(LangOpts.getFPExceptionMode() ==
1793                                              LangOptions::FPE_Ignore));
1794 
1795     // Strict (compliant) code is the default, so only add this attribute to
1796     // indicate that we are trying to workaround a problem case.
1797     if (!CodeGenOpts.StrictFloatCastOverflow)
1798       FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1799 
1800     // TODO: Are these all needed?
1801     // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1802     FuncAttrs.addAttribute("no-infs-fp-math",
1803                            llvm::toStringRef(LangOpts.NoHonorInfs));
1804     FuncAttrs.addAttribute("no-nans-fp-math",
1805                            llvm::toStringRef(LangOpts.NoHonorNaNs));
1806     FuncAttrs.addAttribute("unsafe-fp-math",
1807                            llvm::toStringRef(LangOpts.UnsafeFPMath));
1808     FuncAttrs.addAttribute("use-soft-float",
1809                            llvm::toStringRef(CodeGenOpts.SoftFloat));
1810     FuncAttrs.addAttribute("stack-protector-buffer-size",
1811                            llvm::utostr(CodeGenOpts.SSPBufferSize));
1812     FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1813                            llvm::toStringRef(LangOpts.NoSignedZero));
1814 
1815     // TODO: Reciprocal estimate codegen options should apply to instructions?
1816     const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1817     if (!Recips.empty())
1818       FuncAttrs.addAttribute("reciprocal-estimates",
1819                              llvm::join(Recips, ","));
1820 
1821     if (!CodeGenOpts.PreferVectorWidth.empty() &&
1822         CodeGenOpts.PreferVectorWidth != "none")
1823       FuncAttrs.addAttribute("prefer-vector-width",
1824                              CodeGenOpts.PreferVectorWidth);
1825 
1826     if (CodeGenOpts.StackRealignment)
1827       FuncAttrs.addAttribute("stackrealign");
1828     if (CodeGenOpts.Backchain)
1829       FuncAttrs.addAttribute("backchain");
1830     if (CodeGenOpts.EnableSegmentedStacks)
1831       FuncAttrs.addAttribute("split-stack");
1832 
1833     if (CodeGenOpts.SpeculativeLoadHardening)
1834       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1835   }
1836 
1837   if (getLangOpts().assumeFunctionsAreConvergent()) {
1838     // Conservatively, mark all functions and calls in CUDA and OpenCL as
1839     // convergent (meaning, they may call an intrinsically convergent op, such
1840     // as __syncthreads() / barrier(), and so can't have certain optimizations
1841     // applied around them).  LLVM will remove this attribute where it safely
1842     // can.
1843     FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1844   }
1845 
1846   if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1847     // Exceptions aren't supported in CUDA device code.
1848     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1849   }
1850 
1851   for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1852     StringRef Var, Value;
1853     std::tie(Var, Value) = Attr.split('=');
1854     FuncAttrs.addAttribute(Var, Value);
1855   }
1856 }
1857 
1858 void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
1859   llvm::AttrBuilder FuncAttrs;
1860   getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
1861                                /* AttrOnCallSite = */ false, FuncAttrs);
1862   // TODO: call GetCPUAndFeaturesAttributes?
1863   F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1864 }
1865 
1866 void CodeGenModule::addDefaultFunctionDefinitionAttributes(
1867                                                    llvm::AttrBuilder &attrs) {
1868   getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
1869                                /*for call*/ false, attrs);
1870   GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
1871 }
1872 
1873 static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
1874                                    const LangOptions &LangOpts,
1875                                    const NoBuiltinAttr *NBA = nullptr) {
1876   auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
1877     SmallString<32> AttributeName;
1878     AttributeName += "no-builtin-";
1879     AttributeName += BuiltinName;
1880     FuncAttrs.addAttribute(AttributeName);
1881   };
1882 
1883   // First, handle the language options passed through -fno-builtin.
1884   if (LangOpts.NoBuiltin) {
1885     // -fno-builtin disables them all.
1886     FuncAttrs.addAttribute("no-builtins");
1887     return;
1888   }
1889 
1890   // Then, add attributes for builtins specified through -fno-builtin-<name>.
1891   llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
1892 
1893   // Now, let's check the __attribute__((no_builtin("...")) attribute added to
1894   // the source.
1895   if (!NBA)
1896     return;
1897 
1898   // If there is a wildcard in the builtin names specified through the
1899   // attribute, disable them all.
1900   if (llvm::is_contained(NBA->builtinNames(), "*")) {
1901     FuncAttrs.addAttribute("no-builtins");
1902     return;
1903   }
1904 
1905   // And last, add the rest of the builtin names.
1906   llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
1907 }
1908 
1909 /// Construct the IR attribute list of a function or call.
1910 ///
1911 /// When adding an attribute, please consider where it should be handled:
1912 ///
1913 ///   - getDefaultFunctionAttributes is for attributes that are essentially
1914 ///     part of the global target configuration (but perhaps can be
1915 ///     overridden on a per-function basis).  Adding attributes there
1916 ///     will cause them to also be set in frontends that build on Clang's
1917 ///     target-configuration logic, as well as for code defined in library
1918 ///     modules such as CUDA's libdevice.
1919 ///
1920 ///   - ConstructAttributeList builds on top of getDefaultFunctionAttributes
1921 ///     and adds declaration-specific, convention-specific, and
1922 ///     frontend-specific logic.  The last is of particular importance:
1923 ///     attributes that restrict how the frontend generates code must be
1924 ///     added here rather than getDefaultFunctionAttributes.
1925 ///
1926 void CodeGenModule::ConstructAttributeList(
1927     StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1928     llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1929   llvm::AttrBuilder FuncAttrs;
1930   llvm::AttrBuilder RetAttrs;
1931 
1932   // Collect function IR attributes from the CC lowering.
1933   // We'll collect the paramete and result attributes later.
1934   CallingConv = FI.getEffectiveCallingConvention();
1935   if (FI.isNoReturn())
1936     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1937   if (FI.isCmseNSCall())
1938     FuncAttrs.addAttribute("cmse_nonsecure_call");
1939 
1940   // Collect function IR attributes from the callee prototype if we have one.
1941   AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1942                                      CalleeInfo.getCalleeFunctionProtoType());
1943 
1944   const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
1945 
1946   bool HasOptnone = false;
1947   // The NoBuiltinAttr attached to the target FunctionDecl.
1948   const NoBuiltinAttr *NBA = nullptr;
1949 
1950   // Collect function IR attributes based on declaration-specific
1951   // information.
1952   // FIXME: handle sseregparm someday...
1953   if (TargetDecl) {
1954     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1955       FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1956     if (TargetDecl->hasAttr<NoThrowAttr>())
1957       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1958     if (TargetDecl->hasAttr<NoReturnAttr>())
1959       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1960     if (TargetDecl->hasAttr<ColdAttr>())
1961       FuncAttrs.addAttribute(llvm::Attribute::Cold);
1962     if (TargetDecl->hasAttr<HotAttr>())
1963       FuncAttrs.addAttribute(llvm::Attribute::Hot);
1964     if (TargetDecl->hasAttr<NoDuplicateAttr>())
1965       FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1966     if (TargetDecl->hasAttr<ConvergentAttr>())
1967       FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1968 
1969     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1970       AddAttributesFromFunctionProtoType(
1971           getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1972       if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
1973         // A sane operator new returns a non-aliasing pointer.
1974         auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
1975         if (getCodeGenOpts().AssumeSaneOperatorNew &&
1976             (Kind == OO_New || Kind == OO_Array_New))
1977           RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1978       }
1979       const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1980       const bool IsVirtualCall = MD && MD->isVirtual();
1981       // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
1982       // virtual function. These attributes are not inherited by overloads.
1983       if (!(AttrOnCallSite && IsVirtualCall)) {
1984         if (Fn->isNoReturn())
1985           FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1986         NBA = Fn->getAttr<NoBuiltinAttr>();
1987       }
1988       if (!AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
1989         FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
1990     }
1991 
1992     // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1993     if (TargetDecl->hasAttr<ConstAttr>()) {
1994       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1995       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1996     } else if (TargetDecl->hasAttr<PureAttr>()) {
1997       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1998       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1999     } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
2000       FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
2001       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2002     }
2003     if (TargetDecl->hasAttr<RestrictAttr>())
2004       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2005     if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
2006         !CodeGenOpts.NullPointerIsValid)
2007       RetAttrs.addAttribute(llvm::Attribute::NonNull);
2008     if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2009       FuncAttrs.addAttribute("no_caller_saved_registers");
2010     if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
2011       FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2012     if (TargetDecl->hasAttr<LeafAttr>())
2013       FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2014 
2015     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
2016     if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
2017       Optional<unsigned> NumElemsParam;
2018       if (AllocSize->getNumElemsParam().isValid())
2019         NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2020       FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2021                                  NumElemsParam);
2022     }
2023 
2024     if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
2025       if (getLangOpts().OpenCLVersion <= 120) {
2026         // OpenCL v1.2 Work groups are always uniform
2027         FuncAttrs.addAttribute("uniform-work-group-size", "true");
2028       } else {
2029         // OpenCL v2.0 Work groups may be whether uniform or not.
2030         // '-cl-uniform-work-group-size' compile option gets a hint
2031         // to the compiler that the global work-size be a multiple of
2032         // the work-group size specified to clEnqueueNDRangeKernel
2033         // (i.e. work groups are uniform).
2034         FuncAttrs.addAttribute("uniform-work-group-size",
2035                                llvm::toStringRef(CodeGenOpts.UniformWGSize));
2036       }
2037     }
2038 
2039     std::string AssumptionValueStr;
2040     for (AssumptionAttr *AssumptionA :
2041          TargetDecl->specific_attrs<AssumptionAttr>()) {
2042       std::string AS = AssumptionA->getAssumption().str();
2043       if (!AS.empty() && !AssumptionValueStr.empty())
2044         AssumptionValueStr += ",";
2045       AssumptionValueStr += AS;
2046     }
2047 
2048     if (!AssumptionValueStr.empty())
2049       FuncAttrs.addAttribute(llvm::AssumptionAttrKey, AssumptionValueStr);
2050   }
2051 
2052   // Attach "no-builtins" attributes to:
2053   // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
2054   // * definitions: "no-builtins" or "no-builtin-<name>" only.
2055   // The attributes can come from:
2056   // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
2057   // * FunctionDecl attributes: __attribute__((no_builtin(...)))
2058   addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
2059 
2060   // Collect function IR attributes based on global settiings.
2061   getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2062 
2063   // Override some default IR attributes based on declaration-specific
2064   // information.
2065   if (TargetDecl) {
2066     if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
2067       FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2068     if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
2069       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2070     if (TargetDecl->hasAttr<NoSplitStackAttr>())
2071       FuncAttrs.removeAttribute("split-stack");
2072 
2073     // Add NonLazyBind attribute to function declarations when -fno-plt
2074     // is used.
2075     // FIXME: what if we just haven't processed the function definition
2076     // yet, or if it's an external definition like C99 inline?
2077     if (CodeGenOpts.NoPLT) {
2078       if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2079         if (!Fn->isDefined() && !AttrOnCallSite) {
2080           FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2081         }
2082       }
2083     }
2084   }
2085 
2086   // Collect non-call-site function IR attributes from declaration-specific
2087   // information.
2088   if (!AttrOnCallSite) {
2089     if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
2090       FuncAttrs.addAttribute("cmse_nonsecure_entry");
2091 
2092     // Whether tail calls are enabled.
2093     auto shouldDisableTailCalls = [&] {
2094       // Should this be honored in getDefaultFunctionAttributes?
2095       if (CodeGenOpts.DisableTailCalls)
2096         return true;
2097 
2098       if (!TargetDecl)
2099         return false;
2100 
2101       if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
2102           TargetDecl->hasAttr<AnyX86InterruptAttr>())
2103         return true;
2104 
2105       if (CodeGenOpts.NoEscapingBlockTailCalls) {
2106         if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2107           if (!BD->doesNotEscape())
2108             return true;
2109       }
2110 
2111       return false;
2112     };
2113     FuncAttrs.addAttribute("disable-tail-calls",
2114                            llvm::toStringRef(shouldDisableTailCalls()));
2115 
2116     // CPU/feature overrides.  addDefaultFunctionDefinitionAttributes
2117     // handles these separately to set them based on the global defaults.
2118     GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
2119   }
2120 
2121   // Collect attributes from arguments and return values.
2122   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
2123 
2124   QualType RetTy = FI.getReturnType();
2125   const ABIArgInfo &RetAI = FI.getReturnInfo();
2126   switch (RetAI.getKind()) {
2127   case ABIArgInfo::Extend:
2128     if (RetAI.isSignExt())
2129       RetAttrs.addAttribute(llvm::Attribute::SExt);
2130     else
2131       RetAttrs.addAttribute(llvm::Attribute::ZExt);
2132     LLVM_FALLTHROUGH;
2133   case ABIArgInfo::Direct:
2134     if (RetAI.getInReg())
2135       RetAttrs.addAttribute(llvm::Attribute::InReg);
2136     break;
2137   case ABIArgInfo::Ignore:
2138     break;
2139 
2140   case ABIArgInfo::InAlloca:
2141   case ABIArgInfo::Indirect: {
2142     // inalloca and sret disable readnone and readonly
2143     FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2144       .removeAttribute(llvm::Attribute::ReadNone);
2145     break;
2146   }
2147 
2148   case ABIArgInfo::CoerceAndExpand:
2149     break;
2150 
2151   case ABIArgInfo::Expand:
2152   case ABIArgInfo::IndirectAliased:
2153     llvm_unreachable("Invalid ABI kind for return argument");
2154   }
2155 
2156   if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2157     QualType PTy = RefTy->getPointeeType();
2158     if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2159       RetAttrs.addDereferenceableAttr(
2160           getMinimumObjectSize(PTy).getQuantity());
2161     if (getContext().getTargetAddressSpace(PTy) == 0 &&
2162         !CodeGenOpts.NullPointerIsValid)
2163       RetAttrs.addAttribute(llvm::Attribute::NonNull);
2164     if (PTy->isObjectType()) {
2165       llvm::Align Alignment =
2166           getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
2167       RetAttrs.addAlignmentAttr(Alignment);
2168     }
2169   }
2170 
2171   bool hasUsedSRet = false;
2172   SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2173 
2174   // Attach attributes to sret.
2175   if (IRFunctionArgs.hasSRetArg()) {
2176     llvm::AttrBuilder SRETAttrs;
2177     SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
2178     hasUsedSRet = true;
2179     if (RetAI.getInReg())
2180       SRETAttrs.addAttribute(llvm::Attribute::InReg);
2181     SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
2182     ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2183         llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2184   }
2185 
2186   // Attach attributes to inalloca argument.
2187   if (IRFunctionArgs.hasInallocaArg()) {
2188     llvm::AttrBuilder Attrs;
2189     Attrs.addAttribute(llvm::Attribute::InAlloca);
2190     ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2191         llvm::AttributeSet::get(getLLVMContext(), Attrs);
2192   }
2193 
2194   // Apply `nonnull` and `dereferencable(N)` to the `this` argument.
2195   if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() &&
2196       !FI.arg_begin()->type->isVoidPointerType()) {
2197     auto IRArgs = IRFunctionArgs.getIRArgs(0);
2198 
2199     assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");
2200 
2201     llvm::AttrBuilder Attrs;
2202 
2203     if (!CodeGenOpts.NullPointerIsValid &&
2204         getContext().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
2205       Attrs.addAttribute(llvm::Attribute::NonNull);
2206       Attrs.addDereferenceableAttr(
2207           getMinimumObjectSize(
2208               FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
2209               .getQuantity());
2210     } else {
2211       // FIXME dereferenceable should be correct here, regardless of
2212       // NullPointerIsValid. However, dereferenceable currently does not always
2213       // respect NullPointerIsValid and may imply nonnull and break the program.
2214       // See https://reviews.llvm.org/D66618 for discussions.
2215       Attrs.addDereferenceableOrNullAttr(
2216           getMinimumObjectSize(
2217               FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
2218               .getQuantity());
2219     }
2220 
2221     ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);
2222   }
2223 
2224   unsigned ArgNo = 0;
2225   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2226                                           E = FI.arg_end();
2227        I != E; ++I, ++ArgNo) {
2228     QualType ParamType = I->type;
2229     const ABIArgInfo &AI = I->info;
2230     llvm::AttrBuilder Attrs;
2231 
2232     // Add attribute for padding argument, if necessary.
2233     if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2234       if (AI.getPaddingInReg()) {
2235         ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2236             llvm::AttributeSet::get(
2237                 getLLVMContext(),
2238                 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2239       }
2240     }
2241 
2242     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2243     // have the corresponding parameter variable.  It doesn't make
2244     // sense to do it here because parameters are so messed up.
2245     switch (AI.getKind()) {
2246     case ABIArgInfo::Extend:
2247       if (AI.isSignExt())
2248         Attrs.addAttribute(llvm::Attribute::SExt);
2249       else
2250         Attrs.addAttribute(llvm::Attribute::ZExt);
2251       LLVM_FALLTHROUGH;
2252     case ABIArgInfo::Direct:
2253       if (ArgNo == 0 && FI.isChainCall())
2254         Attrs.addAttribute(llvm::Attribute::Nest);
2255       else if (AI.getInReg())
2256         Attrs.addAttribute(llvm::Attribute::InReg);
2257       break;
2258 
2259     case ABIArgInfo::Indirect: {
2260       if (AI.getInReg())
2261         Attrs.addAttribute(llvm::Attribute::InReg);
2262 
2263       if (AI.getIndirectByVal())
2264         Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
2265 
2266       auto *Decl = ParamType->getAsRecordDecl();
2267       if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
2268           Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs)
2269         // When calling the function, the pointer passed in will be the only
2270         // reference to the underlying object. Mark it accordingly.
2271         Attrs.addAttribute(llvm::Attribute::NoAlias);
2272 
2273       // TODO: We could add the byref attribute if not byval, but it would
2274       // require updating many testcases.
2275 
2276       CharUnits Align = AI.getIndirectAlign();
2277 
2278       // In a byval argument, it is important that the required
2279       // alignment of the type is honored, as LLVM might be creating a
2280       // *new* stack object, and needs to know what alignment to give
2281       // it. (Sometimes it can deduce a sensible alignment on its own,
2282       // but not if clang decides it must emit a packed struct, or the
2283       // user specifies increased alignment requirements.)
2284       //
2285       // This is different from indirect *not* byval, where the object
2286       // exists already, and the align attribute is purely
2287       // informative.
2288       assert(!Align.isZero());
2289 
2290       // For now, only add this when we have a byval argument.
2291       // TODO: be less lazy about updating test cases.
2292       if (AI.getIndirectByVal())
2293         Attrs.addAlignmentAttr(Align.getQuantity());
2294 
2295       // byval disables readnone and readonly.
2296       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2297         .removeAttribute(llvm::Attribute::ReadNone);
2298 
2299       break;
2300     }
2301     case ABIArgInfo::IndirectAliased: {
2302       CharUnits Align = AI.getIndirectAlign();
2303       Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));
2304       Attrs.addAlignmentAttr(Align.getQuantity());
2305       break;
2306     }
2307     case ABIArgInfo::Ignore:
2308     case ABIArgInfo::Expand:
2309     case ABIArgInfo::CoerceAndExpand:
2310       break;
2311 
2312     case ABIArgInfo::InAlloca:
2313       // inalloca disables readnone and readonly.
2314       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2315           .removeAttribute(llvm::Attribute::ReadNone);
2316       continue;
2317     }
2318 
2319     if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2320       QualType PTy = RefTy->getPointeeType();
2321       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2322         Attrs.addDereferenceableAttr(
2323             getMinimumObjectSize(PTy).getQuantity());
2324       if (getContext().getTargetAddressSpace(PTy) == 0 &&
2325           !CodeGenOpts.NullPointerIsValid)
2326         Attrs.addAttribute(llvm::Attribute::NonNull);
2327       if (PTy->isObjectType()) {
2328         llvm::Align Alignment =
2329             getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
2330         Attrs.addAlignmentAttr(Alignment);
2331       }
2332     }
2333 
2334     switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2335     case ParameterABI::Ordinary:
2336       break;
2337 
2338     case ParameterABI::SwiftIndirectResult: {
2339       // Add 'sret' if we haven't already used it for something, but
2340       // only if the result is void.
2341       if (!hasUsedSRet && RetTy->isVoidType()) {
2342         Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));
2343         hasUsedSRet = true;
2344       }
2345 
2346       // Add 'noalias' in either case.
2347       Attrs.addAttribute(llvm::Attribute::NoAlias);
2348 
2349       // Add 'dereferenceable' and 'alignment'.
2350       auto PTy = ParamType->getPointeeType();
2351       if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2352         auto info = getContext().getTypeInfoInChars(PTy);
2353         Attrs.addDereferenceableAttr(info.Width.getQuantity());
2354         Attrs.addAlignmentAttr(info.Align.getAsAlign());
2355       }
2356       break;
2357     }
2358 
2359     case ParameterABI::SwiftErrorResult:
2360       Attrs.addAttribute(llvm::Attribute::SwiftError);
2361       break;
2362 
2363     case ParameterABI::SwiftContext:
2364       Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2365       break;
2366     }
2367 
2368     if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2369       Attrs.addAttribute(llvm::Attribute::NoCapture);
2370 
2371     if (Attrs.hasAttributes()) {
2372       unsigned FirstIRArg, NumIRArgs;
2373       std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2374       for (unsigned i = 0; i < NumIRArgs; i++)
2375         ArgAttrs[FirstIRArg + i] =
2376             llvm::AttributeSet::get(getLLVMContext(), Attrs);
2377     }
2378   }
2379   assert(ArgNo == FI.arg_size());
2380 
2381   AttrList = llvm::AttributeList::get(
2382       getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2383       llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2384 }
2385 
2386 /// An argument came in as a promoted argument; demote it back to its
2387 /// declared type.
2388 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2389                                          const VarDecl *var,
2390                                          llvm::Value *value) {
2391   llvm::Type *varType = CGF.ConvertType(var->getType());
2392 
2393   // This can happen with promotions that actually don't change the
2394   // underlying type, like the enum promotions.
2395   if (value->getType() == varType) return value;
2396 
2397   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2398          && "unexpected promotion type");
2399 
2400   if (isa<llvm::IntegerType>(varType))
2401     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2402 
2403   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2404 }
2405 
2406 /// Returns the attribute (either parameter attribute, or function
2407 /// attribute), which declares argument ArgNo to be non-null.
2408 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2409                                          QualType ArgType, unsigned ArgNo) {
2410   // FIXME: __attribute__((nonnull)) can also be applied to:
2411   //   - references to pointers, where the pointee is known to be
2412   //     nonnull (apparently a Clang extension)
2413   //   - transparent unions containing pointers
2414   // In the former case, LLVM IR cannot represent the constraint. In
2415   // the latter case, we have no guarantee that the transparent union
2416   // is in fact passed as a pointer.
2417   if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2418     return nullptr;
2419   // First, check attribute on parameter itself.
2420   if (PVD) {
2421     if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2422       return ParmNNAttr;
2423   }
2424   // Check function attributes.
2425   if (!FD)
2426     return nullptr;
2427   for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2428     if (NNAttr->isNonNull(ArgNo))
2429       return NNAttr;
2430   }
2431   return nullptr;
2432 }
2433 
2434 namespace {
2435   struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2436     Address Temp;
2437     Address Arg;
2438     CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2439     void Emit(CodeGenFunction &CGF, Flags flags) override {
2440       llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2441       CGF.Builder.CreateStore(errorValue, Arg);
2442     }
2443   };
2444 }
2445 
2446 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2447                                          llvm::Function *Fn,
2448                                          const FunctionArgList &Args) {
2449   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2450     // Naked functions don't have prologues.
2451     return;
2452 
2453   // If this is an implicit-return-zero function, go ahead and
2454   // initialize the return value.  TODO: it might be nice to have
2455   // a more general mechanism for this that didn't require synthesized
2456   // return statements.
2457   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2458     if (FD->hasImplicitReturnZero()) {
2459       QualType RetTy = FD->getReturnType().getUnqualifiedType();
2460       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2461       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2462       Builder.CreateStore(Zero, ReturnValue);
2463     }
2464   }
2465 
2466   // FIXME: We no longer need the types from FunctionArgList; lift up and
2467   // simplify.
2468 
2469   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2470   assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
2471 
2472   // If we're using inalloca, all the memory arguments are GEPs off of the last
2473   // parameter, which is a pointer to the complete memory area.
2474   Address ArgStruct = Address::invalid();
2475   if (IRFunctionArgs.hasInallocaArg()) {
2476     ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
2477                         FI.getArgStructAlignment());
2478 
2479     assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2480   }
2481 
2482   // Name the struct return parameter.
2483   if (IRFunctionArgs.hasSRetArg()) {
2484     auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
2485     AI->setName("agg.result");
2486     AI->addAttr(llvm::Attribute::NoAlias);
2487   }
2488 
2489   // Track if we received the parameter as a pointer (indirect, byval, or
2490   // inalloca).  If already have a pointer, EmitParmDecl doesn't need to copy it
2491   // into a local alloca for us.
2492   SmallVector<ParamValue, 16> ArgVals;
2493   ArgVals.reserve(Args.size());
2494 
2495   // Create a pointer value for every parameter declaration.  This usually
2496   // entails copying one or more LLVM IR arguments into an alloca.  Don't push
2497   // any cleanups or do anything that might unwind.  We do that separately, so
2498   // we can push the cleanups in the correct order for the ABI.
2499   assert(FI.arg_size() == Args.size() &&
2500          "Mismatch between function signature & arguments.");
2501   unsigned ArgNo = 0;
2502   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2503   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2504        i != e; ++i, ++info_it, ++ArgNo) {
2505     const VarDecl *Arg = *i;
2506     const ABIArgInfo &ArgI = info_it->info;
2507 
2508     bool isPromoted =
2509       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2510     // We are converting from ABIArgInfo type to VarDecl type directly, unless
2511     // the parameter is promoted. In this case we convert to
2512     // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2513     QualType Ty = isPromoted ? info_it->type : Arg->getType();
2514     assert(hasScalarEvaluationKind(Ty) ==
2515            hasScalarEvaluationKind(Arg->getType()));
2516 
2517     unsigned FirstIRArg, NumIRArgs;
2518     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2519 
2520     switch (ArgI.getKind()) {
2521     case ABIArgInfo::InAlloca: {
2522       assert(NumIRArgs == 0);
2523       auto FieldIndex = ArgI.getInAllocaFieldIndex();
2524       Address V =
2525           Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2526       if (ArgI.getInAllocaIndirect())
2527         V = Address(Builder.CreateLoad(V),
2528                     getContext().getTypeAlignInChars(Ty));
2529       ArgVals.push_back(ParamValue::forIndirect(V));
2530       break;
2531     }
2532 
2533     case ABIArgInfo::Indirect:
2534     case ABIArgInfo::IndirectAliased: {
2535       assert(NumIRArgs == 1);
2536       Address ParamAddr =
2537           Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());
2538 
2539       if (!hasScalarEvaluationKind(Ty)) {
2540         // Aggregates and complex variables are accessed by reference. All we
2541         // need to do is realign the value, if requested. Also, if the address
2542         // may be aliased, copy it to ensure that the parameter variable is
2543         // mutable and has a unique adress, as C requires.
2544         Address V = ParamAddr;
2545         if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
2546           Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2547 
2548           // Copy from the incoming argument pointer to the temporary with the
2549           // appropriate alignment.
2550           //
2551           // FIXME: We should have a common utility for generating an aggregate
2552           // copy.
2553           CharUnits Size = getContext().getTypeSizeInChars(Ty);
2554           Builder.CreateMemCpy(
2555               AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
2556               ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
2557               llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
2558           V = AlignedTemp;
2559         }
2560         ArgVals.push_back(ParamValue::forIndirect(V));
2561       } else {
2562         // Load scalar value from indirect argument.
2563         llvm::Value *V =
2564             EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2565 
2566         if (isPromoted)
2567           V = emitArgumentDemotion(*this, Arg, V);
2568         ArgVals.push_back(ParamValue::forDirect(V));
2569       }
2570       break;
2571     }
2572 
2573     case ABIArgInfo::Extend:
2574     case ABIArgInfo::Direct: {
2575       auto AI = Fn->getArg(FirstIRArg);
2576       llvm::Type *LTy = ConvertType(Arg->getType());
2577 
2578       // Prepare parameter attributes. So far, only attributes for pointer
2579       // parameters are prepared. See
2580       // http://llvm.org/docs/LangRef.html#paramattrs.
2581       if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
2582           ArgI.getCoerceToType()->isPointerTy()) {
2583         assert(NumIRArgs == 1);
2584 
2585         if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2586           // Set `nonnull` attribute if any.
2587           if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2588                              PVD->getFunctionScopeIndex()) &&
2589               !CGM.getCodeGenOpts().NullPointerIsValid)
2590             AI->addAttr(llvm::Attribute::NonNull);
2591 
2592           QualType OTy = PVD->getOriginalType();
2593           if (const auto *ArrTy =
2594               getContext().getAsConstantArrayType(OTy)) {
2595             // A C99 array parameter declaration with the static keyword also
2596             // indicates dereferenceability, and if the size is constant we can
2597             // use the dereferenceable attribute (which requires the size in
2598             // bytes).
2599             if (ArrTy->getSizeModifier() == ArrayType::Static) {
2600               QualType ETy = ArrTy->getElementType();
2601               llvm::Align Alignment =
2602                   CGM.getNaturalTypeAlignment(ETy).getAsAlign();
2603               AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2604               uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2605               if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2606                   ArrSize) {
2607                 llvm::AttrBuilder Attrs;
2608                 Attrs.addDereferenceableAttr(
2609                     getContext().getTypeSizeInChars(ETy).getQuantity() *
2610                     ArrSize);
2611                 AI->addAttrs(Attrs);
2612               } else if (getContext().getTargetInfo().getNullPointerValue(
2613                              ETy.getAddressSpace()) == 0 &&
2614                          !CGM.getCodeGenOpts().NullPointerIsValid) {
2615                 AI->addAttr(llvm::Attribute::NonNull);
2616               }
2617             }
2618           } else if (const auto *ArrTy =
2619                      getContext().getAsVariableArrayType(OTy)) {
2620             // For C99 VLAs with the static keyword, we don't know the size so
2621             // we can't use the dereferenceable attribute, but in addrspace(0)
2622             // we know that it must be nonnull.
2623             if (ArrTy->getSizeModifier() == VariableArrayType::Static) {
2624               QualType ETy = ArrTy->getElementType();
2625               llvm::Align Alignment =
2626                   CGM.getNaturalTypeAlignment(ETy).getAsAlign();
2627               AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2628               if (!getContext().getTargetAddressSpace(ETy) &&
2629                   !CGM.getCodeGenOpts().NullPointerIsValid)
2630                 AI->addAttr(llvm::Attribute::NonNull);
2631             }
2632           }
2633 
2634           // Set `align` attribute if any.
2635           const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2636           if (!AVAttr)
2637             if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2638               AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2639           if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2640             // If alignment-assumption sanitizer is enabled, we do *not* add
2641             // alignment attribute here, but emit normal alignment assumption,
2642             // so the UBSAN check could function.
2643             llvm::ConstantInt *AlignmentCI =
2644                 cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
2645             unsigned AlignmentInt =
2646                 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
2647             if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
2648               AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
2649               AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(
2650                   llvm::Align(AlignmentInt)));
2651             }
2652           }
2653         }
2654 
2655         // Set 'noalias' if an argument type has the `restrict` qualifier.
2656         if (Arg->getType().isRestrictQualified())
2657           AI->addAttr(llvm::Attribute::NoAlias);
2658       }
2659 
2660       // Prepare the argument value. If we have the trivial case, handle it
2661       // with no muss and fuss.
2662       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2663           ArgI.getCoerceToType() == ConvertType(Ty) &&
2664           ArgI.getDirectOffset() == 0) {
2665         assert(NumIRArgs == 1);
2666 
2667         // LLVM expects swifterror parameters to be used in very restricted
2668         // ways.  Copy the value into a less-restricted temporary.
2669         llvm::Value *V = AI;
2670         if (FI.getExtParameterInfo(ArgNo).getABI()
2671               == ParameterABI::SwiftErrorResult) {
2672           QualType pointeeTy = Ty->getPointeeType();
2673           assert(pointeeTy->isPointerType());
2674           Address temp =
2675             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2676           Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2677           llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2678           Builder.CreateStore(incomingErrorValue, temp);
2679           V = temp.getPointer();
2680 
2681           // Push a cleanup to copy the value back at the end of the function.
2682           // The convention does not guarantee that the value will be written
2683           // back if the function exits with an unwind exception.
2684           EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2685         }
2686 
2687         // Ensure the argument is the correct type.
2688         if (V->getType() != ArgI.getCoerceToType())
2689           V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2690 
2691         if (isPromoted)
2692           V = emitArgumentDemotion(*this, Arg, V);
2693 
2694         // Because of merging of function types from multiple decls it is
2695         // possible for the type of an argument to not match the corresponding
2696         // type in the function type. Since we are codegening the callee
2697         // in here, add a cast to the argument type.
2698         llvm::Type *LTy = ConvertType(Arg->getType());
2699         if (V->getType() != LTy)
2700           V = Builder.CreateBitCast(V, LTy);
2701 
2702         ArgVals.push_back(ParamValue::forDirect(V));
2703         break;
2704       }
2705 
2706       // VLST arguments are coerced to VLATs at the function boundary for
2707       // ABI consistency. If this is a VLST that was coerced to
2708       // a VLAT at the function boundary and the types match up, use
2709       // llvm.experimental.vector.extract to convert back to the original
2710       // VLST.
2711       if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
2712         auto *Coerced = Fn->getArg(FirstIRArg);
2713         if (auto *VecTyFrom =
2714                 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
2715           if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
2716             llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2717 
2718             assert(NumIRArgs == 1);
2719             Coerced->setName(Arg->getName() + ".coerce");
2720             ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector(
2721                 VecTyTo, Coerced, Zero, "castFixedSve")));
2722             break;
2723           }
2724         }
2725       }
2726 
2727       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2728                                      Arg->getName());
2729 
2730       // Pointer to store into.
2731       Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2732 
2733       // Fast-isel and the optimizer generally like scalar values better than
2734       // FCAs, so we flatten them if this is safe to do for this argument.
2735       llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2736       if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2737           STy->getNumElements() > 1) {
2738         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2739         llvm::Type *DstTy = Ptr.getElementType();
2740         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2741 
2742         Address AddrToStoreInto = Address::invalid();
2743         if (SrcSize <= DstSize) {
2744           AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2745         } else {
2746           AddrToStoreInto =
2747             CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2748         }
2749 
2750         assert(STy->getNumElements() == NumIRArgs);
2751         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2752           auto AI = Fn->getArg(FirstIRArg + i);
2753           AI->setName(Arg->getName() + ".coerce" + Twine(i));
2754           Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
2755           Builder.CreateStore(AI, EltPtr);
2756         }
2757 
2758         if (SrcSize > DstSize) {
2759           Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2760         }
2761 
2762       } else {
2763         // Simple case, just do a coerced store of the argument into the alloca.
2764         assert(NumIRArgs == 1);
2765         auto AI = Fn->getArg(FirstIRArg);
2766         AI->setName(Arg->getName() + ".coerce");
2767         CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
2768       }
2769 
2770       // Match to what EmitParmDecl is expecting for this type.
2771       if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2772         llvm::Value *V =
2773             EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2774         if (isPromoted)
2775           V = emitArgumentDemotion(*this, Arg, V);
2776         ArgVals.push_back(ParamValue::forDirect(V));
2777       } else {
2778         ArgVals.push_back(ParamValue::forIndirect(Alloca));
2779       }
2780       break;
2781     }
2782 
2783     case ABIArgInfo::CoerceAndExpand: {
2784       // Reconstruct into a temporary.
2785       Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2786       ArgVals.push_back(ParamValue::forIndirect(alloca));
2787 
2788       auto coercionType = ArgI.getCoerceAndExpandType();
2789       alloca = Builder.CreateElementBitCast(alloca, coercionType);
2790 
2791       unsigned argIndex = FirstIRArg;
2792       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2793         llvm::Type *eltType = coercionType->getElementType(i);
2794         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2795           continue;
2796 
2797         auto eltAddr = Builder.CreateStructGEP(alloca, i);
2798         auto elt = Fn->getArg(argIndex++);
2799         Builder.CreateStore(elt, eltAddr);
2800       }
2801       assert(argIndex == FirstIRArg + NumIRArgs);
2802       break;
2803     }
2804 
2805     case ABIArgInfo::Expand: {
2806       // If this structure was expanded into multiple arguments then
2807       // we need to create a temporary and reconstruct it from the
2808       // arguments.
2809       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2810       LValue LV = MakeAddrLValue(Alloca, Ty);
2811       ArgVals.push_back(ParamValue::forIndirect(Alloca));
2812 
2813       auto FnArgIter = Fn->arg_begin() + FirstIRArg;
2814       ExpandTypeFromArgs(Ty, LV, FnArgIter);
2815       assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
2816       for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2817         auto AI = Fn->getArg(FirstIRArg + i);
2818         AI->setName(Arg->getName() + "." + Twine(i));
2819       }
2820       break;
2821     }
2822 
2823     case ABIArgInfo::Ignore:
2824       assert(NumIRArgs == 0);
2825       // Initialize the local variable appropriately.
2826       if (!hasScalarEvaluationKind(Ty)) {
2827         ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2828       } else {
2829         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2830         ArgVals.push_back(ParamValue::forDirect(U));
2831       }
2832       break;
2833     }
2834   }
2835 
2836   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2837     for (int I = Args.size() - 1; I >= 0; --I)
2838       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2839   } else {
2840     for (unsigned I = 0, E = Args.size(); I != E; ++I)
2841       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2842   }
2843 }
2844 
2845 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2846   while (insn->use_empty()) {
2847     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2848     if (!bitcast) return;
2849 
2850     // This is "safe" because we would have used a ConstantExpr otherwise.
2851     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2852     bitcast->eraseFromParent();
2853   }
2854 }
2855 
2856 /// Try to emit a fused autorelease of a return result.
2857 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2858                                                     llvm::Value *result) {
2859   // We must be immediately followed the cast.
2860   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2861   if (BB->empty()) return nullptr;
2862   if (&BB->back() != result) return nullptr;
2863 
2864   llvm::Type *resultType = result->getType();
2865 
2866   // result is in a BasicBlock and is therefore an Instruction.
2867   llvm::Instruction *generator = cast<llvm::Instruction>(result);
2868 
2869   SmallVector<llvm::Instruction *, 4> InstsToKill;
2870 
2871   // Look for:
2872   //  %generator = bitcast %type1* %generator2 to %type2*
2873   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2874     // We would have emitted this as a constant if the operand weren't
2875     // an Instruction.
2876     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2877 
2878     // Require the generator to be immediately followed by the cast.
2879     if (generator->getNextNode() != bitcast)
2880       return nullptr;
2881 
2882     InstsToKill.push_back(bitcast);
2883   }
2884 
2885   // Look for:
2886   //   %generator = call i8* @objc_retain(i8* %originalResult)
2887   // or
2888   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2889   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2890   if (!call) return nullptr;
2891 
2892   bool doRetainAutorelease;
2893 
2894   if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2895     doRetainAutorelease = true;
2896   } else if (call->getCalledOperand() ==
2897              CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
2898     doRetainAutorelease = false;
2899 
2900     // If we emitted an assembly marker for this call (and the
2901     // ARCEntrypoints field should have been set if so), go looking
2902     // for that call.  If we can't find it, we can't do this
2903     // optimization.  But it should always be the immediately previous
2904     // instruction, unless we needed bitcasts around the call.
2905     if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2906       llvm::Instruction *prev = call->getPrevNode();
2907       assert(prev);
2908       if (isa<llvm::BitCastInst>(prev)) {
2909         prev = prev->getPrevNode();
2910         assert(prev);
2911       }
2912       assert(isa<llvm::CallInst>(prev));
2913       assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
2914              CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2915       InstsToKill.push_back(prev);
2916     }
2917   } else {
2918     return nullptr;
2919   }
2920 
2921   result = call->getArgOperand(0);
2922   InstsToKill.push_back(call);
2923 
2924   // Keep killing bitcasts, for sanity.  Note that we no longer care
2925   // about precise ordering as long as there's exactly one use.
2926   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2927     if (!bitcast->hasOneUse()) break;
2928     InstsToKill.push_back(bitcast);
2929     result = bitcast->getOperand(0);
2930   }
2931 
2932   // Delete all the unnecessary instructions, from latest to earliest.
2933   for (auto *I : InstsToKill)
2934     I->eraseFromParent();
2935 
2936   // Do the fused retain/autorelease if we were asked to.
2937   if (doRetainAutorelease)
2938     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2939 
2940   // Cast back to the result type.
2941   return CGF.Builder.CreateBitCast(result, resultType);
2942 }
2943 
2944 /// If this is a +1 of the value of an immutable 'self', remove it.
2945 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2946                                           llvm::Value *result) {
2947   // This is only applicable to a method with an immutable 'self'.
2948   const ObjCMethodDecl *method =
2949     dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2950   if (!method) return nullptr;
2951   const VarDecl *self = method->getSelfDecl();
2952   if (!self->getType().isConstQualified()) return nullptr;
2953 
2954   // Look for a retain call.
2955   llvm::CallInst *retainCall =
2956     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2957   if (!retainCall || retainCall->getCalledOperand() !=
2958                          CGF.CGM.getObjCEntrypoints().objc_retain)
2959     return nullptr;
2960 
2961   // Look for an ordinary load of 'self'.
2962   llvm::Value *retainedValue = retainCall->getArgOperand(0);
2963   llvm::LoadInst *load =
2964     dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2965   if (!load || load->isAtomic() || load->isVolatile() ||
2966       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2967     return nullptr;
2968 
2969   // Okay!  Burn it all down.  This relies for correctness on the
2970   // assumption that the retain is emitted as part of the return and
2971   // that thereafter everything is used "linearly".
2972   llvm::Type *resultType = result->getType();
2973   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2974   assert(retainCall->use_empty());
2975   retainCall->eraseFromParent();
2976   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2977 
2978   return CGF.Builder.CreateBitCast(load, resultType);
2979 }
2980 
2981 /// Emit an ARC autorelease of the result of a function.
2982 ///
2983 /// \return the value to actually return from the function
2984 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2985                                             llvm::Value *result) {
2986   // If we're returning 'self', kill the initial retain.  This is a
2987   // heuristic attempt to "encourage correctness" in the really unfortunate
2988   // case where we have a return of self during a dealloc and we desperately
2989   // need to avoid the possible autorelease.
2990   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2991     return self;
2992 
2993   // At -O0, try to emit a fused retain/autorelease.
2994   if (CGF.shouldUseFusedARCCalls())
2995     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2996       return fused;
2997 
2998   return CGF.EmitARCAutoreleaseReturnValue(result);
2999 }
3000 
3001 /// Heuristically search for a dominating store to the return-value slot.
3002 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
3003   // Check if a User is a store which pointerOperand is the ReturnValue.
3004   // We are looking for stores to the ReturnValue, not for stores of the
3005   // ReturnValue to some other location.
3006   auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
3007     auto *SI = dyn_cast<llvm::StoreInst>(U);
3008     if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
3009       return nullptr;
3010     // These aren't actually possible for non-coerced returns, and we
3011     // only care about non-coerced returns on this code path.
3012     assert(!SI->isAtomic() && !SI->isVolatile());
3013     return SI;
3014   };
3015   // If there are multiple uses of the return-value slot, just check
3016   // for something immediately preceding the IP.  Sometimes this can
3017   // happen with how we generate implicit-returns; it can also happen
3018   // with noreturn cleanups.
3019   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
3020     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3021     if (IP->empty()) return nullptr;
3022     llvm::Instruction *I = &IP->back();
3023 
3024     // Skip lifetime markers
3025     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
3026                                             IE = IP->rend();
3027          II != IE; ++II) {
3028       if (llvm::IntrinsicInst *Intrinsic =
3029               dyn_cast<llvm::IntrinsicInst>(&*II)) {
3030         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
3031           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
3032           ++II;
3033           if (II == IE)
3034             break;
3035           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
3036             continue;
3037         }
3038       }
3039       I = &*II;
3040       break;
3041     }
3042 
3043     return GetStoreIfValid(I);
3044   }
3045 
3046   llvm::StoreInst *store =
3047       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
3048   if (!store) return nullptr;
3049 
3050   // Now do a first-and-dirty dominance check: just walk up the
3051   // single-predecessors chain from the current insertion point.
3052   llvm::BasicBlock *StoreBB = store->getParent();
3053   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3054   while (IP != StoreBB) {
3055     if (!(IP = IP->getSinglePredecessor()))
3056       return nullptr;
3057   }
3058 
3059   // Okay, the store's basic block dominates the insertion point; we
3060   // can do our thing.
3061   return store;
3062 }
3063 
3064 // Helper functions for EmitCMSEClearRecord
3065 
3066 // Set the bits corresponding to a field having width `BitWidth` and located at
3067 // offset `BitOffset` (from the least significant bit) within a storage unit of
3068 // `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
3069 // Use little-endian layout, i.e.`Bits[0]` is the LSB.
3070 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset,
3071                         int BitWidth, int CharWidth) {
3072   assert(CharWidth <= 64);
3073   assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3074 
3075   int Pos = 0;
3076   if (BitOffset >= CharWidth) {
3077     Pos += BitOffset / CharWidth;
3078     BitOffset = BitOffset % CharWidth;
3079   }
3080 
3081   const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
3082   if (BitOffset + BitWidth >= CharWidth) {
3083     Bits[Pos++] |= (Used << BitOffset) & Used;
3084     BitWidth -= CharWidth - BitOffset;
3085     BitOffset = 0;
3086   }
3087 
3088   while (BitWidth >= CharWidth) {
3089     Bits[Pos++] = Used;
3090     BitWidth -= CharWidth;
3091   }
3092 
3093   if (BitWidth > 0)
3094     Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
3095 }
3096 
3097 // Set the bits corresponding to a field having width `BitWidth` and located at
3098 // offset `BitOffset` (from the least significant bit) within a storage unit of
3099 // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of
3100 // `Bits` corresponds to one target byte. Use target endian layout.
3101 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset,
3102                         int StorageSize, int BitOffset, int BitWidth,
3103                         int CharWidth, bool BigEndian) {
3104 
3105   SmallVector<uint64_t, 8> TmpBits(StorageSize);
3106   setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3107 
3108   if (BigEndian)
3109     std::reverse(TmpBits.begin(), TmpBits.end());
3110 
3111   for (uint64_t V : TmpBits)
3112     Bits[StorageOffset++] |= V;
3113 }
3114 
3115 static void setUsedBits(CodeGenModule &, QualType, int,
3116                         SmallVectorImpl<uint64_t> &);
3117 
3118 // Set the bits in `Bits`, which correspond to the value representations of
3119 // the actual members of the record type `RTy`. Note that this function does
3120 // not handle base classes, virtual tables, etc, since they cannot happen in
3121 // CMSE function arguments or return. The bit mask corresponds to the target
3122 // memory layout, i.e. it's endian dependent.
3123 static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
3124                         SmallVectorImpl<uint64_t> &Bits) {
3125   ASTContext &Context = CGM.getContext();
3126   int CharWidth = Context.getCharWidth();
3127   const RecordDecl *RD = RTy->getDecl()->getDefinition();
3128   const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
3129   const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
3130 
3131   int Idx = 0;
3132   for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
3133     const FieldDecl *F = *I;
3134 
3135     if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
3136         F->getType()->isIncompleteArrayType())
3137       continue;
3138 
3139     if (F->isBitField()) {
3140       const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
3141       setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
3142                   BFI.StorageSize / CharWidth, BFI.Offset,
3143                   BFI.Size, CharWidth,
3144                   CGM.getDataLayout().isBigEndian());
3145       continue;
3146     }
3147 
3148     setUsedBits(CGM, F->getType(),
3149                 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
3150   }
3151 }
3152 
3153 // Set the bits in `Bits`, which correspond to the value representations of
3154 // the elements of an array type `ATy`.
3155 static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
3156                         int Offset, SmallVectorImpl<uint64_t> &Bits) {
3157   const ASTContext &Context = CGM.getContext();
3158 
3159   QualType ETy = Context.getBaseElementType(ATy);
3160   int Size = Context.getTypeSizeInChars(ETy).getQuantity();
3161   SmallVector<uint64_t, 4> TmpBits(Size);
3162   setUsedBits(CGM, ETy, 0, TmpBits);
3163 
3164   for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
3165     auto Src = TmpBits.begin();
3166     auto Dst = Bits.begin() + Offset + I * Size;
3167     for (int J = 0; J < Size; ++J)
3168       *Dst++ |= *Src++;
3169   }
3170 }
3171 
3172 // Set the bits in `Bits`, which correspond to the value representations of
3173 // the type `QTy`.
3174 static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
3175                         SmallVectorImpl<uint64_t> &Bits) {
3176   if (const auto *RTy = QTy->getAs<RecordType>())
3177     return setUsedBits(CGM, RTy, Offset, Bits);
3178 
3179   ASTContext &Context = CGM.getContext();
3180   if (const auto *ATy = Context.getAsConstantArrayType(QTy))
3181     return setUsedBits(CGM, ATy, Offset, Bits);
3182 
3183   int Size = Context.getTypeSizeInChars(QTy).getQuantity();
3184   if (Size <= 0)
3185     return;
3186 
3187   std::fill_n(Bits.begin() + Offset, Size,
3188               (uint64_t(1) << Context.getCharWidth()) - 1);
3189 }
3190 
3191 static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
3192                                    int Pos, int Size, int CharWidth,
3193                                    bool BigEndian) {
3194   assert(Size > 0);
3195   uint64_t Mask = 0;
3196   if (BigEndian) {
3197     for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
3198          ++P)
3199       Mask = (Mask << CharWidth) | *P;
3200   } else {
3201     auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3202     do
3203       Mask = (Mask << CharWidth) | *--P;
3204     while (P != End);
3205   }
3206   return Mask;
3207 }
3208 
3209 // Emit code to clear the bits in a record, which aren't a part of any user
3210 // declared member, when the record is a function return.
3211 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3212                                                   llvm::IntegerType *ITy,
3213                                                   QualType QTy) {
3214   assert(Src->getType() == ITy);
3215   assert(ITy->getScalarSizeInBits() <= 64);
3216 
3217   const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3218   int Size = DataLayout.getTypeStoreSize(ITy);
3219   SmallVector<uint64_t, 4> Bits(Size);
3220   setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3221 
3222   int CharWidth = CGM.getContext().getCharWidth();
3223   uint64_t Mask =
3224       buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());
3225 
3226   return Builder.CreateAnd(Src, Mask, "cmse.clear");
3227 }
3228 
3229 // Emit code to clear the bits in a record, which aren't a part of any user
3230 // declared member, when the record is a function argument.
3231 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3232                                                   llvm::ArrayType *ATy,
3233                                                   QualType QTy) {
3234   const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3235   int Size = DataLayout.getTypeStoreSize(ATy);
3236   SmallVector<uint64_t, 16> Bits(Size);
3237   setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3238 
3239   // Clear each element of the LLVM array.
3240   int CharWidth = CGM.getContext().getCharWidth();
3241   int CharsPerElt =
3242       ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3243   int MaskIndex = 0;
3244   llvm::Value *R = llvm::UndefValue::get(ATy);
3245   for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3246     uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
3247                                        DataLayout.isBigEndian());
3248     MaskIndex += CharsPerElt;
3249     llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3250     llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3251     R = Builder.CreateInsertValue(R, T1, I);
3252   }
3253 
3254   return R;
3255 }
3256 
3257 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
3258                                          bool EmitRetDbgLoc,
3259                                          SourceLocation EndLoc) {
3260   if (FI.isNoReturn()) {
3261     // Noreturn functions don't return.
3262     EmitUnreachable(EndLoc);
3263     return;
3264   }
3265 
3266   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3267     // Naked functions don't have epilogues.
3268     Builder.CreateUnreachable();
3269     return;
3270   }
3271 
3272   // Functions with no result always return void.
3273   if (!ReturnValue.isValid()) {
3274     Builder.CreateRetVoid();
3275     return;
3276   }
3277 
3278   llvm::DebugLoc RetDbgLoc;
3279   llvm::Value *RV = nullptr;
3280   QualType RetTy = FI.getReturnType();
3281   const ABIArgInfo &RetAI = FI.getReturnInfo();
3282 
3283   switch (RetAI.getKind()) {
3284   case ABIArgInfo::InAlloca:
3285     // Aggregrates get evaluated directly into the destination.  Sometimes we
3286     // need to return the sret value in a register, though.
3287     assert(hasAggregateEvaluationKind(RetTy));
3288     if (RetAI.getInAllocaSRet()) {
3289       llvm::Function::arg_iterator EI = CurFn->arg_end();
3290       --EI;
3291       llvm::Value *ArgStruct = &*EI;
3292       llvm::Value *SRet = Builder.CreateStructGEP(
3293           nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
3294       RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
3295     }
3296     break;
3297 
3298   case ABIArgInfo::Indirect: {
3299     auto AI = CurFn->arg_begin();
3300     if (RetAI.isSRetAfterThis())
3301       ++AI;
3302     switch (getEvaluationKind(RetTy)) {
3303     case TEK_Complex: {
3304       ComplexPairTy RT =
3305         EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
3306       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
3307                          /*isInit*/ true);
3308       break;
3309     }
3310     case TEK_Aggregate:
3311       // Do nothing; aggregrates get evaluated directly into the destination.
3312       break;
3313     case TEK_Scalar:
3314       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
3315                         MakeNaturalAlignAddrLValue(&*AI, RetTy),
3316                         /*isInit*/ true);
3317       break;
3318     }
3319     break;
3320   }
3321 
3322   case ABIArgInfo::Extend:
3323   case ABIArgInfo::Direct:
3324     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
3325         RetAI.getDirectOffset() == 0) {
3326       // The internal return value temp always will have pointer-to-return-type
3327       // type, just do a load.
3328 
3329       // If there is a dominating store to ReturnValue, we can elide
3330       // the load, zap the store, and usually zap the alloca.
3331       if (llvm::StoreInst *SI =
3332               findDominatingStoreToReturnValue(*this)) {
3333         // Reuse the debug location from the store unless there is
3334         // cleanup code to be emitted between the store and return
3335         // instruction.
3336         if (EmitRetDbgLoc && !AutoreleaseResult)
3337           RetDbgLoc = SI->getDebugLoc();
3338         // Get the stored value and nuke the now-dead store.
3339         RV = SI->getValueOperand();
3340         SI->eraseFromParent();
3341 
3342       // Otherwise, we have to do a simple load.
3343       } else {
3344         RV = Builder.CreateLoad(ReturnValue);
3345       }
3346     } else {
3347       // If the value is offset in memory, apply the offset now.
3348       Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
3349 
3350       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
3351     }
3352 
3353     // In ARC, end functions that return a retainable type with a call
3354     // to objc_autoreleaseReturnValue.
3355     if (AutoreleaseResult) {
3356 #ifndef NDEBUG
3357       // Type::isObjCRetainabletype has to be called on a QualType that hasn't
3358       // been stripped of the typedefs, so we cannot use RetTy here. Get the
3359       // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
3360       // CurCodeDecl or BlockInfo.
3361       QualType RT;
3362 
3363       if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3364         RT = FD->getReturnType();
3365       else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3366         RT = MD->getReturnType();
3367       else if (isa<BlockDecl>(CurCodeDecl))
3368         RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3369       else
3370         llvm_unreachable("Unexpected function/method type");
3371 
3372       assert(getLangOpts().ObjCAutoRefCount &&
3373              !FI.isReturnsRetained() &&
3374              RT->isObjCRetainableType());
3375 #endif
3376       RV = emitAutoreleaseOfResult(*this, RV);
3377     }
3378 
3379     break;
3380 
3381   case ABIArgInfo::Ignore:
3382     break;
3383 
3384   case ABIArgInfo::CoerceAndExpand: {
3385     auto coercionType = RetAI.getCoerceAndExpandType();
3386 
3387     // Load all of the coerced elements out into results.
3388     llvm::SmallVector<llvm::Value*, 4> results;
3389     Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
3390     for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3391       auto coercedEltType = coercionType->getElementType(i);
3392       if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3393         continue;
3394 
3395       auto eltAddr = Builder.CreateStructGEP(addr, i);
3396       auto elt = Builder.CreateLoad(eltAddr);
3397       results.push_back(elt);
3398     }
3399 
3400     // If we have one result, it's the single direct result type.
3401     if (results.size() == 1) {
3402       RV = results[0];
3403 
3404     // Otherwise, we need to make a first-class aggregate.
3405     } else {
3406       // Construct a return type that lacks padding elements.
3407       llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3408 
3409       RV = llvm::UndefValue::get(returnType);
3410       for (unsigned i = 0, e = results.size(); i != e; ++i) {
3411         RV = Builder.CreateInsertValue(RV, results[i], i);
3412       }
3413     }
3414     break;
3415   }
3416   case ABIArgInfo::Expand:
3417   case ABIArgInfo::IndirectAliased:
3418     llvm_unreachable("Invalid ABI kind for return argument");
3419   }
3420 
3421   llvm::Instruction *Ret;
3422   if (RV) {
3423     if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
3424       // For certain return types, clear padding bits, as they may reveal
3425       // sensitive information.
3426       // Small struct/union types are passed as integers.
3427       auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
3428       if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType()))
3429         RV = EmitCMSEClearRecord(RV, ITy, RetTy);
3430     }
3431     EmitReturnValueCheck(RV);
3432     Ret = Builder.CreateRet(RV);
3433   } else {
3434     Ret = Builder.CreateRetVoid();
3435   }
3436 
3437   if (RetDbgLoc)
3438     Ret->setDebugLoc(std::move(RetDbgLoc));
3439 }
3440 
3441 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
3442   // A current decl may not be available when emitting vtable thunks.
3443   if (!CurCodeDecl)
3444     return;
3445 
3446   // If the return block isn't reachable, neither is this check, so don't emit
3447   // it.
3448   if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
3449     return;
3450 
3451   ReturnsNonNullAttr *RetNNAttr = nullptr;
3452   if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
3453     RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3454 
3455   if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3456     return;
3457 
3458   // Prefer the returns_nonnull attribute if it's present.
3459   SourceLocation AttrLoc;
3460   SanitizerMask CheckKind;
3461   SanitizerHandler Handler;
3462   if (RetNNAttr) {
3463     assert(!requiresReturnValueNullabilityCheck() &&
3464            "Cannot check nullability and the nonnull attribute");
3465     AttrLoc = RetNNAttr->getLocation();
3466     CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3467     Handler = SanitizerHandler::NonnullReturn;
3468   } else {
3469     if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3470       if (auto *TSI = DD->getTypeSourceInfo())
3471         if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
3472           AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3473     CheckKind = SanitizerKind::NullabilityReturn;
3474     Handler = SanitizerHandler::NullabilityReturn;
3475   }
3476 
3477   SanitizerScope SanScope(this);
3478 
3479   // Make sure the "return" source location is valid. If we're checking a
3480   // nullability annotation, make sure the preconditions for the check are met.
3481   llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3482   llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3483   llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3484   llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3485   if (requiresReturnValueNullabilityCheck())
3486     CanNullCheck =
3487         Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3488   Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3489   EmitBlock(Check);
3490 
3491   // Now do the null check.
3492   llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3493   llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3494   llvm::Value *DynamicData[] = {SLocPtr};
3495   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3496 
3497   EmitBlock(NoCheck);
3498 
3499 #ifndef NDEBUG
3500   // The return location should not be used after the check has been emitted.
3501   ReturnLocation = Address::invalid();
3502 #endif
3503 }
3504 
3505 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3506   const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3507   return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3508 }
3509 
3510 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3511                                           QualType Ty) {
3512   // FIXME: Generate IR in one pass, rather than going back and fixing up these
3513   // placeholders.
3514   llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3515   llvm::Type *IRPtrTy = IRTy->getPointerTo();
3516   llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3517 
3518   // FIXME: When we generate this IR in one pass, we shouldn't need
3519   // this win32-specific alignment hack.
3520   CharUnits Align = CharUnits::fromQuantity(4);
3521   Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3522 
3523   return AggValueSlot::forAddr(Address(Placeholder, Align),
3524                                Ty.getQualifiers(),
3525                                AggValueSlot::IsNotDestructed,
3526                                AggValueSlot::DoesNotNeedGCBarriers,
3527                                AggValueSlot::IsNotAliased,
3528                                AggValueSlot::DoesNotOverlap);
3529 }
3530 
3531 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3532                                           const VarDecl *param,
3533                                           SourceLocation loc) {
3534   // StartFunction converted the ABI-lowered parameter(s) into a
3535   // local alloca.  We need to turn that into an r-value suitable
3536   // for EmitCall.
3537   Address local = GetAddrOfLocalVar(param);
3538 
3539   QualType type = param->getType();
3540 
3541   if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3542     CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3543   }
3544 
3545   // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3546   // but the argument needs to be the original pointer.
3547   if (type->isReferenceType()) {
3548     args.add(RValue::get(Builder.CreateLoad(local)), type);
3549 
3550   // In ARC, move out of consumed arguments so that the release cleanup
3551   // entered by StartFunction doesn't cause an over-release.  This isn't
3552   // optimal -O0 code generation, but it should get cleaned up when
3553   // optimization is enabled.  This also assumes that delegate calls are
3554   // performed exactly once for a set of arguments, but that should be safe.
3555   } else if (getLangOpts().ObjCAutoRefCount &&
3556              param->hasAttr<NSConsumedAttr>() &&
3557              type->isObjCRetainableType()) {
3558     llvm::Value *ptr = Builder.CreateLoad(local);
3559     auto null =
3560       llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3561     Builder.CreateStore(null, local);
3562     args.add(RValue::get(ptr), type);
3563 
3564   // For the most part, we just need to load the alloca, except that
3565   // aggregate r-values are actually pointers to temporaries.
3566   } else {
3567     args.add(convertTempToRValue(local, type, loc), type);
3568   }
3569 
3570   // Deactivate the cleanup for the callee-destructed param that was pushed.
3571   if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3572       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3573       param->needsDestruction(getContext())) {
3574     EHScopeStack::stable_iterator cleanup =
3575         CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3576     assert(cleanup.isValid() &&
3577            "cleanup for callee-destructed param not recorded");
3578     // This unreachable is a temporary marker which will be removed later.
3579     llvm::Instruction *isActive = Builder.CreateUnreachable();
3580     args.addArgCleanupDeactivation(cleanup, isActive);
3581   }
3582 }
3583 
3584 static bool isProvablyNull(llvm::Value *addr) {
3585   return isa<llvm::ConstantPointerNull>(addr);
3586 }
3587 
3588 /// Emit the actual writing-back of a writeback.
3589 static void emitWriteback(CodeGenFunction &CGF,
3590                           const CallArgList::Writeback &writeback) {
3591   const LValue &srcLV = writeback.Source;
3592   Address srcAddr = srcLV.getAddress(CGF);
3593   assert(!isProvablyNull(srcAddr.getPointer()) &&
3594          "shouldn't have writeback for provably null argument");
3595 
3596   llvm::BasicBlock *contBB = nullptr;
3597 
3598   // If the argument wasn't provably non-null, we need to null check
3599   // before doing the store.
3600   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3601                                               CGF.CGM.getDataLayout());
3602   if (!provablyNonNull) {
3603     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3604     contBB = CGF.createBasicBlock("icr.done");
3605 
3606     llvm::Value *isNull =
3607       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3608     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3609     CGF.EmitBlock(writebackBB);
3610   }
3611 
3612   // Load the value to writeback.
3613   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3614 
3615   // Cast it back, in case we're writing an id to a Foo* or something.
3616   value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3617                                     "icr.writeback-cast");
3618 
3619   // Perform the writeback.
3620 
3621   // If we have a "to use" value, it's something we need to emit a use
3622   // of.  This has to be carefully threaded in: if it's done after the
3623   // release it's potentially undefined behavior (and the optimizer
3624   // will ignore it), and if it happens before the retain then the
3625   // optimizer could move the release there.
3626   if (writeback.ToUse) {
3627     assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3628 
3629     // Retain the new value.  No need to block-copy here:  the block's
3630     // being passed up the stack.
3631     value = CGF.EmitARCRetainNonBlock(value);
3632 
3633     // Emit the intrinsic use here.
3634     CGF.EmitARCIntrinsicUse(writeback.ToUse);
3635 
3636     // Load the old value (primitively).
3637     llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3638 
3639     // Put the new value in place (primitively).
3640     CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3641 
3642     // Release the old value.
3643     CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3644 
3645   // Otherwise, we can just do a normal lvalue store.
3646   } else {
3647     CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3648   }
3649 
3650   // Jump to the continuation block.
3651   if (!provablyNonNull)
3652     CGF.EmitBlock(contBB);
3653 }
3654 
3655 static void emitWritebacks(CodeGenFunction &CGF,
3656                            const CallArgList &args) {
3657   for (const auto &I : args.writebacks())
3658     emitWriteback(CGF, I);
3659 }
3660 
3661 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3662                                             const CallArgList &CallArgs) {
3663   ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3664     CallArgs.getCleanupsToDeactivate();
3665   // Iterate in reverse to increase the likelihood of popping the cleanup.
3666   for (const auto &I : llvm::reverse(Cleanups)) {
3667     CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3668     I.IsActiveIP->eraseFromParent();
3669   }
3670 }
3671 
3672 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3673   if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3674     if (uop->getOpcode() == UO_AddrOf)
3675       return uop->getSubExpr();
3676   return nullptr;
3677 }
3678 
3679 /// Emit an argument that's being passed call-by-writeback.  That is,
3680 /// we are passing the address of an __autoreleased temporary; it
3681 /// might be copy-initialized with the current value of the given
3682 /// address, but it will definitely be copied out of after the call.
3683 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3684                              const ObjCIndirectCopyRestoreExpr *CRE) {
3685   LValue srcLV;
3686 
3687   // Make an optimistic effort to emit the address as an l-value.
3688   // This can fail if the argument expression is more complicated.
3689   if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3690     srcLV = CGF.EmitLValue(lvExpr);
3691 
3692   // Otherwise, just emit it as a scalar.
3693   } else {
3694     Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3695 
3696     QualType srcAddrType =
3697       CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3698     srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3699   }
3700   Address srcAddr = srcLV.getAddress(CGF);
3701 
3702   // The dest and src types don't necessarily match in LLVM terms
3703   // because of the crazy ObjC compatibility rules.
3704 
3705   llvm::PointerType *destType =
3706     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3707 
3708   // If the address is a constant null, just pass the appropriate null.
3709   if (isProvablyNull(srcAddr.getPointer())) {
3710     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3711              CRE->getType());
3712     return;
3713   }
3714 
3715   // Create the temporary.
3716   Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3717                                       CGF.getPointerAlign(),
3718                                       "icr.temp");
3719   // Loading an l-value can introduce a cleanup if the l-value is __weak,
3720   // and that cleanup will be conditional if we can't prove that the l-value
3721   // isn't null, so we need to register a dominating point so that the cleanups
3722   // system will make valid IR.
3723   CodeGenFunction::ConditionalEvaluation condEval(CGF);
3724 
3725   // Zero-initialize it if we're not doing a copy-initialization.
3726   bool shouldCopy = CRE->shouldCopy();
3727   if (!shouldCopy) {
3728     llvm::Value *null =
3729       llvm::ConstantPointerNull::get(
3730         cast<llvm::PointerType>(destType->getElementType()));
3731     CGF.Builder.CreateStore(null, temp);
3732   }
3733 
3734   llvm::BasicBlock *contBB = nullptr;
3735   llvm::BasicBlock *originBB = nullptr;
3736 
3737   // If the address is *not* known to be non-null, we need to switch.
3738   llvm::Value *finalArgument;
3739 
3740   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3741                                               CGF.CGM.getDataLayout());
3742   if (provablyNonNull) {
3743     finalArgument = temp.getPointer();
3744   } else {
3745     llvm::Value *isNull =
3746       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3747 
3748     finalArgument = CGF.Builder.CreateSelect(isNull,
3749                                    llvm::ConstantPointerNull::get(destType),
3750                                              temp.getPointer(), "icr.argument");
3751 
3752     // If we need to copy, then the load has to be conditional, which
3753     // means we need control flow.
3754     if (shouldCopy) {
3755       originBB = CGF.Builder.GetInsertBlock();
3756       contBB = CGF.createBasicBlock("icr.cont");
3757       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3758       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3759       CGF.EmitBlock(copyBB);
3760       condEval.begin(CGF);
3761     }
3762   }
3763 
3764   llvm::Value *valueToUse = nullptr;
3765 
3766   // Perform a copy if necessary.
3767   if (shouldCopy) {
3768     RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3769     assert(srcRV.isScalar());
3770 
3771     llvm::Value *src = srcRV.getScalarVal();
3772     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3773                                     "icr.cast");
3774 
3775     // Use an ordinary store, not a store-to-lvalue.
3776     CGF.Builder.CreateStore(src, temp);
3777 
3778     // If optimization is enabled, and the value was held in a
3779     // __strong variable, we need to tell the optimizer that this
3780     // value has to stay alive until we're doing the store back.
3781     // This is because the temporary is effectively unretained,
3782     // and so otherwise we can violate the high-level semantics.
3783     if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3784         srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3785       valueToUse = src;
3786     }
3787   }
3788 
3789   // Finish the control flow if we needed it.
3790   if (shouldCopy && !provablyNonNull) {
3791     llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3792     CGF.EmitBlock(contBB);
3793 
3794     // Make a phi for the value to intrinsically use.
3795     if (valueToUse) {
3796       llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3797                                                       "icr.to-use");
3798       phiToUse->addIncoming(valueToUse, copyBB);
3799       phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3800                             originBB);
3801       valueToUse = phiToUse;
3802     }
3803 
3804     condEval.end(CGF);
3805   }
3806 
3807   args.addWriteback(srcLV, temp, valueToUse);
3808   args.add(RValue::get(finalArgument), CRE->getType());
3809 }
3810 
3811 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3812   assert(!StackBase);
3813 
3814   // Save the stack.
3815   llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3816   StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3817 }
3818 
3819 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3820   if (StackBase) {
3821     // Restore the stack after the call.
3822     llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3823     CGF.Builder.CreateCall(F, StackBase);
3824   }
3825 }
3826 
3827 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3828                                           SourceLocation ArgLoc,
3829                                           AbstractCallee AC,
3830                                           unsigned ParmNum) {
3831   if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3832                          SanOpts.has(SanitizerKind::NullabilityArg)))
3833     return;
3834 
3835   // The param decl may be missing in a variadic function.
3836   auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3837   unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3838 
3839   // Prefer the nonnull attribute if it's present.
3840   const NonNullAttr *NNAttr = nullptr;
3841   if (SanOpts.has(SanitizerKind::NonnullAttribute))
3842     NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3843 
3844   bool CanCheckNullability = false;
3845   if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3846     auto Nullability = PVD->getType()->getNullability(getContext());
3847     CanCheckNullability = Nullability &&
3848                           *Nullability == NullabilityKind::NonNull &&
3849                           PVD->getTypeSourceInfo();
3850   }
3851 
3852   if (!NNAttr && !CanCheckNullability)
3853     return;
3854 
3855   SourceLocation AttrLoc;
3856   SanitizerMask CheckKind;
3857   SanitizerHandler Handler;
3858   if (NNAttr) {
3859     AttrLoc = NNAttr->getLocation();
3860     CheckKind = SanitizerKind::NonnullAttribute;
3861     Handler = SanitizerHandler::NonnullArg;
3862   } else {
3863     AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3864     CheckKind = SanitizerKind::NullabilityArg;
3865     Handler = SanitizerHandler::NullabilityArg;
3866   }
3867 
3868   SanitizerScope SanScope(this);
3869   llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType);
3870   llvm::Constant *StaticData[] = {
3871       EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3872       llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3873   };
3874   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3875 }
3876 
3877 // Check if the call is going to use the inalloca convention. This needs to
3878 // agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged
3879 // later, so we can't check it directly.
3880 static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC,
3881                             ArrayRef<QualType> ArgTypes) {
3882   // The Swift calling convention doesn't go through the target-specific
3883   // argument classification, so it never uses inalloca.
3884   // TODO: Consider limiting inalloca use to only calling conventions supported
3885   // by MSVC.
3886   if (ExplicitCC == CC_Swift)
3887     return false;
3888   if (!CGM.getTarget().getCXXABI().isMicrosoft())
3889     return false;
3890   return llvm::any_of(ArgTypes, [&](QualType Ty) {
3891     return isInAllocaArgument(CGM.getCXXABI(), Ty);
3892   });
3893 }
3894 
3895 #ifndef NDEBUG
3896 // Determine whether the given argument is an Objective-C method
3897 // that may have type parameters in its signature.
3898 static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
3899   const DeclContext *dc = method->getDeclContext();
3900   if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) {
3901     return classDecl->getTypeParamListAsWritten();
3902   }
3903 
3904   if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
3905     return catDecl->getTypeParamList();
3906   }
3907 
3908   return false;
3909 }
3910 #endif
3911 
3912 /// EmitCallArgs - Emit call arguments for a function.
3913 void CodeGenFunction::EmitCallArgs(
3914     CallArgList &Args, PrototypeWrapper Prototype,
3915     llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3916     AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3917   SmallVector<QualType, 16> ArgTypes;
3918 
3919   assert((ParamsToSkip == 0 || Prototype.P) &&
3920          "Can't skip parameters if type info is not provided");
3921 
3922   // This variable only captures *explicitly* written conventions, not those
3923   // applied by default via command line flags or target defaults, such as
3924   // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would
3925   // require knowing if this is a C++ instance method or being able to see
3926   // unprototyped FunctionTypes.
3927   CallingConv ExplicitCC = CC_C;
3928 
3929   // First, if a prototype was provided, use those argument types.
3930   bool IsVariadic = false;
3931   if (Prototype.P) {
3932     const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>();
3933     if (MD) {
3934       IsVariadic = MD->isVariadic();
3935       ExplicitCC = getCallingConventionForDecl(
3936           MD, CGM.getTarget().getTriple().isOSWindows());
3937       ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
3938                       MD->param_type_end());
3939     } else {
3940       const auto *FPT = Prototype.P.get<const FunctionProtoType *>();
3941       IsVariadic = FPT->isVariadic();
3942       ExplicitCC = FPT->getExtInfo().getCC();
3943       ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
3944                       FPT->param_type_end());
3945     }
3946 
3947 #ifndef NDEBUG
3948     // Check that the prototyped types match the argument expression types.
3949     bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD);
3950     CallExpr::const_arg_iterator Arg = ArgRange.begin();
3951     for (QualType Ty : ArgTypes) {
3952       assert(Arg != ArgRange.end() && "Running over edge of argument list!");
3953       assert(
3954           (isGenericMethod || Ty->isVariablyModifiedType() ||
3955            Ty.getNonReferenceType()->isObjCRetainableType() ||
3956            getContext()
3957                    .getCanonicalType(Ty.getNonReferenceType())
3958                    .getTypePtr() ==
3959                getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
3960           "type mismatch in call argument!");
3961       ++Arg;
3962     }
3963 
3964     // Either we've emitted all the call args, or we have a call to variadic
3965     // function.
3966     assert((Arg == ArgRange.end() || IsVariadic) &&
3967            "Extra arguments in non-variadic function!");
3968 #endif
3969   }
3970 
3971   // If we still have any arguments, emit them using the type of the argument.
3972   for (auto *A : llvm::make_range(std::next(ArgRange.begin(), ArgTypes.size()),
3973                                   ArgRange.end()))
3974     ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
3975   assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3976 
3977   // We must evaluate arguments from right to left in the MS C++ ABI,
3978   // because arguments are destroyed left to right in the callee. As a special
3979   // case, there are certain language constructs that require left-to-right
3980   // evaluation, and in those cases we consider the evaluation order requirement
3981   // to trump the "destruction order is reverse construction order" guarantee.
3982   bool LeftToRight =
3983       CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3984           ? Order == EvaluationOrder::ForceLeftToRight
3985           : Order != EvaluationOrder::ForceRightToLeft;
3986 
3987   auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3988                                          RValue EmittedArg) {
3989     if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3990       return;
3991     auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3992     if (PS == nullptr)
3993       return;
3994 
3995     const auto &Context = getContext();
3996     auto SizeTy = Context.getSizeType();
3997     auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3998     assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3999     llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
4000                                                      EmittedArg.getScalarVal(),
4001                                                      PS->isDynamic());
4002     Args.add(RValue::get(V), SizeTy);
4003     // If we're emitting args in reverse, be sure to do so with
4004     // pass_object_size, as well.
4005     if (!LeftToRight)
4006       std::swap(Args.back(), *(&Args.back() - 1));
4007   };
4008 
4009   // Insert a stack save if we're going to need any inalloca args.
4010   if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
4011     assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4012            "inalloca only supported on x86");
4013     Args.allocateArgumentMemory(*this);
4014   }
4015 
4016   // Evaluate each argument in the appropriate order.
4017   size_t CallArgsStart = Args.size();
4018   for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4019     unsigned Idx = LeftToRight ? I : E - I - 1;
4020     CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
4021     unsigned InitialArgSize = Args.size();
4022     // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
4023     // the argument and parameter match or the objc method is parameterized.
4024     assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
4025             getContext().hasSameUnqualifiedType((*Arg)->getType(),
4026                                                 ArgTypes[Idx]) ||
4027             (isa<ObjCMethodDecl>(AC.getDecl()) &&
4028              isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
4029            "Argument and parameter types don't match");
4030     EmitCallArg(Args, *Arg, ArgTypes[Idx]);
4031     // In particular, we depend on it being the last arg in Args, and the
4032     // objectsize bits depend on there only being one arg if !LeftToRight.
4033     assert(InitialArgSize + 1 == Args.size() &&
4034            "The code below depends on only adding one arg per EmitCallArg");
4035     (void)InitialArgSize;
4036     // Since pointer argument are never emitted as LValue, it is safe to emit
4037     // non-null argument check for r-value only.
4038     if (!Args.back().hasLValue()) {
4039       RValue RVArg = Args.back().getKnownRValue();
4040       EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
4041                           ParamsToSkip + Idx);
4042       // @llvm.objectsize should never have side-effects and shouldn't need
4043       // destruction/cleanups, so we can safely "emit" it after its arg,
4044       // regardless of right-to-leftness
4045       MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4046     }
4047   }
4048 
4049   if (!LeftToRight) {
4050     // Un-reverse the arguments we just evaluated so they match up with the LLVM
4051     // IR function.
4052     std::reverse(Args.begin() + CallArgsStart, Args.end());
4053   }
4054 }
4055 
4056 namespace {
4057 
4058 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
4059   DestroyUnpassedArg(Address Addr, QualType Ty)
4060       : Addr(Addr), Ty(Ty) {}
4061 
4062   Address Addr;
4063   QualType Ty;
4064 
4065   void Emit(CodeGenFunction &CGF, Flags flags) override {
4066     QualType::DestructionKind DtorKind = Ty.isDestructedType();
4067     if (DtorKind == QualType::DK_cxx_destructor) {
4068       const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
4069       assert(!Dtor->isTrivial());
4070       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
4071                                 /*Delegating=*/false, Addr, Ty);
4072     } else {
4073       CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
4074     }
4075   }
4076 };
4077 
4078 struct DisableDebugLocationUpdates {
4079   CodeGenFunction &CGF;
4080   bool disabledDebugInfo;
4081   DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
4082     if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
4083       CGF.disableDebugInfo();
4084   }
4085   ~DisableDebugLocationUpdates() {
4086     if (disabledDebugInfo)
4087       CGF.enableDebugInfo();
4088   }
4089 };
4090 
4091 } // end anonymous namespace
4092 
4093 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
4094   if (!HasLV)
4095     return RV;
4096   LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
4097   CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
4098                         LV.isVolatile());
4099   IsUsed = true;
4100   return RValue::getAggregate(Copy.getAddress(CGF));
4101 }
4102 
4103 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
4104   LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
4105   if (!HasLV && RV.isScalar())
4106     CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
4107   else if (!HasLV && RV.isComplex())
4108     CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
4109   else {
4110     auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
4111     LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
4112     // We assume that call args are never copied into subobjects.
4113     CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
4114                           HasLV ? LV.isVolatileQualified()
4115                                 : RV.isVolatileQualified());
4116   }
4117   IsUsed = true;
4118 }
4119 
4120 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
4121                                   QualType type) {
4122   DisableDebugLocationUpdates Dis(*this, E);
4123   if (const ObjCIndirectCopyRestoreExpr *CRE
4124         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
4125     assert(getLangOpts().ObjCAutoRefCount);
4126     return emitWritebackArg(*this, args, CRE);
4127   }
4128 
4129   assert(type->isReferenceType() == E->isGLValue() &&
4130          "reference binding to unmaterialized r-value!");
4131 
4132   if (E->isGLValue()) {
4133     assert(E->getObjectKind() == OK_Ordinary);
4134     return args.add(EmitReferenceBindingToExpr(E), type);
4135   }
4136 
4137   bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
4138 
4139   // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
4140   // However, we still have to push an EH-only cleanup in case we unwind before
4141   // we make it to the call.
4142   if (HasAggregateEvalKind &&
4143       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
4144     // If we're using inalloca, use the argument memory.  Otherwise, use a
4145     // temporary.
4146     AggValueSlot Slot;
4147     if (args.isUsingInAlloca())
4148       Slot = createPlaceholderSlot(*this, type);
4149     else
4150       Slot = CreateAggTemp(type, "agg.tmp");
4151 
4152     bool DestroyedInCallee = true, NeedsEHCleanup = true;
4153     if (const auto *RD = type->getAsCXXRecordDecl())
4154       DestroyedInCallee = RD->hasNonTrivialDestructor();
4155     else
4156       NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
4157 
4158     if (DestroyedInCallee)
4159       Slot.setExternallyDestructed();
4160 
4161     EmitAggExpr(E, Slot);
4162     RValue RV = Slot.asRValue();
4163     args.add(RV, type);
4164 
4165     if (DestroyedInCallee && NeedsEHCleanup) {
4166       // Create a no-op GEP between the placeholder and the cleanup so we can
4167       // RAUW it successfully.  It also serves as a marker of the first
4168       // instruction where the cleanup is active.
4169       pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
4170                                               type);
4171       // This unreachable is a temporary marker which will be removed later.
4172       llvm::Instruction *IsActive = Builder.CreateUnreachable();
4173       args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
4174     }
4175     return;
4176   }
4177 
4178   if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
4179       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
4180     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
4181     assert(L.isSimple());
4182     args.addUncopiedAggregate(L, type);
4183     return;
4184   }
4185 
4186   args.add(EmitAnyExprToTemp(E), type);
4187 }
4188 
4189 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
4190   // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
4191   // implicitly widens null pointer constants that are arguments to varargs
4192   // functions to pointer-sized ints.
4193   if (!getTarget().getTriple().isOSWindows())
4194     return Arg->getType();
4195 
4196   if (Arg->getType()->isIntegerType() &&
4197       getContext().getTypeSize(Arg->getType()) <
4198           getContext().getTargetInfo().getPointerWidth(0) &&
4199       Arg->isNullPointerConstant(getContext(),
4200                                  Expr::NPC_ValueDependentIsNotNull)) {
4201     return getContext().getIntPtrType();
4202   }
4203 
4204   return Arg->getType();
4205 }
4206 
4207 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4208 // optimizer it can aggressively ignore unwind edges.
4209 void
4210 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
4211   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
4212       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
4213     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
4214                       CGM.getNoObjCARCExceptionsMetadata());
4215 }
4216 
4217 /// Emits a call to the given no-arguments nounwind runtime function.
4218 llvm::CallInst *
4219 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4220                                          const llvm::Twine &name) {
4221   return EmitNounwindRuntimeCall(callee, None, name);
4222 }
4223 
4224 /// Emits a call to the given nounwind runtime function.
4225 llvm::CallInst *
4226 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4227                                          ArrayRef<llvm::Value *> args,
4228                                          const llvm::Twine &name) {
4229   llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
4230   call->setDoesNotThrow();
4231   return call;
4232 }
4233 
4234 /// Emits a simple call (never an invoke) to the given no-arguments
4235 /// runtime function.
4236 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4237                                                  const llvm::Twine &name) {
4238   return EmitRuntimeCall(callee, None, name);
4239 }
4240 
4241 // Calls which may throw must have operand bundles indicating which funclet
4242 // they are nested within.
4243 SmallVector<llvm::OperandBundleDef, 1>
4244 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
4245   SmallVector<llvm::OperandBundleDef, 1> BundleList;
4246   // There is no need for a funclet operand bundle if we aren't inside a
4247   // funclet.
4248   if (!CurrentFuncletPad)
4249     return BundleList;
4250 
4251   // Skip intrinsics which cannot throw.
4252   auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
4253   if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
4254     return BundleList;
4255 
4256   BundleList.emplace_back("funclet", CurrentFuncletPad);
4257   return BundleList;
4258 }
4259 
4260 /// Emits a simple call (never an invoke) to the given runtime function.
4261 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4262                                                  ArrayRef<llvm::Value *> args,
4263                                                  const llvm::Twine &name) {
4264   llvm::CallInst *call = Builder.CreateCall(
4265       callee, args, getBundlesForFunclet(callee.getCallee()), name);
4266   call->setCallingConv(getRuntimeCC());
4267   return call;
4268 }
4269 
4270 /// Emits a call or invoke to the given noreturn runtime function.
4271 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
4272     llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
4273   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4274       getBundlesForFunclet(callee.getCallee());
4275 
4276   if (getInvokeDest()) {
4277     llvm::InvokeInst *invoke =
4278       Builder.CreateInvoke(callee,
4279                            getUnreachableBlock(),
4280                            getInvokeDest(),
4281                            args,
4282                            BundleList);
4283     invoke->setDoesNotReturn();
4284     invoke->setCallingConv(getRuntimeCC());
4285   } else {
4286     llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
4287     call->setDoesNotReturn();
4288     call->setCallingConv(getRuntimeCC());
4289     Builder.CreateUnreachable();
4290   }
4291 }
4292 
4293 /// Emits a call or invoke instruction to the given nullary runtime function.
4294 llvm::CallBase *
4295 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4296                                          const Twine &name) {
4297   return EmitRuntimeCallOrInvoke(callee, None, name);
4298 }
4299 
4300 /// Emits a call or invoke instruction to the given runtime function.
4301 llvm::CallBase *
4302 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4303                                          ArrayRef<llvm::Value *> args,
4304                                          const Twine &name) {
4305   llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
4306   call->setCallingConv(getRuntimeCC());
4307   return call;
4308 }
4309 
4310 /// Emits a call or invoke instruction to the given function, depending
4311 /// on the current state of the EH stack.
4312 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
4313                                                   ArrayRef<llvm::Value *> Args,
4314                                                   const Twine &Name) {
4315   llvm::BasicBlock *InvokeDest = getInvokeDest();
4316   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4317       getBundlesForFunclet(Callee.getCallee());
4318 
4319   llvm::CallBase *Inst;
4320   if (!InvokeDest)
4321     Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
4322   else {
4323     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
4324     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4325                                 Name);
4326     EmitBlock(ContBB);
4327   }
4328 
4329   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4330   // optimizer it can aggressively ignore unwind edges.
4331   if (CGM.getLangOpts().ObjCAutoRefCount)
4332     AddObjCARCExceptionMetadata(Inst);
4333 
4334   return Inst;
4335 }
4336 
4337 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4338                                                   llvm::Value *New) {
4339   DeferredReplacements.push_back(std::make_pair(Old, New));
4340 }
4341 
4342 namespace {
4343 
4344 /// Specify given \p NewAlign as the alignment of return value attribute. If
4345 /// such attribute already exists, re-set it to the maximal one of two options.
4346 LLVM_NODISCARD llvm::AttributeList
4347 maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4348                                 const llvm::AttributeList &Attrs,
4349                                 llvm::Align NewAlign) {
4350   llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4351   if (CurAlign >= NewAlign)
4352     return Attrs;
4353   llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4354   return Attrs
4355       .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex,
4356                        llvm::Attribute::AttrKind::Alignment)
4357       .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr);
4358 }
4359 
4360 template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
4361 protected:
4362   CodeGenFunction &CGF;
4363 
4364   /// We do nothing if this is, or becomes, nullptr.
4365   const AlignedAttrTy *AA = nullptr;
4366 
4367   llvm::Value *Alignment = nullptr;      // May or may not be a constant.
4368   llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.
4369 
4370   AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4371       : CGF(CGF_) {
4372     if (!FuncDecl)
4373       return;
4374     AA = FuncDecl->getAttr<AlignedAttrTy>();
4375   }
4376 
4377 public:
4378   /// If we can, materialize the alignment as an attribute on return value.
4379   LLVM_NODISCARD llvm::AttributeList
4380   TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
4381     if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
4382       return Attrs;
4383     const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
4384     if (!AlignmentCI)
4385       return Attrs;
4386     // We may legitimately have non-power-of-2 alignment here.
4387     // If so, this is UB land, emit it via `@llvm.assume` instead.
4388     if (!AlignmentCI->getValue().isPowerOf2())
4389       return Attrs;
4390     llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
4391         CGF.getLLVMContext(), Attrs,
4392         llvm::Align(
4393             AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
4394     AA = nullptr; // We're done. Disallow doing anything else.
4395     return NewAttrs;
4396   }
4397 
4398   /// Emit alignment assumption.
4399   /// This is a general fallback that we take if either there is an offset,
4400   /// or the alignment is variable or we are sanitizing for alignment.
4401   void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
4402     if (!AA)
4403       return;
4404     CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
4405                                 AA->getLocation(), Alignment, OffsetCI);
4406     AA = nullptr; // We're done. Disallow doing anything else.
4407   }
4408 };
4409 
4410 /// Helper data structure to emit `AssumeAlignedAttr`.
4411 class AssumeAlignedAttrEmitter final
4412     : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
4413 public:
4414   AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4415       : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4416     if (!AA)
4417       return;
4418     // It is guaranteed that the alignment/offset are constants.
4419     Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
4420     if (Expr *Offset = AA->getOffset()) {
4421       OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
4422       if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
4423         OffsetCI = nullptr;
4424     }
4425   }
4426 };
4427 
4428 /// Helper data structure to emit `AllocAlignAttr`.
4429 class AllocAlignAttrEmitter final
4430     : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
4431 public:
4432   AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
4433                         const CallArgList &CallArgs)
4434       : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4435     if (!AA)
4436       return;
4437     // Alignment may or may not be a constant, and that is okay.
4438     Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
4439                     .getRValue(CGF)
4440                     .getScalarVal();
4441   }
4442 };
4443 
4444 } // namespace
4445 
4446 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
4447                                  const CGCallee &Callee,
4448                                  ReturnValueSlot ReturnValue,
4449                                  const CallArgList &CallArgs,
4450                                  llvm::CallBase **callOrInvoke,
4451                                  SourceLocation Loc) {
4452   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
4453 
4454   assert(Callee.isOrdinary() || Callee.isVirtual());
4455 
4456   // Handle struct-return functions by passing a pointer to the
4457   // location that we would like to return into.
4458   QualType RetTy = CallInfo.getReturnType();
4459   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
4460 
4461   llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
4462 
4463   const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4464   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
4465     // We can only guarantee that a function is called from the correct
4466     // context/function based on the appropriate target attributes,
4467     // so only check in the case where we have both always_inline and target
4468     // since otherwise we could be making a conditional call after a check for
4469     // the proper cpu features (and it won't cause code generation issues due to
4470     // function based code generation).
4471     if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
4472         TargetDecl->hasAttr<TargetAttr>())
4473       checkTargetFeatures(Loc, FD);
4474 
4475     // Some architectures (such as x86-64) have the ABI changed based on
4476     // attribute-target/features. Give them a chance to diagnose.
4477     CGM.getTargetCodeGenInfo().checkFunctionCallABI(
4478         CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
4479   }
4480 
4481 #ifndef NDEBUG
4482   if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
4483     // For an inalloca varargs function, we don't expect CallInfo to match the
4484     // function pointer's type, because the inalloca struct a will have extra
4485     // fields in it for the varargs parameters.  Code later in this function
4486     // bitcasts the function pointer to the type derived from CallInfo.
4487     //
4488     // In other cases, we assert that the types match up (until pointers stop
4489     // having pointee types).
4490     llvm::Type *TypeFromVal;
4491     if (Callee.isVirtual())
4492       TypeFromVal = Callee.getVirtualFunctionType();
4493     else
4494       TypeFromVal =
4495           Callee.getFunctionPointer()->getType()->getPointerElementType();
4496     assert(IRFuncTy == TypeFromVal);
4497   }
4498 #endif
4499 
4500   // 1. Set up the arguments.
4501 
4502   // If we're using inalloca, insert the allocation after the stack save.
4503   // FIXME: Do this earlier rather than hacking it in here!
4504   Address ArgMemory = Address::invalid();
4505   if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
4506     const llvm::DataLayout &DL = CGM.getDataLayout();
4507     llvm::Instruction *IP = CallArgs.getStackBase();
4508     llvm::AllocaInst *AI;
4509     if (IP) {
4510       IP = IP->getNextNode();
4511       AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
4512                                 "argmem", IP);
4513     } else {
4514       AI = CreateTempAlloca(ArgStruct, "argmem");
4515     }
4516     auto Align = CallInfo.getArgStructAlignment();
4517     AI->setAlignment(Align.getAsAlign());
4518     AI->setUsedWithInAlloca(true);
4519     assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
4520     ArgMemory = Address(AI, Align);
4521   }
4522 
4523   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
4524   SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
4525 
4526   // If the call returns a temporary with struct return, create a temporary
4527   // alloca to hold the result, unless one is given to us.
4528   Address SRetPtr = Address::invalid();
4529   Address SRetAlloca = Address::invalid();
4530   llvm::Value *UnusedReturnSizePtr = nullptr;
4531   if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
4532     if (!ReturnValue.isNull()) {
4533       SRetPtr = ReturnValue.getValue();
4534     } else {
4535       SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
4536       if (HaveInsertPoint() && ReturnValue.isUnused()) {
4537         uint64_t size =
4538             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
4539         UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
4540       }
4541     }
4542     if (IRFunctionArgs.hasSRetArg()) {
4543       IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
4544     } else if (RetAI.isInAlloca()) {
4545       Address Addr =
4546           Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
4547       Builder.CreateStore(SRetPtr.getPointer(), Addr);
4548     }
4549   }
4550 
4551   Address swiftErrorTemp = Address::invalid();
4552   Address swiftErrorArg = Address::invalid();
4553 
4554   // When passing arguments using temporary allocas, we need to add the
4555   // appropriate lifetime markers. This vector keeps track of all the lifetime
4556   // markers that need to be ended right after the call.
4557   SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;
4558 
4559   // Translate all of the arguments as necessary to match the IR lowering.
4560   assert(CallInfo.arg_size() == CallArgs.size() &&
4561          "Mismatch between function signature & arguments.");
4562   unsigned ArgNo = 0;
4563   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
4564   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
4565        I != E; ++I, ++info_it, ++ArgNo) {
4566     const ABIArgInfo &ArgInfo = info_it->info;
4567 
4568     // Insert a padding argument to ensure proper alignment.
4569     if (IRFunctionArgs.hasPaddingArg(ArgNo))
4570       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
4571           llvm::UndefValue::get(ArgInfo.getPaddingType());
4572 
4573     unsigned FirstIRArg, NumIRArgs;
4574     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
4575 
4576     switch (ArgInfo.getKind()) {
4577     case ABIArgInfo::InAlloca: {
4578       assert(NumIRArgs == 0);
4579       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
4580       if (I->isAggregate()) {
4581         Address Addr = I->hasLValue()
4582                            ? I->getKnownLValue().getAddress(*this)
4583                            : I->getKnownRValue().getAggregateAddress();
4584         llvm::Instruction *Placeholder =
4585             cast<llvm::Instruction>(Addr.getPointer());
4586 
4587         if (!ArgInfo.getInAllocaIndirect()) {
4588           // Replace the placeholder with the appropriate argument slot GEP.
4589           CGBuilderTy::InsertPoint IP = Builder.saveIP();
4590           Builder.SetInsertPoint(Placeholder);
4591           Addr = Builder.CreateStructGEP(ArgMemory,
4592                                          ArgInfo.getInAllocaFieldIndex());
4593           Builder.restoreIP(IP);
4594         } else {
4595           // For indirect things such as overaligned structs, replace the
4596           // placeholder with a regular aggregate temporary alloca. Store the
4597           // address of this alloca into the struct.
4598           Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
4599           Address ArgSlot = Builder.CreateStructGEP(
4600               ArgMemory, ArgInfo.getInAllocaFieldIndex());
4601           Builder.CreateStore(Addr.getPointer(), ArgSlot);
4602         }
4603         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
4604       } else if (ArgInfo.getInAllocaIndirect()) {
4605         // Make a temporary alloca and store the address of it into the argument
4606         // struct.
4607         Address Addr = CreateMemTempWithoutCast(
4608             I->Ty, getContext().getTypeAlignInChars(I->Ty),
4609             "indirect-arg-temp");
4610         I->copyInto(*this, Addr);
4611         Address ArgSlot =
4612             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4613         Builder.CreateStore(Addr.getPointer(), ArgSlot);
4614       } else {
4615         // Store the RValue into the argument struct.
4616         Address Addr =
4617             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4618         unsigned AS = Addr.getType()->getPointerAddressSpace();
4619         llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
4620         // There are some cases where a trivial bitcast is not avoidable.  The
4621         // definition of a type later in a translation unit may change it's type
4622         // from {}* to (%struct.foo*)*.
4623         if (Addr.getType() != MemType)
4624           Addr = Builder.CreateBitCast(Addr, MemType);
4625         I->copyInto(*this, Addr);
4626       }
4627       break;
4628     }
4629 
4630     case ABIArgInfo::Indirect:
4631     case ABIArgInfo::IndirectAliased: {
4632       assert(NumIRArgs == 1);
4633       if (!I->isAggregate()) {
4634         // Make a temporary alloca to pass the argument.
4635         Address Addr = CreateMemTempWithoutCast(
4636             I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
4637         IRCallArgs[FirstIRArg] = Addr.getPointer();
4638 
4639         I->copyInto(*this, Addr);
4640       } else {
4641         // We want to avoid creating an unnecessary temporary+copy here;
4642         // however, we need one in three cases:
4643         // 1. If the argument is not byval, and we are required to copy the
4644         //    source.  (This case doesn't occur on any common architecture.)
4645         // 2. If the argument is byval, RV is not sufficiently aligned, and
4646         //    we cannot force it to be sufficiently aligned.
4647         // 3. If the argument is byval, but RV is not located in default
4648         //    or alloca address space.
4649         Address Addr = I->hasLValue()
4650                            ? I->getKnownLValue().getAddress(*this)
4651                            : I->getKnownRValue().getAggregateAddress();
4652         llvm::Value *V = Addr.getPointer();
4653         CharUnits Align = ArgInfo.getIndirectAlign();
4654         const llvm::DataLayout *TD = &CGM.getDataLayout();
4655 
4656         assert((FirstIRArg >= IRFuncTy->getNumParams() ||
4657                 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
4658                     TD->getAllocaAddrSpace()) &&
4659                "indirect argument must be in alloca address space");
4660 
4661         bool NeedCopy = false;
4662 
4663         if (Addr.getAlignment() < Align &&
4664             llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
4665                 Align.getAsAlign()) {
4666           NeedCopy = true;
4667         } else if (I->hasLValue()) {
4668           auto LV = I->getKnownLValue();
4669           auto AS = LV.getAddressSpace();
4670 
4671           if (!ArgInfo.getIndirectByVal() ||
4672               (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
4673             NeedCopy = true;
4674           }
4675           if (!getLangOpts().OpenCL) {
4676             if ((ArgInfo.getIndirectByVal() &&
4677                 (AS != LangAS::Default &&
4678                  AS != CGM.getASTAllocaAddressSpace()))) {
4679               NeedCopy = true;
4680             }
4681           }
4682           // For OpenCL even if RV is located in default or alloca address space
4683           // we don't want to perform address space cast for it.
4684           else if ((ArgInfo.getIndirectByVal() &&
4685                     Addr.getType()->getAddressSpace() != IRFuncTy->
4686                       getParamType(FirstIRArg)->getPointerAddressSpace())) {
4687             NeedCopy = true;
4688           }
4689         }
4690 
4691         if (NeedCopy) {
4692           // Create an aligned temporary, and copy to it.
4693           Address AI = CreateMemTempWithoutCast(
4694               I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
4695           IRCallArgs[FirstIRArg] = AI.getPointer();
4696 
4697           // Emit lifetime markers for the temporary alloca.
4698           uint64_t ByvalTempElementSize =
4699               CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
4700           llvm::Value *LifetimeSize =
4701               EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
4702 
4703           // Add cleanup code to emit the end lifetime marker after the call.
4704           if (LifetimeSize) // In case we disabled lifetime markers.
4705             CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
4706 
4707           // Generate the copy.
4708           I->copyInto(*this, AI);
4709         } else {
4710           // Skip the extra memcpy call.
4711           auto *T = V->getType()->getPointerElementType()->getPointerTo(
4712               CGM.getDataLayout().getAllocaAddrSpace());
4713           IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
4714               *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
4715               true);
4716         }
4717       }
4718       break;
4719     }
4720 
4721     case ABIArgInfo::Ignore:
4722       assert(NumIRArgs == 0);
4723       break;
4724 
4725     case ABIArgInfo::Extend:
4726     case ABIArgInfo::Direct: {
4727       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4728           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4729           ArgInfo.getDirectOffset() == 0) {
4730         assert(NumIRArgs == 1);
4731         llvm::Value *V;
4732         if (!I->isAggregate())
4733           V = I->getKnownRValue().getScalarVal();
4734         else
4735           V = Builder.CreateLoad(
4736               I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4737                              : I->getKnownRValue().getAggregateAddress());
4738 
4739         // Implement swifterror by copying into a new swifterror argument.
4740         // We'll write back in the normal path out of the call.
4741         if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4742               == ParameterABI::SwiftErrorResult) {
4743           assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4744 
4745           QualType pointeeTy = I->Ty->getPointeeType();
4746           swiftErrorArg =
4747             Address(V, getContext().getTypeAlignInChars(pointeeTy));
4748 
4749           swiftErrorTemp =
4750             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4751           V = swiftErrorTemp.getPointer();
4752           cast<llvm::AllocaInst>(V)->setSwiftError(true);
4753 
4754           llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4755           Builder.CreateStore(errorValue, swiftErrorTemp);
4756         }
4757 
4758         // We might have to widen integers, but we should never truncate.
4759         if (ArgInfo.getCoerceToType() != V->getType() &&
4760             V->getType()->isIntegerTy())
4761           V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4762 
4763         // If the argument doesn't match, perform a bitcast to coerce it.  This
4764         // can happen due to trivial type mismatches.
4765         if (FirstIRArg < IRFuncTy->getNumParams() &&
4766             V->getType() != IRFuncTy->getParamType(FirstIRArg))
4767           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4768 
4769         IRCallArgs[FirstIRArg] = V;
4770         break;
4771       }
4772 
4773       // FIXME: Avoid the conversion through memory if possible.
4774       Address Src = Address::invalid();
4775       if (!I->isAggregate()) {
4776         Src = CreateMemTemp(I->Ty, "coerce");
4777         I->copyInto(*this, Src);
4778       } else {
4779         Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4780                              : I->getKnownRValue().getAggregateAddress();
4781       }
4782 
4783       // If the value is offset in memory, apply the offset now.
4784       Src = emitAddressAtOffset(*this, Src, ArgInfo);
4785 
4786       // Fast-isel and the optimizer generally like scalar values better than
4787       // FCAs, so we flatten them if this is safe to do for this argument.
4788       llvm::StructType *STy =
4789             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4790       if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4791         llvm::Type *SrcTy = Src.getElementType();
4792         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4793         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4794 
4795         // If the source type is smaller than the destination type of the
4796         // coerce-to logic, copy the source value into a temp alloca the size
4797         // of the destination type to allow loading all of it. The bits past
4798         // the source value are left undef.
4799         if (SrcSize < DstSize) {
4800           Address TempAlloca
4801             = CreateTempAlloca(STy, Src.getAlignment(),
4802                                Src.getName() + ".coerce");
4803           Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4804           Src = TempAlloca;
4805         } else {
4806           Src = Builder.CreateBitCast(Src,
4807                                       STy->getPointerTo(Src.getAddressSpace()));
4808         }
4809 
4810         assert(NumIRArgs == STy->getNumElements());
4811         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4812           Address EltPtr = Builder.CreateStructGEP(Src, i);
4813           llvm::Value *LI = Builder.CreateLoad(EltPtr);
4814           IRCallArgs[FirstIRArg + i] = LI;
4815         }
4816       } else {
4817         // In the simple case, just pass the coerced loaded value.
4818         assert(NumIRArgs == 1);
4819         llvm::Value *Load =
4820             CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4821 
4822         if (CallInfo.isCmseNSCall()) {
4823           // For certain parameter types, clear padding bits, as they may reveal
4824           // sensitive information.
4825           // Small struct/union types are passed as integer arrays.
4826           auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
4827           if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
4828             Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
4829         }
4830         IRCallArgs[FirstIRArg] = Load;
4831       }
4832 
4833       break;
4834     }
4835 
4836     case ABIArgInfo::CoerceAndExpand: {
4837       auto coercionType = ArgInfo.getCoerceAndExpandType();
4838       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4839 
4840       llvm::Value *tempSize = nullptr;
4841       Address addr = Address::invalid();
4842       Address AllocaAddr = Address::invalid();
4843       if (I->isAggregate()) {
4844         addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4845                               : I->getKnownRValue().getAggregateAddress();
4846 
4847       } else {
4848         RValue RV = I->getKnownRValue();
4849         assert(RV.isScalar()); // complex should always just be direct
4850 
4851         llvm::Type *scalarType = RV.getScalarVal()->getType();
4852         auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4853         auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4854 
4855         // Materialize to a temporary.
4856         addr = CreateTempAlloca(
4857             RV.getScalarVal()->getType(),
4858             CharUnits::fromQuantity(std::max(
4859                 (unsigned)layout->getAlignment().value(), scalarAlign)),
4860             "tmp",
4861             /*ArraySize=*/nullptr, &AllocaAddr);
4862         tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4863 
4864         Builder.CreateStore(RV.getScalarVal(), addr);
4865       }
4866 
4867       addr = Builder.CreateElementBitCast(addr, coercionType);
4868 
4869       unsigned IRArgPos = FirstIRArg;
4870       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4871         llvm::Type *eltType = coercionType->getElementType(i);
4872         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4873         Address eltAddr = Builder.CreateStructGEP(addr, i);
4874         llvm::Value *elt = Builder.CreateLoad(eltAddr);
4875         IRCallArgs[IRArgPos++] = elt;
4876       }
4877       assert(IRArgPos == FirstIRArg + NumIRArgs);
4878 
4879       if (tempSize) {
4880         EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4881       }
4882 
4883       break;
4884     }
4885 
4886     case ABIArgInfo::Expand: {
4887       unsigned IRArgPos = FirstIRArg;
4888       ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4889       assert(IRArgPos == FirstIRArg + NumIRArgs);
4890       break;
4891     }
4892     }
4893   }
4894 
4895   const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4896   llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4897 
4898   // If we're using inalloca, set up that argument.
4899   if (ArgMemory.isValid()) {
4900     llvm::Value *Arg = ArgMemory.getPointer();
4901     if (CallInfo.isVariadic()) {
4902       // When passing non-POD arguments by value to variadic functions, we will
4903       // end up with a variadic prototype and an inalloca call site.  In such
4904       // cases, we can't do any parameter mismatch checks.  Give up and bitcast
4905       // the callee.
4906       unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4907       CalleePtr =
4908           Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
4909     } else {
4910       llvm::Type *LastParamTy =
4911           IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4912       if (Arg->getType() != LastParamTy) {
4913 #ifndef NDEBUG
4914         // Assert that these structs have equivalent element types.
4915         llvm::StructType *FullTy = CallInfo.getArgStruct();
4916         llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4917             cast<llvm::PointerType>(LastParamTy)->getElementType());
4918         assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4919         for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4920                                                 DE = DeclaredTy->element_end(),
4921                                                 FI = FullTy->element_begin();
4922              DI != DE; ++DI, ++FI)
4923           assert(*DI == *FI);
4924 #endif
4925         Arg = Builder.CreateBitCast(Arg, LastParamTy);
4926       }
4927     }
4928     assert(IRFunctionArgs.hasInallocaArg());
4929     IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4930   }
4931 
4932   // 2. Prepare the function pointer.
4933 
4934   // If the callee is a bitcast of a non-variadic function to have a
4935   // variadic function pointer type, check to see if we can remove the
4936   // bitcast.  This comes up with unprototyped functions.
4937   //
4938   // This makes the IR nicer, but more importantly it ensures that we
4939   // can inline the function at -O0 if it is marked always_inline.
4940   auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
4941                                    llvm::Value *Ptr) -> llvm::Function * {
4942     if (!CalleeFT->isVarArg())
4943       return nullptr;
4944 
4945     // Get underlying value if it's a bitcast
4946     if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
4947       if (CE->getOpcode() == llvm::Instruction::BitCast)
4948         Ptr = CE->getOperand(0);
4949     }
4950 
4951     llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
4952     if (!OrigFn)
4953       return nullptr;
4954 
4955     llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4956 
4957     // If the original type is variadic, or if any of the component types
4958     // disagree, we cannot remove the cast.
4959     if (OrigFT->isVarArg() ||
4960         OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4961         OrigFT->getReturnType() != CalleeFT->getReturnType())
4962       return nullptr;
4963 
4964     for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4965       if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4966         return nullptr;
4967 
4968     return OrigFn;
4969   };
4970 
4971   if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
4972     CalleePtr = OrigFn;
4973     IRFuncTy = OrigFn->getFunctionType();
4974   }
4975 
4976   // 3. Perform the actual call.
4977 
4978   // Deactivate any cleanups that we're supposed to do immediately before
4979   // the call.
4980   if (!CallArgs.getCleanupsToDeactivate().empty())
4981     deactivateArgCleanupsBeforeCall(*this, CallArgs);
4982 
4983   // Assert that the arguments we computed match up.  The IR verifier
4984   // will catch this, but this is a common enough source of problems
4985   // during IRGen changes that it's way better for debugging to catch
4986   // it ourselves here.
4987 #ifndef NDEBUG
4988   assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4989   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4990     // Inalloca argument can have different type.
4991     if (IRFunctionArgs.hasInallocaArg() &&
4992         i == IRFunctionArgs.getInallocaArgNo())
4993       continue;
4994     if (i < IRFuncTy->getNumParams())
4995       assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4996   }
4997 #endif
4998 
4999   // Update the largest vector width if any arguments have vector types.
5000   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
5001     if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
5002       LargestVectorWidth =
5003           std::max((uint64_t)LargestVectorWidth,
5004                    VT->getPrimitiveSizeInBits().getKnownMinSize());
5005   }
5006 
5007   // Compute the calling convention and attributes.
5008   unsigned CallingConv;
5009   llvm::AttributeList Attrs;
5010   CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
5011                              Callee.getAbstractInfo(), Attrs, CallingConv,
5012                              /*AttrOnCallSite=*/true);
5013 
5014   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
5015     if (FD->hasAttr<StrictFPAttr>())
5016       // All calls within a strictfp function are marked strictfp
5017       Attrs =
5018         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5019                            llvm::Attribute::StrictFP);
5020 
5021   // Add nomerge attribute to the call-site if the callee function doesn't have
5022   // the attribute.
5023   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
5024     if (!FD->hasAttr<NoMergeAttr>() && InNoMergeAttributedStmt)
5025       Attrs = Attrs.addAttribute(getLLVMContext(),
5026                                  llvm::AttributeList::FunctionIndex,
5027                                  llvm::Attribute::NoMerge);
5028 
5029   // Apply some call-site-specific attributes.
5030   // TODO: work this into building the attribute set.
5031 
5032   // Apply always_inline to all calls within flatten functions.
5033   // FIXME: should this really take priority over __try, below?
5034   if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
5035       !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
5036     Attrs =
5037         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5038                            llvm::Attribute::AlwaysInline);
5039   }
5040 
5041   // Disable inlining inside SEH __try blocks.
5042   if (isSEHTryScope()) {
5043     Attrs =
5044         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5045                            llvm::Attribute::NoInline);
5046   }
5047 
5048   // Decide whether to use a call or an invoke.
5049   bool CannotThrow;
5050   if (currentFunctionUsesSEHTry()) {
5051     // SEH cares about asynchronous exceptions, so everything can "throw."
5052     CannotThrow = false;
5053   } else if (isCleanupPadScope() &&
5054              EHPersonality::get(*this).isMSVCXXPersonality()) {
5055     // The MSVC++ personality will implicitly terminate the program if an
5056     // exception is thrown during a cleanup outside of a try/catch.
5057     // We don't need to model anything in IR to get this behavior.
5058     CannotThrow = true;
5059   } else {
5060     // Otherwise, nounwind call sites will never throw.
5061     CannotThrow = Attrs.hasFnAttribute(llvm::Attribute::NoUnwind);
5062 
5063     if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5064       if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5065         CannotThrow = true;
5066   }
5067 
5068   // If we made a temporary, be sure to clean up after ourselves. Note that we
5069   // can't depend on being inside of an ExprWithCleanups, so we need to manually
5070   // pop this cleanup later on. Being eager about this is OK, since this
5071   // temporary is 'invisible' outside of the callee.
5072   if (UnusedReturnSizePtr)
5073     pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
5074                                          UnusedReturnSizePtr);
5075 
5076   llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
5077 
5078   SmallVector<llvm::OperandBundleDef, 1> BundleList =
5079       getBundlesForFunclet(CalleePtr);
5080 
5081   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
5082     if (FD->hasAttr<StrictFPAttr>())
5083       // All calls within a strictfp function are marked strictfp
5084       Attrs =
5085         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5086                            llvm::Attribute::StrictFP);
5087 
5088   AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
5089   Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5090 
5091   AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
5092   Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5093 
5094   // Emit the actual call/invoke instruction.
5095   llvm::CallBase *CI;
5096   if (!InvokeDest) {
5097     CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
5098   } else {
5099     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
5100     CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
5101                               BundleList);
5102     EmitBlock(Cont);
5103   }
5104   if (callOrInvoke)
5105     *callOrInvoke = CI;
5106 
5107   // If this is within a function that has the guard(nocf) attribute and is an
5108   // indirect call, add the "guard_nocf" attribute to this call to indicate that
5109   // Control Flow Guard checks should not be added, even if the call is inlined.
5110   if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
5111     if (const auto *A = FD->getAttr<CFGuardAttr>()) {
5112       if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
5113         Attrs = Attrs.addAttribute(
5114             getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
5115     }
5116   }
5117 
5118   // Apply the attributes and calling convention.
5119   CI->setAttributes(Attrs);
5120   CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
5121 
5122   // Apply various metadata.
5123 
5124   if (!CI->getType()->isVoidTy())
5125     CI->setName("call");
5126 
5127   // Update largest vector width from the return type.
5128   if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
5129     LargestVectorWidth =
5130         std::max((uint64_t)LargestVectorWidth,
5131                  VT->getPrimitiveSizeInBits().getKnownMinSize());
5132 
5133   // Insert instrumentation or attach profile metadata at indirect call sites.
5134   // For more details, see the comment before the definition of
5135   // IPVK_IndirectCallTarget in InstrProfData.inc.
5136   if (!CI->getCalledFunction())
5137     PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
5138                      CI, CalleePtr);
5139 
5140   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
5141   // optimizer it can aggressively ignore unwind edges.
5142   if (CGM.getLangOpts().ObjCAutoRefCount)
5143     AddObjCARCExceptionMetadata(CI);
5144 
5145   // Suppress tail calls if requested.
5146   if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
5147     if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
5148       Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
5149   }
5150 
5151   // Add metadata for calls to MSAllocator functions
5152   if (getDebugInfo() && TargetDecl &&
5153       TargetDecl->hasAttr<MSAllocatorAttr>())
5154     getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);
5155 
5156   // 4. Finish the call.
5157 
5158   // If the call doesn't return, finish the basic block and clear the
5159   // insertion point; this allows the rest of IRGen to discard
5160   // unreachable code.
5161   if (CI->doesNotReturn()) {
5162     if (UnusedReturnSizePtr)
5163       PopCleanupBlock();
5164 
5165     // Strip away the noreturn attribute to better diagnose unreachable UB.
5166     if (SanOpts.has(SanitizerKind::Unreachable)) {
5167       // Also remove from function since CallBase::hasFnAttr additionally checks
5168       // attributes of the called function.
5169       if (auto *F = CI->getCalledFunction())
5170         F->removeFnAttr(llvm::Attribute::NoReturn);
5171       CI->removeAttribute(llvm::AttributeList::FunctionIndex,
5172                           llvm::Attribute::NoReturn);
5173 
5174       // Avoid incompatibility with ASan which relies on the `noreturn`
5175       // attribute to insert handler calls.
5176       if (SanOpts.hasOneOf(SanitizerKind::Address |
5177                            SanitizerKind::KernelAddress)) {
5178         SanitizerScope SanScope(this);
5179         llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
5180         Builder.SetInsertPoint(CI);
5181         auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
5182         llvm::FunctionCallee Fn =
5183             CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
5184         EmitNounwindRuntimeCall(Fn);
5185       }
5186     }
5187 
5188     EmitUnreachable(Loc);
5189     Builder.ClearInsertionPoint();
5190 
5191     // FIXME: For now, emit a dummy basic block because expr emitters in
5192     // generally are not ready to handle emitting expressions at unreachable
5193     // points.
5194     EnsureInsertPoint();
5195 
5196     // Return a reasonable RValue.
5197     return GetUndefRValue(RetTy);
5198   }
5199 
5200   // Perform the swifterror writeback.
5201   if (swiftErrorTemp.isValid()) {
5202     llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
5203     Builder.CreateStore(errorResult, swiftErrorArg);
5204   }
5205 
5206   // Emit any call-associated writebacks immediately.  Arguably this
5207   // should happen after any return-value munging.
5208   if (CallArgs.hasWritebacks())
5209     emitWritebacks(*this, CallArgs);
5210 
5211   // The stack cleanup for inalloca arguments has to run out of the normal
5212   // lexical order, so deactivate it and run it manually here.
5213   CallArgs.freeArgumentMemory(*this);
5214 
5215   // Extract the return value.
5216   RValue Ret = [&] {
5217     switch (RetAI.getKind()) {
5218     case ABIArgInfo::CoerceAndExpand: {
5219       auto coercionType = RetAI.getCoerceAndExpandType();
5220 
5221       Address addr = SRetPtr;
5222       addr = Builder.CreateElementBitCast(addr, coercionType);
5223 
5224       assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
5225       bool requiresExtract = isa<llvm::StructType>(CI->getType());
5226 
5227       unsigned unpaddedIndex = 0;
5228       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5229         llvm::Type *eltType = coercionType->getElementType(i);
5230         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
5231         Address eltAddr = Builder.CreateStructGEP(addr, i);
5232         llvm::Value *elt = CI;
5233         if (requiresExtract)
5234           elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
5235         else
5236           assert(unpaddedIndex == 0);
5237         Builder.CreateStore(elt, eltAddr);
5238       }
5239       // FALLTHROUGH
5240       LLVM_FALLTHROUGH;
5241     }
5242 
5243     case ABIArgInfo::InAlloca:
5244     case ABIArgInfo::Indirect: {
5245       RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
5246       if (UnusedReturnSizePtr)
5247         PopCleanupBlock();
5248       return ret;
5249     }
5250 
5251     case ABIArgInfo::Ignore:
5252       // If we are ignoring an argument that had a result, make sure to
5253       // construct the appropriate return value for our caller.
5254       return GetUndefRValue(RetTy);
5255 
5256     case ABIArgInfo::Extend:
5257     case ABIArgInfo::Direct: {
5258       llvm::Type *RetIRTy = ConvertType(RetTy);
5259       if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
5260         switch (getEvaluationKind(RetTy)) {
5261         case TEK_Complex: {
5262           llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
5263           llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
5264           return RValue::getComplex(std::make_pair(Real, Imag));
5265         }
5266         case TEK_Aggregate: {
5267           Address DestPtr = ReturnValue.getValue();
5268           bool DestIsVolatile = ReturnValue.isVolatile();
5269 
5270           if (!DestPtr.isValid()) {
5271             DestPtr = CreateMemTemp(RetTy, "agg.tmp");
5272             DestIsVolatile = false;
5273           }
5274           EmitAggregateStore(CI, DestPtr, DestIsVolatile);
5275           return RValue::getAggregate(DestPtr);
5276         }
5277         case TEK_Scalar: {
5278           // If the argument doesn't match, perform a bitcast to coerce it.  This
5279           // can happen due to trivial type mismatches.
5280           llvm::Value *V = CI;
5281           if (V->getType() != RetIRTy)
5282             V = Builder.CreateBitCast(V, RetIRTy);
5283           return RValue::get(V);
5284         }
5285         }
5286         llvm_unreachable("bad evaluation kind");
5287       }
5288 
5289       Address DestPtr = ReturnValue.getValue();
5290       bool DestIsVolatile = ReturnValue.isVolatile();
5291 
5292       if (!DestPtr.isValid()) {
5293         DestPtr = CreateMemTemp(RetTy, "coerce");
5294         DestIsVolatile = false;
5295       }
5296 
5297       // If the value is offset in memory, apply the offset now.
5298       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
5299       CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
5300 
5301       return convertTempToRValue(DestPtr, RetTy, SourceLocation());
5302     }
5303 
5304     case ABIArgInfo::Expand:
5305     case ABIArgInfo::IndirectAliased:
5306       llvm_unreachable("Invalid ABI kind for return argument");
5307     }
5308 
5309     llvm_unreachable("Unhandled ABIArgInfo::Kind");
5310   } ();
5311 
5312   // Emit the assume_aligned check on the return value.
5313   if (Ret.isScalar() && TargetDecl) {
5314     AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5315     AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5316   }
5317 
5318   // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
5319   // we can't use the full cleanup mechanism.
5320   for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
5321     LifetimeEnd.Emit(*this, /*Flags=*/{});
5322 
5323   if (!ReturnValue.isExternallyDestructed() &&
5324       RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
5325     pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
5326                 RetTy);
5327 
5328   return Ret;
5329 }
5330 
5331 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
5332   if (isVirtual()) {
5333     const CallExpr *CE = getVirtualCallExpr();
5334     return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
5335         CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
5336         CE ? CE->getBeginLoc() : SourceLocation());
5337   }
5338 
5339   return *this;
5340 }
5341 
5342 /* VarArg handling */
5343 
5344 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
5345   VAListAddr = VE->isMicrosoftABI()
5346                  ? EmitMSVAListRef(VE->getSubExpr())
5347                  : EmitVAListRef(VE->getSubExpr());
5348   QualType Ty = VE->getType();
5349   if (VE->isMicrosoftABI())
5350     return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
5351   return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
5352 }
5353