1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // These classes wrap the information about a call or function
10 // definition used to handle ABI compliancy.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGCall.h"
15 #include "ABIInfo.h"
16 #include "CGBlocks.h"
17 #include "CGCXXABI.h"
18 #include "CGCleanup.h"
19 #include "CGRecordLayout.h"
20 #include "CodeGenFunction.h"
21 #include "CodeGenModule.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/Attr.h"
24 #include "clang/AST/Decl.h"
25 #include "clang/AST/DeclCXX.h"
26 #include "clang/AST/DeclObjC.h"
27 #include "clang/Basic/CodeGenOptions.h"
28 #include "clang/Basic/TargetBuiltins.h"
29 #include "clang/Basic/TargetInfo.h"
30 #include "clang/CodeGen/CGFunctionInfo.h"
31 #include "clang/CodeGen/SwiftCallingConv.h"
32 #include "llvm/ADT/StringExtras.h"
33 #include "llvm/Analysis/ValueTracking.h"
34 #include "llvm/IR/Attributes.h"
35 #include "llvm/IR/CallingConv.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/IR/InlineAsm.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Intrinsics.h"
40 #include "llvm/Transforms/Utils/Local.h"
41 using namespace clang;
42 using namespace CodeGen;
43 
44 /***/
45 
46 unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
47   switch (CC) {
48   default: return llvm::CallingConv::C;
49   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
50   case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
51   case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
52   case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
53   case CC_Win64: return llvm::CallingConv::Win64;
54   case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
55   case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
56   case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
57   case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
58   // TODO: Add support for __pascal to LLVM.
59   case CC_X86Pascal: return llvm::CallingConv::C;
60   // TODO: Add support for __vectorcall to LLVM.
61   case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
62   case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
63   case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
64   case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
65   case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
66   case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
67   case CC_Swift: return llvm::CallingConv::Swift;
68   }
69 }
70 
71 /// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
72 /// qualification. Either or both of RD and MD may be null. A null RD indicates
73 /// that there is no meaningful 'this' type, and a null MD can occur when
74 /// calling a method pointer.
75 CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
76                                          const CXXMethodDecl *MD) {
77   QualType RecTy;
78   if (RD)
79     RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
80   else
81     RecTy = Context.VoidTy;
82 
83   if (MD)
84     RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace());
85   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
86 }
87 
88 /// Returns the canonical formal type of the given C++ method.
89 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
90   return MD->getType()->getCanonicalTypeUnqualified()
91            .getAs<FunctionProtoType>();
92 }
93 
94 /// Returns the "extra-canonicalized" return type, which discards
95 /// qualifiers on the return type.  Codegen doesn't care about them,
96 /// and it makes ABI code a little easier to be able to assume that
97 /// all parameter and return types are top-level unqualified.
98 static CanQualType GetReturnType(QualType RetTy) {
99   return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
100 }
101 
102 /// Arrange the argument and result information for a value of the given
103 /// unprototyped freestanding function type.
104 const CGFunctionInfo &
105 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
106   // When translating an unprototyped function type, always use a
107   // variadic type.
108   return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
109                                  /*instanceMethod=*/false,
110                                  /*chainCall=*/false, None,
111                                  FTNP->getExtInfo(), {}, RequiredArgs(0));
112 }
113 
114 static void addExtParameterInfosForCall(
115          llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
116                                         const FunctionProtoType *proto,
117                                         unsigned prefixArgs,
118                                         unsigned totalArgs) {
119   assert(proto->hasExtParameterInfos());
120   assert(paramInfos.size() <= prefixArgs);
121   assert(proto->getNumParams() + prefixArgs <= totalArgs);
122 
123   paramInfos.reserve(totalArgs);
124 
125   // Add default infos for any prefix args that don't already have infos.
126   paramInfos.resize(prefixArgs);
127 
128   // Add infos for the prototype.
129   for (const auto &ParamInfo : proto->getExtParameterInfos()) {
130     paramInfos.push_back(ParamInfo);
131     // pass_object_size params have no parameter info.
132     if (ParamInfo.hasPassObjectSize())
133       paramInfos.emplace_back();
134   }
135 
136   assert(paramInfos.size() <= totalArgs &&
137          "Did we forget to insert pass_object_size args?");
138   // Add default infos for the variadic and/or suffix arguments.
139   paramInfos.resize(totalArgs);
140 }
141 
142 /// Adds the formal parameters in FPT to the given prefix. If any parameter in
143 /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
144 static void appendParameterTypes(const CodeGenTypes &CGT,
145                                  SmallVectorImpl<CanQualType> &prefix,
146               SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
147                                  CanQual<FunctionProtoType> FPT) {
148   // Fast path: don't touch param info if we don't need to.
149   if (!FPT->hasExtParameterInfos()) {
150     assert(paramInfos.empty() &&
151            "We have paramInfos, but the prototype doesn't?");
152     prefix.append(FPT->param_type_begin(), FPT->param_type_end());
153     return;
154   }
155 
156   unsigned PrefixSize = prefix.size();
157   // In the vast majority of cases, we'll have precisely FPT->getNumParams()
158   // parameters; the only thing that can change this is the presence of
159   // pass_object_size. So, we preallocate for the common case.
160   prefix.reserve(prefix.size() + FPT->getNumParams());
161 
162   auto ExtInfos = FPT->getExtParameterInfos();
163   assert(ExtInfos.size() == FPT->getNumParams());
164   for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
165     prefix.push_back(FPT->getParamType(I));
166     if (ExtInfos[I].hasPassObjectSize())
167       prefix.push_back(CGT.getContext().getSizeType());
168   }
169 
170   addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
171                               prefix.size());
172 }
173 
174 /// Arrange the LLVM function layout for a value of the given function
175 /// type, on top of any implicit parameters already stored.
176 static const CGFunctionInfo &
177 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
178                         SmallVectorImpl<CanQualType> &prefix,
179                         CanQual<FunctionProtoType> FTP) {
180   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
181   RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
182   // FIXME: Kill copy.
183   appendParameterTypes(CGT, prefix, paramInfos, FTP);
184   CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
185 
186   return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
187                                      /*chainCall=*/false, prefix,
188                                      FTP->getExtInfo(), paramInfos,
189                                      Required);
190 }
191 
192 /// Arrange the argument and result information for a value of the
193 /// given freestanding function type.
194 const CGFunctionInfo &
195 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
196   SmallVector<CanQualType, 16> argTypes;
197   return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
198                                    FTP);
199 }
200 
201 static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
202                                                bool IsWindows) {
203   // Set the appropriate calling convention for the Function.
204   if (D->hasAttr<StdCallAttr>())
205     return CC_X86StdCall;
206 
207   if (D->hasAttr<FastCallAttr>())
208     return CC_X86FastCall;
209 
210   if (D->hasAttr<RegCallAttr>())
211     return CC_X86RegCall;
212 
213   if (D->hasAttr<ThisCallAttr>())
214     return CC_X86ThisCall;
215 
216   if (D->hasAttr<VectorCallAttr>())
217     return CC_X86VectorCall;
218 
219   if (D->hasAttr<PascalAttr>())
220     return CC_X86Pascal;
221 
222   if (PcsAttr *PCS = D->getAttr<PcsAttr>())
223     return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
224 
225   if (D->hasAttr<AArch64VectorPcsAttr>())
226     return CC_AArch64VectorCall;
227 
228   if (D->hasAttr<IntelOclBiccAttr>())
229     return CC_IntelOclBicc;
230 
231   if (D->hasAttr<MSABIAttr>())
232     return IsWindows ? CC_C : CC_Win64;
233 
234   if (D->hasAttr<SysVABIAttr>())
235     return IsWindows ? CC_X86_64SysV : CC_C;
236 
237   if (D->hasAttr<PreserveMostAttr>())
238     return CC_PreserveMost;
239 
240   if (D->hasAttr<PreserveAllAttr>())
241     return CC_PreserveAll;
242 
243   return CC_C;
244 }
245 
246 /// Arrange the argument and result information for a call to an
247 /// unknown C++ non-static member function of the given abstract type.
248 /// (A null RD means we don't have any meaningful "this" argument type,
249 ///  so fall back to a generic pointer type).
250 /// The member function must be an ordinary function, i.e. not a
251 /// constructor or destructor.
252 const CGFunctionInfo &
253 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
254                                    const FunctionProtoType *FTP,
255                                    const CXXMethodDecl *MD) {
256   SmallVector<CanQualType, 16> argTypes;
257 
258   // Add the 'this' pointer.
259   argTypes.push_back(DeriveThisType(RD, MD));
260 
261   return ::arrangeLLVMFunctionInfo(
262       *this, true, argTypes,
263       FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
264 }
265 
266 /// Set calling convention for CUDA/HIP kernel.
267 static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
268                                            const FunctionDecl *FD) {
269   if (FD->hasAttr<CUDAGlobalAttr>()) {
270     const FunctionType *FT = FTy->getAs<FunctionType>();
271     CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
272     FTy = FT->getCanonicalTypeUnqualified();
273   }
274 }
275 
276 /// Arrange the argument and result information for a declaration or
277 /// definition of the given C++ non-static member function.  The
278 /// member function must be an ordinary function, i.e. not a
279 /// constructor or destructor.
280 const CGFunctionInfo &
281 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
282   assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
283   assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
284 
285   CanQualType FT = GetFormalType(MD).getAs<Type>();
286   setCUDAKernelCallingConvention(FT, CGM, MD);
287   auto prototype = FT.getAs<FunctionProtoType>();
288 
289   if (MD->isInstance()) {
290     // The abstract case is perfectly fine.
291     const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
292     return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
293   }
294 
295   return arrangeFreeFunctionType(prototype);
296 }
297 
298 bool CodeGenTypes::inheritingCtorHasParams(
299     const InheritedConstructor &Inherited, CXXCtorType Type) {
300   // Parameters are unnecessary if we're constructing a base class subobject
301   // and the inherited constructor lives in a virtual base.
302   return Type == Ctor_Complete ||
303          !Inherited.getShadowDecl()->constructsVirtualBase() ||
304          !Target.getCXXABI().hasConstructorVariants();
305 }
306 
307 const CGFunctionInfo &
308 CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
309   auto *MD = cast<CXXMethodDecl>(GD.getDecl());
310 
311   SmallVector<CanQualType, 16> argTypes;
312   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
313   argTypes.push_back(DeriveThisType(MD->getParent(), MD));
314 
315   bool PassParams = true;
316 
317   if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
318     // A base class inheriting constructor doesn't get forwarded arguments
319     // needed to construct a virtual base (or base class thereof).
320     if (auto Inherited = CD->getInheritedConstructor())
321       PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
322   }
323 
324   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
325 
326   // Add the formal parameters.
327   if (PassParams)
328     appendParameterTypes(*this, argTypes, paramInfos, FTP);
329 
330   CGCXXABI::AddedStructorArgCounts AddedArgs =
331       TheCXXABI.buildStructorSignature(GD, argTypes);
332   if (!paramInfos.empty()) {
333     // Note: prefix implies after the first param.
334     if (AddedArgs.Prefix)
335       paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
336                         FunctionProtoType::ExtParameterInfo{});
337     if (AddedArgs.Suffix)
338       paramInfos.append(AddedArgs.Suffix,
339                         FunctionProtoType::ExtParameterInfo{});
340   }
341 
342   RequiredArgs required =
343       (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
344                                       : RequiredArgs::All);
345 
346   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
347   CanQualType resultType = TheCXXABI.HasThisReturn(GD)
348                                ? argTypes.front()
349                                : TheCXXABI.hasMostDerivedReturn(GD)
350                                      ? CGM.getContext().VoidPtrTy
351                                      : Context.VoidTy;
352   return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
353                                  /*chainCall=*/false, argTypes, extInfo,
354                                  paramInfos, required);
355 }
356 
357 static SmallVector<CanQualType, 16>
358 getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
359   SmallVector<CanQualType, 16> argTypes;
360   for (auto &arg : args)
361     argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
362   return argTypes;
363 }
364 
365 static SmallVector<CanQualType, 16>
366 getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
367   SmallVector<CanQualType, 16> argTypes;
368   for (auto &arg : args)
369     argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
370   return argTypes;
371 }
372 
373 static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
374 getExtParameterInfosForCall(const FunctionProtoType *proto,
375                             unsigned prefixArgs, unsigned totalArgs) {
376   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
377   if (proto->hasExtParameterInfos()) {
378     addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
379   }
380   return result;
381 }
382 
383 /// Arrange a call to a C++ method, passing the given arguments.
384 ///
385 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
386 /// parameter.
387 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
388 /// args.
389 /// PassProtoArgs indicates whether `args` has args for the parameters in the
390 /// given CXXConstructorDecl.
391 const CGFunctionInfo &
392 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
393                                         const CXXConstructorDecl *D,
394                                         CXXCtorType CtorKind,
395                                         unsigned ExtraPrefixArgs,
396                                         unsigned ExtraSuffixArgs,
397                                         bool PassProtoArgs) {
398   // FIXME: Kill copy.
399   SmallVector<CanQualType, 16> ArgTypes;
400   for (const auto &Arg : args)
401     ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
402 
403   // +1 for implicit this, which should always be args[0].
404   unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
405 
406   CanQual<FunctionProtoType> FPT = GetFormalType(D);
407   RequiredArgs Required = PassProtoArgs
408                               ? RequiredArgs::forPrototypePlus(
409                                     FPT, TotalPrefixArgs + ExtraSuffixArgs)
410                               : RequiredArgs::All;
411 
412   GlobalDecl GD(D, CtorKind);
413   CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
414                                ? ArgTypes.front()
415                                : TheCXXABI.hasMostDerivedReturn(GD)
416                                      ? CGM.getContext().VoidPtrTy
417                                      : Context.VoidTy;
418 
419   FunctionType::ExtInfo Info = FPT->getExtInfo();
420   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
421   // If the prototype args are elided, we should only have ABI-specific args,
422   // which never have param info.
423   if (PassProtoArgs && FPT->hasExtParameterInfos()) {
424     // ABI-specific suffix arguments are treated the same as variadic arguments.
425     addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
426                                 ArgTypes.size());
427   }
428   return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
429                                  /*chainCall=*/false, ArgTypes, Info,
430                                  ParamInfos, Required);
431 }
432 
433 /// Arrange the argument and result information for the declaration or
434 /// definition of the given function.
435 const CGFunctionInfo &
436 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
437   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
438     if (MD->isInstance())
439       return arrangeCXXMethodDeclaration(MD);
440 
441   CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
442 
443   assert(isa<FunctionType>(FTy));
444   setCUDAKernelCallingConvention(FTy, CGM, FD);
445 
446   // When declaring a function without a prototype, always use a
447   // non-variadic type.
448   if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
449     return arrangeLLVMFunctionInfo(
450         noProto->getReturnType(), /*instanceMethod=*/false,
451         /*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All);
452   }
453 
454   return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
455 }
456 
457 /// Arrange the argument and result information for the declaration or
458 /// definition of an Objective-C method.
459 const CGFunctionInfo &
460 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
461   // It happens that this is the same as a call with no optional
462   // arguments, except also using the formal 'self' type.
463   return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
464 }
465 
466 /// Arrange the argument and result information for the function type
467 /// through which to perform a send to the given Objective-C method,
468 /// using the given receiver type.  The receiver type is not always
469 /// the 'self' type of the method or even an Objective-C pointer type.
470 /// This is *not* the right method for actually performing such a
471 /// message send, due to the possibility of optional arguments.
472 const CGFunctionInfo &
473 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
474                                               QualType receiverType) {
475   SmallVector<CanQualType, 16> argTys;
476   SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
477   argTys.push_back(Context.getCanonicalParamType(receiverType));
478   argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
479   // FIXME: Kill copy?
480   for (const auto *I : MD->parameters()) {
481     argTys.push_back(Context.getCanonicalParamType(I->getType()));
482     auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
483         I->hasAttr<NoEscapeAttr>());
484     extParamInfos.push_back(extParamInfo);
485   }
486 
487   FunctionType::ExtInfo einfo;
488   bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
489   einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
490 
491   if (getContext().getLangOpts().ObjCAutoRefCount &&
492       MD->hasAttr<NSReturnsRetainedAttr>())
493     einfo = einfo.withProducesResult(true);
494 
495   RequiredArgs required =
496     (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
497 
498   return arrangeLLVMFunctionInfo(
499       GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
500       /*chainCall=*/false, argTys, einfo, extParamInfos, required);
501 }
502 
503 const CGFunctionInfo &
504 CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
505                                                  const CallArgList &args) {
506   auto argTypes = getArgTypesForCall(Context, args);
507   FunctionType::ExtInfo einfo;
508 
509   return arrangeLLVMFunctionInfo(
510       GetReturnType(returnType), /*instanceMethod=*/false,
511       /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
512 }
513 
514 const CGFunctionInfo &
515 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
516   // FIXME: Do we need to handle ObjCMethodDecl?
517   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
518 
519   if (isa<CXXConstructorDecl>(GD.getDecl()) ||
520       isa<CXXDestructorDecl>(GD.getDecl()))
521     return arrangeCXXStructorDeclaration(GD);
522 
523   return arrangeFunctionDeclaration(FD);
524 }
525 
526 /// Arrange a thunk that takes 'this' as the first parameter followed by
527 /// varargs.  Return a void pointer, regardless of the actual return type.
528 /// The body of the thunk will end in a musttail call to a function of the
529 /// correct type, and the caller will bitcast the function to the correct
530 /// prototype.
531 const CGFunctionInfo &
532 CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
533   assert(MD->isVirtual() && "only methods have thunks");
534   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
535   CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
536   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
537                                  /*chainCall=*/false, ArgTys,
538                                  FTP->getExtInfo(), {}, RequiredArgs(1));
539 }
540 
541 const CGFunctionInfo &
542 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
543                                    CXXCtorType CT) {
544   assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
545 
546   CanQual<FunctionProtoType> FTP = GetFormalType(CD);
547   SmallVector<CanQualType, 2> ArgTys;
548   const CXXRecordDecl *RD = CD->getParent();
549   ArgTys.push_back(DeriveThisType(RD, CD));
550   if (CT == Ctor_CopyingClosure)
551     ArgTys.push_back(*FTP->param_type_begin());
552   if (RD->getNumVBases() > 0)
553     ArgTys.push_back(Context.IntTy);
554   CallingConv CC = Context.getDefaultCallingConvention(
555       /*IsVariadic=*/false, /*IsCXXMethod=*/true);
556   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
557                                  /*chainCall=*/false, ArgTys,
558                                  FunctionType::ExtInfo(CC), {},
559                                  RequiredArgs::All);
560 }
561 
562 /// Arrange a call as unto a free function, except possibly with an
563 /// additional number of formal parameters considered required.
564 static const CGFunctionInfo &
565 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
566                             CodeGenModule &CGM,
567                             const CallArgList &args,
568                             const FunctionType *fnType,
569                             unsigned numExtraRequiredArgs,
570                             bool chainCall) {
571   assert(args.size() >= numExtraRequiredArgs);
572 
573   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
574 
575   // In most cases, there are no optional arguments.
576   RequiredArgs required = RequiredArgs::All;
577 
578   // If we have a variadic prototype, the required arguments are the
579   // extra prefix plus the arguments in the prototype.
580   if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
581     if (proto->isVariadic())
582       required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);
583 
584     if (proto->hasExtParameterInfos())
585       addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
586                                   args.size());
587 
588   // If we don't have a prototype at all, but we're supposed to
589   // explicitly use the variadic convention for unprototyped calls,
590   // treat all of the arguments as required but preserve the nominal
591   // possibility of variadics.
592   } else if (CGM.getTargetCodeGenInfo()
593                 .isNoProtoCallVariadic(args,
594                                        cast<FunctionNoProtoType>(fnType))) {
595     required = RequiredArgs(args.size());
596   }
597 
598   // FIXME: Kill copy.
599   SmallVector<CanQualType, 16> argTypes;
600   for (const auto &arg : args)
601     argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
602   return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
603                                      /*instanceMethod=*/false, chainCall,
604                                      argTypes, fnType->getExtInfo(), paramInfos,
605                                      required);
606 }
607 
608 /// Figure out the rules for calling a function with the given formal
609 /// type using the given arguments.  The arguments are necessary
610 /// because the function might be unprototyped, in which case it's
611 /// target-dependent in crazy ways.
612 const CGFunctionInfo &
613 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
614                                       const FunctionType *fnType,
615                                       bool chainCall) {
616   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
617                                      chainCall ? 1 : 0, chainCall);
618 }
619 
620 /// A block function is essentially a free function with an
621 /// extra implicit argument.
622 const CGFunctionInfo &
623 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
624                                        const FunctionType *fnType) {
625   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
626                                      /*chainCall=*/false);
627 }
628 
629 const CGFunctionInfo &
630 CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
631                                               const FunctionArgList &params) {
632   auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
633   auto argTypes = getArgTypesForDeclaration(Context, params);
634 
635   return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
636                                  /*instanceMethod*/ false, /*chainCall*/ false,
637                                  argTypes, proto->getExtInfo(), paramInfos,
638                                  RequiredArgs::forPrototypePlus(proto, 1));
639 }
640 
641 const CGFunctionInfo &
642 CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
643                                          const CallArgList &args) {
644   // FIXME: Kill copy.
645   SmallVector<CanQualType, 16> argTypes;
646   for (const auto &Arg : args)
647     argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
648   return arrangeLLVMFunctionInfo(
649       GetReturnType(resultType), /*instanceMethod=*/false,
650       /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
651       /*paramInfos=*/ {}, RequiredArgs::All);
652 }
653 
654 const CGFunctionInfo &
655 CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
656                                                 const FunctionArgList &args) {
657   auto argTypes = getArgTypesForDeclaration(Context, args);
658 
659   return arrangeLLVMFunctionInfo(
660       GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
661       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
662 }
663 
664 const CGFunctionInfo &
665 CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
666                                               ArrayRef<CanQualType> argTypes) {
667   return arrangeLLVMFunctionInfo(
668       resultType, /*instanceMethod=*/false, /*chainCall=*/false,
669       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
670 }
671 
672 /// Arrange a call to a C++ method, passing the given arguments.
673 ///
674 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
675 /// does not count `this`.
676 const CGFunctionInfo &
677 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
678                                    const FunctionProtoType *proto,
679                                    RequiredArgs required,
680                                    unsigned numPrefixArgs) {
681   assert(numPrefixArgs + 1 <= args.size() &&
682          "Emitting a call with less args than the required prefix?");
683   // Add one to account for `this`. It's a bit awkward here, but we don't count
684   // `this` in similar places elsewhere.
685   auto paramInfos =
686     getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
687 
688   // FIXME: Kill copy.
689   auto argTypes = getArgTypesForCall(Context, args);
690 
691   FunctionType::ExtInfo info = proto->getExtInfo();
692   return arrangeLLVMFunctionInfo(
693       GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
694       /*chainCall=*/false, argTypes, info, paramInfos, required);
695 }
696 
697 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
698   return arrangeLLVMFunctionInfo(
699       getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
700       None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
701 }
702 
703 const CGFunctionInfo &
704 CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
705                           const CallArgList &args) {
706   assert(signature.arg_size() <= args.size());
707   if (signature.arg_size() == args.size())
708     return signature;
709 
710   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
711   auto sigParamInfos = signature.getExtParameterInfos();
712   if (!sigParamInfos.empty()) {
713     paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
714     paramInfos.resize(args.size());
715   }
716 
717   auto argTypes = getArgTypesForCall(Context, args);
718 
719   assert(signature.getRequiredArgs().allowsOptionalArgs());
720   return arrangeLLVMFunctionInfo(signature.getReturnType(),
721                                  signature.isInstanceMethod(),
722                                  signature.isChainCall(),
723                                  argTypes,
724                                  signature.getExtInfo(),
725                                  paramInfos,
726                                  signature.getRequiredArgs());
727 }
728 
729 namespace clang {
730 namespace CodeGen {
731 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
732 }
733 }
734 
735 /// Arrange the argument and result information for an abstract value
736 /// of a given function type.  This is the method which all of the
737 /// above functions ultimately defer to.
738 const CGFunctionInfo &
739 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
740                                       bool instanceMethod,
741                                       bool chainCall,
742                                       ArrayRef<CanQualType> argTypes,
743                                       FunctionType::ExtInfo info,
744                      ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
745                                       RequiredArgs required) {
746   assert(llvm::all_of(argTypes,
747                       [](CanQualType T) { return T.isCanonicalAsParam(); }));
748 
749   // Lookup or create unique function info.
750   llvm::FoldingSetNodeID ID;
751   CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
752                           required, resultType, argTypes);
753 
754   void *insertPos = nullptr;
755   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
756   if (FI)
757     return *FI;
758 
759   unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
760 
761   // Construct the function info.  We co-allocate the ArgInfos.
762   FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
763                               paramInfos, resultType, argTypes, required);
764   FunctionInfos.InsertNode(FI, insertPos);
765 
766   bool inserted = FunctionsBeingProcessed.insert(FI).second;
767   (void)inserted;
768   assert(inserted && "Recursively being processed?");
769 
770   // Compute ABI information.
771   if (CC == llvm::CallingConv::SPIR_KERNEL) {
772     // Force target independent argument handling for the host visible
773     // kernel functions.
774     computeSPIRKernelABIInfo(CGM, *FI);
775   } else if (info.getCC() == CC_Swift) {
776     swiftcall::computeABIInfo(CGM, *FI);
777   } else {
778     getABIInfo().computeInfo(*FI);
779   }
780 
781   // Loop over all of the computed argument and return value info.  If any of
782   // them are direct or extend without a specified coerce type, specify the
783   // default now.
784   ABIArgInfo &retInfo = FI->getReturnInfo();
785   if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
786     retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
787 
788   for (auto &I : FI->arguments())
789     if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
790       I.info.setCoerceToType(ConvertType(I.type));
791 
792   bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
793   assert(erased && "Not in set?");
794 
795   return *FI;
796 }
797 
798 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
799                                        bool instanceMethod,
800                                        bool chainCall,
801                                        const FunctionType::ExtInfo &info,
802                                        ArrayRef<ExtParameterInfo> paramInfos,
803                                        CanQualType resultType,
804                                        ArrayRef<CanQualType> argTypes,
805                                        RequiredArgs required) {
806   assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
807   assert(!required.allowsOptionalArgs() ||
808          required.getNumRequiredArgs() <= argTypes.size());
809 
810   void *buffer =
811     operator new(totalSizeToAlloc<ArgInfo,             ExtParameterInfo>(
812                                   argTypes.size() + 1, paramInfos.size()));
813 
814   CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
815   FI->CallingConvention = llvmCC;
816   FI->EffectiveCallingConvention = llvmCC;
817   FI->ASTCallingConvention = info.getCC();
818   FI->InstanceMethod = instanceMethod;
819   FI->ChainCall = chainCall;
820   FI->CmseNSCall = info.getCmseNSCall();
821   FI->NoReturn = info.getNoReturn();
822   FI->ReturnsRetained = info.getProducesResult();
823   FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
824   FI->NoCfCheck = info.getNoCfCheck();
825   FI->Required = required;
826   FI->HasRegParm = info.getHasRegParm();
827   FI->RegParm = info.getRegParm();
828   FI->ArgStruct = nullptr;
829   FI->ArgStructAlign = 0;
830   FI->NumArgs = argTypes.size();
831   FI->HasExtParameterInfos = !paramInfos.empty();
832   FI->getArgsBuffer()[0].type = resultType;
833   for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
834     FI->getArgsBuffer()[i + 1].type = argTypes[i];
835   for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
836     FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
837   return FI;
838 }
839 
840 /***/
841 
842 namespace {
843 // ABIArgInfo::Expand implementation.
844 
845 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
846 struct TypeExpansion {
847   enum TypeExpansionKind {
848     // Elements of constant arrays are expanded recursively.
849     TEK_ConstantArray,
850     // Record fields are expanded recursively (but if record is a union, only
851     // the field with the largest size is expanded).
852     TEK_Record,
853     // For complex types, real and imaginary parts are expanded recursively.
854     TEK_Complex,
855     // All other types are not expandable.
856     TEK_None
857   };
858 
859   const TypeExpansionKind Kind;
860 
861   TypeExpansion(TypeExpansionKind K) : Kind(K) {}
862   virtual ~TypeExpansion() {}
863 };
864 
865 struct ConstantArrayExpansion : TypeExpansion {
866   QualType EltTy;
867   uint64_t NumElts;
868 
869   ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
870       : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
871   static bool classof(const TypeExpansion *TE) {
872     return TE->Kind == TEK_ConstantArray;
873   }
874 };
875 
876 struct RecordExpansion : TypeExpansion {
877   SmallVector<const CXXBaseSpecifier *, 1> Bases;
878 
879   SmallVector<const FieldDecl *, 1> Fields;
880 
881   RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
882                   SmallVector<const FieldDecl *, 1> &&Fields)
883       : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
884         Fields(std::move(Fields)) {}
885   static bool classof(const TypeExpansion *TE) {
886     return TE->Kind == TEK_Record;
887   }
888 };
889 
890 struct ComplexExpansion : TypeExpansion {
891   QualType EltTy;
892 
893   ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
894   static bool classof(const TypeExpansion *TE) {
895     return TE->Kind == TEK_Complex;
896   }
897 };
898 
899 struct NoExpansion : TypeExpansion {
900   NoExpansion() : TypeExpansion(TEK_None) {}
901   static bool classof(const TypeExpansion *TE) {
902     return TE->Kind == TEK_None;
903   }
904 };
905 }  // namespace
906 
907 static std::unique_ptr<TypeExpansion>
908 getTypeExpansion(QualType Ty, const ASTContext &Context) {
909   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
910     return std::make_unique<ConstantArrayExpansion>(
911         AT->getElementType(), AT->getSize().getZExtValue());
912   }
913   if (const RecordType *RT = Ty->getAs<RecordType>()) {
914     SmallVector<const CXXBaseSpecifier *, 1> Bases;
915     SmallVector<const FieldDecl *, 1> Fields;
916     const RecordDecl *RD = RT->getDecl();
917     assert(!RD->hasFlexibleArrayMember() &&
918            "Cannot expand structure with flexible array.");
919     if (RD->isUnion()) {
920       // Unions can be here only in degenerative cases - all the fields are same
921       // after flattening. Thus we have to use the "largest" field.
922       const FieldDecl *LargestFD = nullptr;
923       CharUnits UnionSize = CharUnits::Zero();
924 
925       for (const auto *FD : RD->fields()) {
926         if (FD->isZeroLengthBitField(Context))
927           continue;
928         assert(!FD->isBitField() &&
929                "Cannot expand structure with bit-field members.");
930         CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
931         if (UnionSize < FieldSize) {
932           UnionSize = FieldSize;
933           LargestFD = FD;
934         }
935       }
936       if (LargestFD)
937         Fields.push_back(LargestFD);
938     } else {
939       if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
940         assert(!CXXRD->isDynamicClass() &&
941                "cannot expand vtable pointers in dynamic classes");
942         for (const CXXBaseSpecifier &BS : CXXRD->bases())
943           Bases.push_back(&BS);
944       }
945 
946       for (const auto *FD : RD->fields()) {
947         if (FD->isZeroLengthBitField(Context))
948           continue;
949         assert(!FD->isBitField() &&
950                "Cannot expand structure with bit-field members.");
951         Fields.push_back(FD);
952       }
953     }
954     return std::make_unique<RecordExpansion>(std::move(Bases),
955                                               std::move(Fields));
956   }
957   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
958     return std::make_unique<ComplexExpansion>(CT->getElementType());
959   }
960   return std::make_unique<NoExpansion>();
961 }
962 
963 static int getExpansionSize(QualType Ty, const ASTContext &Context) {
964   auto Exp = getTypeExpansion(Ty, Context);
965   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
966     return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
967   }
968   if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
969     int Res = 0;
970     for (auto BS : RExp->Bases)
971       Res += getExpansionSize(BS->getType(), Context);
972     for (auto FD : RExp->Fields)
973       Res += getExpansionSize(FD->getType(), Context);
974     return Res;
975   }
976   if (isa<ComplexExpansion>(Exp.get()))
977     return 2;
978   assert(isa<NoExpansion>(Exp.get()));
979   return 1;
980 }
981 
982 void
983 CodeGenTypes::getExpandedTypes(QualType Ty,
984                                SmallVectorImpl<llvm::Type *>::iterator &TI) {
985   auto Exp = getTypeExpansion(Ty, Context);
986   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
987     for (int i = 0, n = CAExp->NumElts; i < n; i++) {
988       getExpandedTypes(CAExp->EltTy, TI);
989     }
990   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
991     for (auto BS : RExp->Bases)
992       getExpandedTypes(BS->getType(), TI);
993     for (auto FD : RExp->Fields)
994       getExpandedTypes(FD->getType(), TI);
995   } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
996     llvm::Type *EltTy = ConvertType(CExp->EltTy);
997     *TI++ = EltTy;
998     *TI++ = EltTy;
999   } else {
1000     assert(isa<NoExpansion>(Exp.get()));
1001     *TI++ = ConvertType(Ty);
1002   }
1003 }
1004 
1005 static void forConstantArrayExpansion(CodeGenFunction &CGF,
1006                                       ConstantArrayExpansion *CAE,
1007                                       Address BaseAddr,
1008                                       llvm::function_ref<void(Address)> Fn) {
1009   CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1010   CharUnits EltAlign =
1011     BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1012 
1013   for (int i = 0, n = CAE->NumElts; i < n; i++) {
1014     llvm::Value *EltAddr =
1015       CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
1016     Fn(Address(EltAddr, EltAlign));
1017   }
1018 }
1019 
1020 void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
1021                                          llvm::Function::arg_iterator &AI) {
1022   assert(LV.isSimple() &&
1023          "Unexpected non-simple lvalue during struct expansion.");
1024 
1025   auto Exp = getTypeExpansion(Ty, getContext());
1026   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1027     forConstantArrayExpansion(
1028         *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
1029           LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1030           ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1031         });
1032   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1033     Address This = LV.getAddress(*this);
1034     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1035       // Perform a single step derived-to-base conversion.
1036       Address Base =
1037           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1038                                 /*NullCheckValue=*/false, SourceLocation());
1039       LValue SubLV = MakeAddrLValue(Base, BS->getType());
1040 
1041       // Recurse onto bases.
1042       ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1043     }
1044     for (auto FD : RExp->Fields) {
1045       // FIXME: What are the right qualifiers here?
1046       LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1047       ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1048     }
1049   } else if (isa<ComplexExpansion>(Exp.get())) {
1050     auto realValue = &*AI++;
1051     auto imagValue = &*AI++;
1052     EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1053   } else {
1054     // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
1055     // primitive store.
1056     assert(isa<NoExpansion>(Exp.get()));
1057     if (LV.isBitField())
1058       EmitStoreThroughLValue(RValue::get(&*AI++), LV);
1059     else
1060       EmitStoreOfScalar(&*AI++, LV);
1061   }
1062 }
1063 
1064 void CodeGenFunction::ExpandTypeToArgs(
1065     QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1066     SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1067   auto Exp = getTypeExpansion(Ty, getContext());
1068   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1069     Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
1070                                    : Arg.getKnownRValue().getAggregateAddress();
1071     forConstantArrayExpansion(
1072         *this, CAExp, Addr, [&](Address EltAddr) {
1073           CallArg EltArg = CallArg(
1074               convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1075               CAExp->EltTy);
1076           ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1077                            IRCallArgPos);
1078         });
1079   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1080     Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
1081                                    : Arg.getKnownRValue().getAggregateAddress();
1082     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1083       // Perform a single step derived-to-base conversion.
1084       Address Base =
1085           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1086                                 /*NullCheckValue=*/false, SourceLocation());
1087       CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1088 
1089       // Recurse onto bases.
1090       ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1091                        IRCallArgPos);
1092     }
1093 
1094     LValue LV = MakeAddrLValue(This, Ty);
1095     for (auto FD : RExp->Fields) {
1096       CallArg FldArg =
1097           CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1098       ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1099                        IRCallArgPos);
1100     }
1101   } else if (isa<ComplexExpansion>(Exp.get())) {
1102     ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1103     IRCallArgs[IRCallArgPos++] = CV.first;
1104     IRCallArgs[IRCallArgPos++] = CV.second;
1105   } else {
1106     assert(isa<NoExpansion>(Exp.get()));
1107     auto RV = Arg.getKnownRValue();
1108     assert(RV.isScalar() &&
1109            "Unexpected non-scalar rvalue during struct expansion.");
1110 
1111     // Insert a bitcast as needed.
1112     llvm::Value *V = RV.getScalarVal();
1113     if (IRCallArgPos < IRFuncTy->getNumParams() &&
1114         V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1115       V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1116 
1117     IRCallArgs[IRCallArgPos++] = V;
1118   }
1119 }
1120 
1121 /// Create a temporary allocation for the purposes of coercion.
1122 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1123                                            CharUnits MinAlign,
1124                                            const Twine &Name = "tmp") {
1125   // Don't use an alignment that's worse than what LLVM would prefer.
1126   auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1127   CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1128 
1129   return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
1130 }
1131 
1132 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1133 /// accessing some number of bytes out of it, try to gep into the struct to get
1134 /// at its inner goodness.  Dive as deep as possible without entering an element
1135 /// with an in-memory size smaller than DstSize.
1136 static Address
1137 EnterStructPointerForCoercedAccess(Address SrcPtr,
1138                                    llvm::StructType *SrcSTy,
1139                                    uint64_t DstSize, CodeGenFunction &CGF) {
1140   // We can't dive into a zero-element struct.
1141   if (SrcSTy->getNumElements() == 0) return SrcPtr;
1142 
1143   llvm::Type *FirstElt = SrcSTy->getElementType(0);
1144 
1145   // If the first elt is at least as large as what we're looking for, or if the
1146   // first element is the same size as the whole struct, we can enter it. The
1147   // comparison must be made on the store size and not the alloca size. Using
1148   // the alloca size may overstate the size of the load.
1149   uint64_t FirstEltSize =
1150     CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1151   if (FirstEltSize < DstSize &&
1152       FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1153     return SrcPtr;
1154 
1155   // GEP into the first element.
1156   SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");
1157 
1158   // If the first element is a struct, recurse.
1159   llvm::Type *SrcTy = SrcPtr.getElementType();
1160   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1161     return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1162 
1163   return SrcPtr;
1164 }
1165 
1166 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1167 /// are either integers or pointers.  This does a truncation of the value if it
1168 /// is too large or a zero extension if it is too small.
1169 ///
1170 /// This behaves as if the value were coerced through memory, so on big-endian
1171 /// targets the high bits are preserved in a truncation, while little-endian
1172 /// targets preserve the low bits.
1173 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1174                                              llvm::Type *Ty,
1175                                              CodeGenFunction &CGF) {
1176   if (Val->getType() == Ty)
1177     return Val;
1178 
1179   if (isa<llvm::PointerType>(Val->getType())) {
1180     // If this is Pointer->Pointer avoid conversion to and from int.
1181     if (isa<llvm::PointerType>(Ty))
1182       return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1183 
1184     // Convert the pointer to an integer so we can play with its width.
1185     Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1186   }
1187 
1188   llvm::Type *DestIntTy = Ty;
1189   if (isa<llvm::PointerType>(DestIntTy))
1190     DestIntTy = CGF.IntPtrTy;
1191 
1192   if (Val->getType() != DestIntTy) {
1193     const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1194     if (DL.isBigEndian()) {
1195       // Preserve the high bits on big-endian targets.
1196       // That is what memory coercion does.
1197       uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1198       uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1199 
1200       if (SrcSize > DstSize) {
1201         Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1202         Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1203       } else {
1204         Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1205         Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1206       }
1207     } else {
1208       // Little-endian targets preserve the low bits. No shifts required.
1209       Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1210     }
1211   }
1212 
1213   if (isa<llvm::PointerType>(Ty))
1214     Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1215   return Val;
1216 }
1217 
1218 
1219 
1220 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1221 /// a pointer to an object of type \arg Ty, known to be aligned to
1222 /// \arg SrcAlign bytes.
1223 ///
1224 /// This safely handles the case when the src type is smaller than the
1225 /// destination type; in this situation the values of bits which not
1226 /// present in the src are undefined.
1227 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1228                                       CodeGenFunction &CGF) {
1229   llvm::Type *SrcTy = Src.getElementType();
1230 
1231   // If SrcTy and Ty are the same, just do a load.
1232   if (SrcTy == Ty)
1233     return CGF.Builder.CreateLoad(Src);
1234 
1235   llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1236 
1237   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1238     Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
1239                                              DstSize.getFixedSize(), CGF);
1240     SrcTy = Src.getElementType();
1241   }
1242 
1243   llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1244 
1245   // If the source and destination are integer or pointer types, just do an
1246   // extension or truncation to the desired type.
1247   if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1248       (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1249     llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1250     return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1251   }
1252 
1253   // If load is legal, just bitcast the src pointer.
1254   if (!SrcSize.isScalable() && !DstSize.isScalable() &&
1255       SrcSize.getFixedSize() >= DstSize.getFixedSize()) {
1256     // Generally SrcSize is never greater than DstSize, since this means we are
1257     // losing bits. However, this can happen in cases where the structure has
1258     // additional padding, for example due to a user specified alignment.
1259     //
1260     // FIXME: Assert that we aren't truncating non-padding bits when have access
1261     // to that information.
1262     Src = CGF.Builder.CreateBitCast(Src,
1263                                     Ty->getPointerTo(Src.getAddressSpace()));
1264     return CGF.Builder.CreateLoad(Src);
1265   }
1266 
1267   // Otherwise do coercion through memory. This is stupid, but simple.
1268   Address Tmp =
1269       CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
1270   CGF.Builder.CreateMemCpy(
1271       Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
1272       Src.getAlignment().getAsAlign(),
1273       llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
1274   return CGF.Builder.CreateLoad(Tmp);
1275 }
1276 
1277 // Function to store a first-class aggregate into memory.  We prefer to
1278 // store the elements rather than the aggregate to be more friendly to
1279 // fast-isel.
1280 // FIXME: Do we need to recurse here?
1281 void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
1282                                          bool DestIsVolatile) {
1283   // Prefer scalar stores to first-class aggregate stores.
1284   if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
1285     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1286       Address EltPtr = Builder.CreateStructGEP(Dest, i);
1287       llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
1288       Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1289     }
1290   } else {
1291     Builder.CreateStore(Val, Dest, DestIsVolatile);
1292   }
1293 }
1294 
1295 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1296 /// where the source and destination may have different types.  The
1297 /// destination is known to be aligned to \arg DstAlign bytes.
1298 ///
1299 /// This safely handles the case when the src type is larger than the
1300 /// destination type; the upper bits of the src will be lost.
1301 static void CreateCoercedStore(llvm::Value *Src,
1302                                Address Dst,
1303                                bool DstIsVolatile,
1304                                CodeGenFunction &CGF) {
1305   llvm::Type *SrcTy = Src->getType();
1306   llvm::Type *DstTy = Dst.getElementType();
1307   if (SrcTy == DstTy) {
1308     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1309     return;
1310   }
1311 
1312   llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1313 
1314   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1315     Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
1316                                              SrcSize.getFixedSize(), CGF);
1317     DstTy = Dst.getElementType();
1318   }
1319 
1320   llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
1321   llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
1322   if (SrcPtrTy && DstPtrTy &&
1323       SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
1324     Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
1325     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1326     return;
1327   }
1328 
1329   // If the source and destination are integer or pointer types, just do an
1330   // extension or truncation to the desired type.
1331   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1332       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1333     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1334     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1335     return;
1336   }
1337 
1338   llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1339 
1340   // If store is legal, just bitcast the src pointer.
1341   if (isa<llvm::ScalableVectorType>(SrcTy) ||
1342       isa<llvm::ScalableVectorType>(DstTy) ||
1343       SrcSize.getFixedSize() <= DstSize.getFixedSize()) {
1344     Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1345     CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
1346   } else {
1347     // Otherwise do coercion through memory. This is stupid, but
1348     // simple.
1349 
1350     // Generally SrcSize is never greater than DstSize, since this means we are
1351     // losing bits. However, this can happen in cases where the structure has
1352     // additional padding, for example due to a user specified alignment.
1353     //
1354     // FIXME: Assert that we aren't truncating non-padding bits when have access
1355     // to that information.
1356     Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1357     CGF.Builder.CreateStore(Src, Tmp);
1358     CGF.Builder.CreateMemCpy(
1359         Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
1360         Tmp.getAlignment().getAsAlign(),
1361         llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
1362   }
1363 }
1364 
1365 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1366                                    const ABIArgInfo &info) {
1367   if (unsigned offset = info.getDirectOffset()) {
1368     addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1369     addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1370                                              CharUnits::fromQuantity(offset));
1371     addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1372   }
1373   return addr;
1374 }
1375 
1376 namespace {
1377 
1378 /// Encapsulates information about the way function arguments from
1379 /// CGFunctionInfo should be passed to actual LLVM IR function.
1380 class ClangToLLVMArgMapping {
1381   static const unsigned InvalidIndex = ~0U;
1382   unsigned InallocaArgNo;
1383   unsigned SRetArgNo;
1384   unsigned TotalIRArgs;
1385 
1386   /// Arguments of LLVM IR function corresponding to single Clang argument.
1387   struct IRArgs {
1388     unsigned PaddingArgIndex;
1389     // Argument is expanded to IR arguments at positions
1390     // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1391     unsigned FirstArgIndex;
1392     unsigned NumberOfArgs;
1393 
1394     IRArgs()
1395         : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1396           NumberOfArgs(0) {}
1397   };
1398 
1399   SmallVector<IRArgs, 8> ArgInfo;
1400 
1401 public:
1402   ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1403                         bool OnlyRequiredArgs = false)
1404       : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1405         ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1406     construct(Context, FI, OnlyRequiredArgs);
1407   }
1408 
1409   bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1410   unsigned getInallocaArgNo() const {
1411     assert(hasInallocaArg());
1412     return InallocaArgNo;
1413   }
1414 
1415   bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1416   unsigned getSRetArgNo() const {
1417     assert(hasSRetArg());
1418     return SRetArgNo;
1419   }
1420 
1421   unsigned totalIRArgs() const { return TotalIRArgs; }
1422 
1423   bool hasPaddingArg(unsigned ArgNo) const {
1424     assert(ArgNo < ArgInfo.size());
1425     return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1426   }
1427   unsigned getPaddingArgNo(unsigned ArgNo) const {
1428     assert(hasPaddingArg(ArgNo));
1429     return ArgInfo[ArgNo].PaddingArgIndex;
1430   }
1431 
1432   /// Returns index of first IR argument corresponding to ArgNo, and their
1433   /// quantity.
1434   std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1435     assert(ArgNo < ArgInfo.size());
1436     return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1437                           ArgInfo[ArgNo].NumberOfArgs);
1438   }
1439 
1440 private:
1441   void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1442                  bool OnlyRequiredArgs);
1443 };
1444 
1445 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1446                                       const CGFunctionInfo &FI,
1447                                       bool OnlyRequiredArgs) {
1448   unsigned IRArgNo = 0;
1449   bool SwapThisWithSRet = false;
1450   const ABIArgInfo &RetAI = FI.getReturnInfo();
1451 
1452   if (RetAI.getKind() == ABIArgInfo::Indirect) {
1453     SwapThisWithSRet = RetAI.isSRetAfterThis();
1454     SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1455   }
1456 
1457   unsigned ArgNo = 0;
1458   unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1459   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1460        ++I, ++ArgNo) {
1461     assert(I != FI.arg_end());
1462     QualType ArgType = I->type;
1463     const ABIArgInfo &AI = I->info;
1464     // Collect data about IR arguments corresponding to Clang argument ArgNo.
1465     auto &IRArgs = ArgInfo[ArgNo];
1466 
1467     if (AI.getPaddingType())
1468       IRArgs.PaddingArgIndex = IRArgNo++;
1469 
1470     switch (AI.getKind()) {
1471     case ABIArgInfo::Extend:
1472     case ABIArgInfo::Direct: {
1473       // FIXME: handle sseregparm someday...
1474       llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1475       if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1476         IRArgs.NumberOfArgs = STy->getNumElements();
1477       } else {
1478         IRArgs.NumberOfArgs = 1;
1479       }
1480       break;
1481     }
1482     case ABIArgInfo::Indirect:
1483     case ABIArgInfo::IndirectAliased:
1484       IRArgs.NumberOfArgs = 1;
1485       break;
1486     case ABIArgInfo::Ignore:
1487     case ABIArgInfo::InAlloca:
1488       // ignore and inalloca doesn't have matching LLVM parameters.
1489       IRArgs.NumberOfArgs = 0;
1490       break;
1491     case ABIArgInfo::CoerceAndExpand:
1492       IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1493       break;
1494     case ABIArgInfo::Expand:
1495       IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1496       break;
1497     }
1498 
1499     if (IRArgs.NumberOfArgs > 0) {
1500       IRArgs.FirstArgIndex = IRArgNo;
1501       IRArgNo += IRArgs.NumberOfArgs;
1502     }
1503 
1504     // Skip over the sret parameter when it comes second.  We already handled it
1505     // above.
1506     if (IRArgNo == 1 && SwapThisWithSRet)
1507       IRArgNo++;
1508   }
1509   assert(ArgNo == ArgInfo.size());
1510 
1511   if (FI.usesInAlloca())
1512     InallocaArgNo = IRArgNo++;
1513 
1514   TotalIRArgs = IRArgNo;
1515 }
1516 }  // namespace
1517 
1518 /***/
1519 
1520 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1521   const auto &RI = FI.getReturnInfo();
1522   return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1523 }
1524 
1525 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1526   return ReturnTypeUsesSRet(FI) &&
1527          getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1528 }
1529 
1530 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1531   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1532     switch (BT->getKind()) {
1533     default:
1534       return false;
1535     case BuiltinType::Float:
1536       return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1537     case BuiltinType::Double:
1538       return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1539     case BuiltinType::LongDouble:
1540       return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1541     }
1542   }
1543 
1544   return false;
1545 }
1546 
1547 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1548   if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1549     if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1550       if (BT->getKind() == BuiltinType::LongDouble)
1551         return getTarget().useObjCFP2RetForComplexLongDouble();
1552     }
1553   }
1554 
1555   return false;
1556 }
1557 
1558 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1559   const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1560   return GetFunctionType(FI);
1561 }
1562 
1563 llvm::FunctionType *
1564 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1565 
1566   bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1567   (void)Inserted;
1568   assert(Inserted && "Recursively being processed?");
1569 
1570   llvm::Type *resultType = nullptr;
1571   const ABIArgInfo &retAI = FI.getReturnInfo();
1572   switch (retAI.getKind()) {
1573   case ABIArgInfo::Expand:
1574   case ABIArgInfo::IndirectAliased:
1575     llvm_unreachable("Invalid ABI kind for return argument");
1576 
1577   case ABIArgInfo::Extend:
1578   case ABIArgInfo::Direct:
1579     resultType = retAI.getCoerceToType();
1580     break;
1581 
1582   case ABIArgInfo::InAlloca:
1583     if (retAI.getInAllocaSRet()) {
1584       // sret things on win32 aren't void, they return the sret pointer.
1585       QualType ret = FI.getReturnType();
1586       llvm::Type *ty = ConvertType(ret);
1587       unsigned addressSpace = Context.getTargetAddressSpace(ret);
1588       resultType = llvm::PointerType::get(ty, addressSpace);
1589     } else {
1590       resultType = llvm::Type::getVoidTy(getLLVMContext());
1591     }
1592     break;
1593 
1594   case ABIArgInfo::Indirect:
1595   case ABIArgInfo::Ignore:
1596     resultType = llvm::Type::getVoidTy(getLLVMContext());
1597     break;
1598 
1599   case ABIArgInfo::CoerceAndExpand:
1600     resultType = retAI.getUnpaddedCoerceAndExpandType();
1601     break;
1602   }
1603 
1604   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1605   SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1606 
1607   // Add type for sret argument.
1608   if (IRFunctionArgs.hasSRetArg()) {
1609     QualType Ret = FI.getReturnType();
1610     llvm::Type *Ty = ConvertType(Ret);
1611     unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1612     ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1613         llvm::PointerType::get(Ty, AddressSpace);
1614   }
1615 
1616   // Add type for inalloca argument.
1617   if (IRFunctionArgs.hasInallocaArg()) {
1618     auto ArgStruct = FI.getArgStruct();
1619     assert(ArgStruct);
1620     ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1621   }
1622 
1623   // Add in all of the required arguments.
1624   unsigned ArgNo = 0;
1625   CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1626                                      ie = it + FI.getNumRequiredArgs();
1627   for (; it != ie; ++it, ++ArgNo) {
1628     const ABIArgInfo &ArgInfo = it->info;
1629 
1630     // Insert a padding type to ensure proper alignment.
1631     if (IRFunctionArgs.hasPaddingArg(ArgNo))
1632       ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1633           ArgInfo.getPaddingType();
1634 
1635     unsigned FirstIRArg, NumIRArgs;
1636     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1637 
1638     switch (ArgInfo.getKind()) {
1639     case ABIArgInfo::Ignore:
1640     case ABIArgInfo::InAlloca:
1641       assert(NumIRArgs == 0);
1642       break;
1643 
1644     case ABIArgInfo::Indirect: {
1645       assert(NumIRArgs == 1);
1646       // indirect arguments are always on the stack, which is alloca addr space.
1647       llvm::Type *LTy = ConvertTypeForMem(it->type);
1648       ArgTypes[FirstIRArg] = LTy->getPointerTo(
1649           CGM.getDataLayout().getAllocaAddrSpace());
1650       break;
1651     }
1652     case ABIArgInfo::IndirectAliased: {
1653       assert(NumIRArgs == 1);
1654       llvm::Type *LTy = ConvertTypeForMem(it->type);
1655       ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace());
1656       break;
1657     }
1658     case ABIArgInfo::Extend:
1659     case ABIArgInfo::Direct: {
1660       // Fast-isel and the optimizer generally like scalar values better than
1661       // FCAs, so we flatten them if this is safe to do for this argument.
1662       llvm::Type *argType = ArgInfo.getCoerceToType();
1663       llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1664       if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1665         assert(NumIRArgs == st->getNumElements());
1666         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1667           ArgTypes[FirstIRArg + i] = st->getElementType(i);
1668       } else {
1669         assert(NumIRArgs == 1);
1670         ArgTypes[FirstIRArg] = argType;
1671       }
1672       break;
1673     }
1674 
1675     case ABIArgInfo::CoerceAndExpand: {
1676       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1677       for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1678         *ArgTypesIter++ = EltTy;
1679       }
1680       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1681       break;
1682     }
1683 
1684     case ABIArgInfo::Expand:
1685       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1686       getExpandedTypes(it->type, ArgTypesIter);
1687       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1688       break;
1689     }
1690   }
1691 
1692   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1693   assert(Erased && "Not in set?");
1694 
1695   return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1696 }
1697 
1698 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1699   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1700   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1701 
1702   if (!isFuncTypeConvertible(FPT))
1703     return llvm::StructType::get(getLLVMContext());
1704 
1705   return GetFunctionType(GD);
1706 }
1707 
1708 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1709                                                llvm::AttrBuilder &FuncAttrs,
1710                                                const FunctionProtoType *FPT) {
1711   if (!FPT)
1712     return;
1713 
1714   if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1715       FPT->isNothrow())
1716     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1717 }
1718 
1719 void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
1720                                                  bool HasOptnone,
1721                                                  bool AttrOnCallSite,
1722                                                llvm::AttrBuilder &FuncAttrs) {
1723   // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1724   if (!HasOptnone) {
1725     if (CodeGenOpts.OptimizeSize)
1726       FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1727     if (CodeGenOpts.OptimizeSize == 2)
1728       FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1729   }
1730 
1731   if (CodeGenOpts.DisableRedZone)
1732     FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1733   if (CodeGenOpts.IndirectTlsSegRefs)
1734     FuncAttrs.addAttribute("indirect-tls-seg-refs");
1735   if (CodeGenOpts.NoImplicitFloat)
1736     FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1737 
1738   if (AttrOnCallSite) {
1739     // Attributes that should go on the call site only.
1740     if (!CodeGenOpts.SimplifyLibCalls ||
1741         CodeGenOpts.isNoBuiltinFunc(Name.data()))
1742       FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1743     if (!CodeGenOpts.TrapFuncName.empty())
1744       FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1745   } else {
1746     StringRef FpKind;
1747     switch (CodeGenOpts.getFramePointer()) {
1748     case CodeGenOptions::FramePointerKind::None:
1749       FpKind = "none";
1750       break;
1751     case CodeGenOptions::FramePointerKind::NonLeaf:
1752       FpKind = "non-leaf";
1753       break;
1754     case CodeGenOptions::FramePointerKind::All:
1755       FpKind = "all";
1756       break;
1757     }
1758     FuncAttrs.addAttribute("frame-pointer", FpKind);
1759 
1760     FuncAttrs.addAttribute("less-precise-fpmad",
1761                            llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1762 
1763     if (CodeGenOpts.NullPointerIsValid)
1764       FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
1765 
1766     if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
1767       FuncAttrs.addAttribute("denormal-fp-math",
1768                              CodeGenOpts.FPDenormalMode.str());
1769     if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
1770       FuncAttrs.addAttribute(
1771           "denormal-fp-math-f32",
1772           CodeGenOpts.FP32DenormalMode.str());
1773     }
1774 
1775     FuncAttrs.addAttribute("no-trapping-math",
1776                            llvm::toStringRef(LangOpts.getFPExceptionMode() ==
1777                                              LangOptions::FPE_Ignore));
1778 
1779     // Strict (compliant) code is the default, so only add this attribute to
1780     // indicate that we are trying to workaround a problem case.
1781     if (!CodeGenOpts.StrictFloatCastOverflow)
1782       FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1783 
1784     // TODO: Are these all needed?
1785     // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1786     FuncAttrs.addAttribute("no-infs-fp-math",
1787                            llvm::toStringRef(LangOpts.NoHonorInfs));
1788     FuncAttrs.addAttribute("no-nans-fp-math",
1789                            llvm::toStringRef(LangOpts.NoHonorNaNs));
1790     FuncAttrs.addAttribute("unsafe-fp-math",
1791                            llvm::toStringRef(LangOpts.UnsafeFPMath));
1792     FuncAttrs.addAttribute("use-soft-float",
1793                            llvm::toStringRef(CodeGenOpts.SoftFloat));
1794     FuncAttrs.addAttribute("stack-protector-buffer-size",
1795                            llvm::utostr(CodeGenOpts.SSPBufferSize));
1796     FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1797                            llvm::toStringRef(LangOpts.NoSignedZero));
1798 
1799     // TODO: Reciprocal estimate codegen options should apply to instructions?
1800     const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1801     if (!Recips.empty())
1802       FuncAttrs.addAttribute("reciprocal-estimates",
1803                              llvm::join(Recips, ","));
1804 
1805     if (!CodeGenOpts.PreferVectorWidth.empty() &&
1806         CodeGenOpts.PreferVectorWidth != "none")
1807       FuncAttrs.addAttribute("prefer-vector-width",
1808                              CodeGenOpts.PreferVectorWidth);
1809 
1810     if (CodeGenOpts.StackRealignment)
1811       FuncAttrs.addAttribute("stackrealign");
1812     if (CodeGenOpts.Backchain)
1813       FuncAttrs.addAttribute("backchain");
1814     if (CodeGenOpts.EnableSegmentedStacks)
1815       FuncAttrs.addAttribute("split-stack");
1816 
1817     if (CodeGenOpts.SpeculativeLoadHardening)
1818       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1819   }
1820 
1821   if (getLangOpts().assumeFunctionsAreConvergent()) {
1822     // Conservatively, mark all functions and calls in CUDA and OpenCL as
1823     // convergent (meaning, they may call an intrinsically convergent op, such
1824     // as __syncthreads() / barrier(), and so can't have certain optimizations
1825     // applied around them).  LLVM will remove this attribute where it safely
1826     // can.
1827     FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1828   }
1829 
1830   if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1831     // Exceptions aren't supported in CUDA device code.
1832     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1833   }
1834 
1835   for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1836     StringRef Var, Value;
1837     std::tie(Var, Value) = Attr.split('=');
1838     FuncAttrs.addAttribute(Var, Value);
1839   }
1840 }
1841 
1842 void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
1843   llvm::AttrBuilder FuncAttrs;
1844   getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
1845                                /* AttrOnCallSite = */ false, FuncAttrs);
1846   // TODO: call GetCPUAndFeaturesAttributes?
1847   F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1848 }
1849 
1850 void CodeGenModule::addDefaultFunctionDefinitionAttributes(
1851                                                    llvm::AttrBuilder &attrs) {
1852   getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
1853                                /*for call*/ false, attrs);
1854   GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
1855 }
1856 
1857 static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
1858                                    const LangOptions &LangOpts,
1859                                    const NoBuiltinAttr *NBA = nullptr) {
1860   auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
1861     SmallString<32> AttributeName;
1862     AttributeName += "no-builtin-";
1863     AttributeName += BuiltinName;
1864     FuncAttrs.addAttribute(AttributeName);
1865   };
1866 
1867   // First, handle the language options passed through -fno-builtin.
1868   if (LangOpts.NoBuiltin) {
1869     // -fno-builtin disables them all.
1870     FuncAttrs.addAttribute("no-builtins");
1871     return;
1872   }
1873 
1874   // Then, add attributes for builtins specified through -fno-builtin-<name>.
1875   llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
1876 
1877   // Now, let's check the __attribute__((no_builtin("...")) attribute added to
1878   // the source.
1879   if (!NBA)
1880     return;
1881 
1882   // If there is a wildcard in the builtin names specified through the
1883   // attribute, disable them all.
1884   if (llvm::is_contained(NBA->builtinNames(), "*")) {
1885     FuncAttrs.addAttribute("no-builtins");
1886     return;
1887   }
1888 
1889   // And last, add the rest of the builtin names.
1890   llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
1891 }
1892 
1893 /// Construct the IR attribute list of a function or call.
1894 ///
1895 /// When adding an attribute, please consider where it should be handled:
1896 ///
1897 ///   - getDefaultFunctionAttributes is for attributes that are essentially
1898 ///     part of the global target configuration (but perhaps can be
1899 ///     overridden on a per-function basis).  Adding attributes there
1900 ///     will cause them to also be set in frontends that build on Clang's
1901 ///     target-configuration logic, as well as for code defined in library
1902 ///     modules such as CUDA's libdevice.
1903 ///
1904 ///   - ConstructAttributeList builds on top of getDefaultFunctionAttributes
1905 ///     and adds declaration-specific, convention-specific, and
1906 ///     frontend-specific logic.  The last is of particular importance:
1907 ///     attributes that restrict how the frontend generates code must be
1908 ///     added here rather than getDefaultFunctionAttributes.
1909 ///
1910 void CodeGenModule::ConstructAttributeList(
1911     StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1912     llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1913   llvm::AttrBuilder FuncAttrs;
1914   llvm::AttrBuilder RetAttrs;
1915 
1916   // Collect function IR attributes from the CC lowering.
1917   // We'll collect the paramete and result attributes later.
1918   CallingConv = FI.getEffectiveCallingConvention();
1919   if (FI.isNoReturn())
1920     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1921   if (FI.isCmseNSCall())
1922     FuncAttrs.addAttribute("cmse_nonsecure_call");
1923 
1924   // Collect function IR attributes from the callee prototype if we have one.
1925   AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1926                                      CalleeInfo.getCalleeFunctionProtoType());
1927 
1928   const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
1929 
1930   bool HasOptnone = false;
1931   // The NoBuiltinAttr attached to the target FunctionDecl.
1932   const NoBuiltinAttr *NBA = nullptr;
1933 
1934   // Collect function IR attributes based on declaration-specific
1935   // information.
1936   // FIXME: handle sseregparm someday...
1937   if (TargetDecl) {
1938     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1939       FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1940     if (TargetDecl->hasAttr<NoThrowAttr>())
1941       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1942     if (TargetDecl->hasAttr<NoReturnAttr>())
1943       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1944     if (TargetDecl->hasAttr<ColdAttr>())
1945       FuncAttrs.addAttribute(llvm::Attribute::Cold);
1946     if (TargetDecl->hasAttr<NoDuplicateAttr>())
1947       FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1948     if (TargetDecl->hasAttr<ConvergentAttr>())
1949       FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1950 
1951     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1952       AddAttributesFromFunctionProtoType(
1953           getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1954       if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
1955         // A sane operator new returns a non-aliasing pointer.
1956         auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
1957         if (getCodeGenOpts().AssumeSaneOperatorNew &&
1958             (Kind == OO_New || Kind == OO_Array_New))
1959           RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1960       }
1961       const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1962       const bool IsVirtualCall = MD && MD->isVirtual();
1963       // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
1964       // virtual function. These attributes are not inherited by overloads.
1965       if (!(AttrOnCallSite && IsVirtualCall)) {
1966         if (Fn->isNoReturn())
1967           FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1968         NBA = Fn->getAttr<NoBuiltinAttr>();
1969       }
1970     }
1971 
1972     // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1973     if (TargetDecl->hasAttr<ConstAttr>()) {
1974       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1975       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1976     } else if (TargetDecl->hasAttr<PureAttr>()) {
1977       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1978       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1979     } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1980       FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1981       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1982     }
1983     if (TargetDecl->hasAttr<RestrictAttr>())
1984       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1985     if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1986         !CodeGenOpts.NullPointerIsValid)
1987       RetAttrs.addAttribute(llvm::Attribute::NonNull);
1988     if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1989       FuncAttrs.addAttribute("no_caller_saved_registers");
1990     if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1991       FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1992     if (TargetDecl->hasAttr<LeafAttr>())
1993       FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
1994 
1995     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1996     if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1997       Optional<unsigned> NumElemsParam;
1998       if (AllocSize->getNumElemsParam().isValid())
1999         NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2000       FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2001                                  NumElemsParam);
2002     }
2003 
2004     if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
2005       if (getLangOpts().OpenCLVersion <= 120) {
2006         // OpenCL v1.2 Work groups are always uniform
2007         FuncAttrs.addAttribute("uniform-work-group-size", "true");
2008       } else {
2009         // OpenCL v2.0 Work groups may be whether uniform or not.
2010         // '-cl-uniform-work-group-size' compile option gets a hint
2011         // to the compiler that the global work-size be a multiple of
2012         // the work-group size specified to clEnqueueNDRangeKernel
2013         // (i.e. work groups are uniform).
2014         FuncAttrs.addAttribute("uniform-work-group-size",
2015                                llvm::toStringRef(CodeGenOpts.UniformWGSize));
2016       }
2017     }
2018   }
2019 
2020   // Attach "no-builtins" attributes to:
2021   // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
2022   // * definitions: "no-builtins" or "no-builtin-<name>" only.
2023   // The attributes can come from:
2024   // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
2025   // * FunctionDecl attributes: __attribute__((no_builtin(...)))
2026   addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
2027 
2028   // Collect function IR attributes based on global settiings.
2029   getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2030 
2031   // Override some default IR attributes based on declaration-specific
2032   // information.
2033   if (TargetDecl) {
2034     if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
2035       FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2036     if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
2037       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2038     if (TargetDecl->hasAttr<NoSplitStackAttr>())
2039       FuncAttrs.removeAttribute("split-stack");
2040 
2041     // Add NonLazyBind attribute to function declarations when -fno-plt
2042     // is used.
2043     // FIXME: what if we just haven't processed the function definition
2044     // yet, or if it's an external definition like C99 inline?
2045     if (CodeGenOpts.NoPLT) {
2046       if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2047         if (!Fn->isDefined() && !AttrOnCallSite) {
2048           FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2049         }
2050       }
2051     }
2052   }
2053 
2054   // Collect non-call-site function IR attributes from declaration-specific
2055   // information.
2056   if (!AttrOnCallSite) {
2057     if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
2058       FuncAttrs.addAttribute("cmse_nonsecure_entry");
2059 
2060     // Whether tail calls are enabled.
2061     auto shouldDisableTailCalls = [&] {
2062       // Should this be honored in getDefaultFunctionAttributes?
2063       if (CodeGenOpts.DisableTailCalls)
2064         return true;
2065 
2066       if (!TargetDecl)
2067         return false;
2068 
2069       if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
2070           TargetDecl->hasAttr<AnyX86InterruptAttr>())
2071         return true;
2072 
2073       if (CodeGenOpts.NoEscapingBlockTailCalls) {
2074         if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2075           if (!BD->doesNotEscape())
2076             return true;
2077       }
2078 
2079       return false;
2080     };
2081     FuncAttrs.addAttribute("disable-tail-calls",
2082                            llvm::toStringRef(shouldDisableTailCalls()));
2083 
2084     // CPU/feature overrides.  addDefaultFunctionDefinitionAttributes
2085     // handles these separately to set them based on the global defaults.
2086     GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
2087   }
2088 
2089   // Collect attributes from arguments and return values.
2090   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
2091 
2092   QualType RetTy = FI.getReturnType();
2093   const ABIArgInfo &RetAI = FI.getReturnInfo();
2094   switch (RetAI.getKind()) {
2095   case ABIArgInfo::Extend:
2096     if (RetAI.isSignExt())
2097       RetAttrs.addAttribute(llvm::Attribute::SExt);
2098     else
2099       RetAttrs.addAttribute(llvm::Attribute::ZExt);
2100     LLVM_FALLTHROUGH;
2101   case ABIArgInfo::Direct:
2102     if (RetAI.getInReg())
2103       RetAttrs.addAttribute(llvm::Attribute::InReg);
2104     break;
2105   case ABIArgInfo::Ignore:
2106     break;
2107 
2108   case ABIArgInfo::InAlloca:
2109   case ABIArgInfo::Indirect: {
2110     // inalloca and sret disable readnone and readonly
2111     FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2112       .removeAttribute(llvm::Attribute::ReadNone);
2113     break;
2114   }
2115 
2116   case ABIArgInfo::CoerceAndExpand:
2117     break;
2118 
2119   case ABIArgInfo::Expand:
2120   case ABIArgInfo::IndirectAliased:
2121     llvm_unreachable("Invalid ABI kind for return argument");
2122   }
2123 
2124   if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2125     QualType PTy = RefTy->getPointeeType();
2126     if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2127       RetAttrs.addDereferenceableAttr(
2128           getMinimumObjectSize(PTy).getQuantity());
2129     if (getContext().getTargetAddressSpace(PTy) == 0 &&
2130         !CodeGenOpts.NullPointerIsValid)
2131       RetAttrs.addAttribute(llvm::Attribute::NonNull);
2132     if (PTy->isObjectType()) {
2133       llvm::Align Alignment =
2134           getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
2135       RetAttrs.addAlignmentAttr(Alignment);
2136     }
2137   }
2138 
2139   bool hasUsedSRet = false;
2140   SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2141 
2142   // Attach attributes to sret.
2143   if (IRFunctionArgs.hasSRetArg()) {
2144     llvm::AttrBuilder SRETAttrs;
2145     SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
2146     hasUsedSRet = true;
2147     if (RetAI.getInReg())
2148       SRETAttrs.addAttribute(llvm::Attribute::InReg);
2149     SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
2150     ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2151         llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2152   }
2153 
2154   // Attach attributes to inalloca argument.
2155   if (IRFunctionArgs.hasInallocaArg()) {
2156     llvm::AttrBuilder Attrs;
2157     Attrs.addAttribute(llvm::Attribute::InAlloca);
2158     ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2159         llvm::AttributeSet::get(getLLVMContext(), Attrs);
2160   }
2161 
2162   // Apply `nonnull` and `dereferencable(N)` to the `this` argument.
2163   if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() &&
2164       !FI.arg_begin()->type->isVoidPointerType()) {
2165     auto IRArgs = IRFunctionArgs.getIRArgs(0);
2166 
2167     assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");
2168 
2169     llvm::AttrBuilder Attrs;
2170 
2171     if (!CodeGenOpts.NullPointerIsValid &&
2172         getContext().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
2173       Attrs.addAttribute(llvm::Attribute::NonNull);
2174       Attrs.addDereferenceableAttr(
2175           getMinimumObjectSize(
2176               FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
2177               .getQuantity());
2178     } else {
2179       // FIXME dereferenceable should be correct here, regardless of
2180       // NullPointerIsValid. However, dereferenceable currently does not always
2181       // respect NullPointerIsValid and may imply nonnull and break the program.
2182       // See https://reviews.llvm.org/D66618 for discussions.
2183       Attrs.addDereferenceableOrNullAttr(
2184           getMinimumObjectSize(
2185               FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
2186               .getQuantity());
2187     }
2188 
2189     ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);
2190   }
2191 
2192   unsigned ArgNo = 0;
2193   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2194                                           E = FI.arg_end();
2195        I != E; ++I, ++ArgNo) {
2196     QualType ParamType = I->type;
2197     const ABIArgInfo &AI = I->info;
2198     llvm::AttrBuilder Attrs;
2199 
2200     // Add attribute for padding argument, if necessary.
2201     if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2202       if (AI.getPaddingInReg()) {
2203         ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2204             llvm::AttributeSet::get(
2205                 getLLVMContext(),
2206                 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2207       }
2208     }
2209 
2210     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2211     // have the corresponding parameter variable.  It doesn't make
2212     // sense to do it here because parameters are so messed up.
2213     switch (AI.getKind()) {
2214     case ABIArgInfo::Extend:
2215       if (AI.isSignExt())
2216         Attrs.addAttribute(llvm::Attribute::SExt);
2217       else
2218         Attrs.addAttribute(llvm::Attribute::ZExt);
2219       LLVM_FALLTHROUGH;
2220     case ABIArgInfo::Direct:
2221       if (ArgNo == 0 && FI.isChainCall())
2222         Attrs.addAttribute(llvm::Attribute::Nest);
2223       else if (AI.getInReg())
2224         Attrs.addAttribute(llvm::Attribute::InReg);
2225       break;
2226 
2227     case ABIArgInfo::Indirect: {
2228       if (AI.getInReg())
2229         Attrs.addAttribute(llvm::Attribute::InReg);
2230 
2231       if (AI.getIndirectByVal())
2232         Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
2233 
2234       auto *Decl = ParamType->getAsRecordDecl();
2235       if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
2236           Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs)
2237         // When calling the function, the pointer passed in will be the only
2238         // reference to the underlying object. Mark it accordingly.
2239         Attrs.addAttribute(llvm::Attribute::NoAlias);
2240 
2241       // TODO: We could add the byref attribute if not byval, but it would
2242       // require updating many testcases.
2243 
2244       CharUnits Align = AI.getIndirectAlign();
2245 
2246       // In a byval argument, it is important that the required
2247       // alignment of the type is honored, as LLVM might be creating a
2248       // *new* stack object, and needs to know what alignment to give
2249       // it. (Sometimes it can deduce a sensible alignment on its own,
2250       // but not if clang decides it must emit a packed struct, or the
2251       // user specifies increased alignment requirements.)
2252       //
2253       // This is different from indirect *not* byval, where the object
2254       // exists already, and the align attribute is purely
2255       // informative.
2256       assert(!Align.isZero());
2257 
2258       // For now, only add this when we have a byval argument.
2259       // TODO: be less lazy about updating test cases.
2260       if (AI.getIndirectByVal())
2261         Attrs.addAlignmentAttr(Align.getQuantity());
2262 
2263       // byval disables readnone and readonly.
2264       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2265         .removeAttribute(llvm::Attribute::ReadNone);
2266 
2267       break;
2268     }
2269     case ABIArgInfo::IndirectAliased: {
2270       CharUnits Align = AI.getIndirectAlign();
2271       Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));
2272       Attrs.addAlignmentAttr(Align.getQuantity());
2273       break;
2274     }
2275     case ABIArgInfo::Ignore:
2276     case ABIArgInfo::Expand:
2277     case ABIArgInfo::CoerceAndExpand:
2278       break;
2279 
2280     case ABIArgInfo::InAlloca:
2281       // inalloca disables readnone and readonly.
2282       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2283           .removeAttribute(llvm::Attribute::ReadNone);
2284       continue;
2285     }
2286 
2287     if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2288       QualType PTy = RefTy->getPointeeType();
2289       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2290         Attrs.addDereferenceableAttr(
2291             getMinimumObjectSize(PTy).getQuantity());
2292       if (getContext().getTargetAddressSpace(PTy) == 0 &&
2293           !CodeGenOpts.NullPointerIsValid)
2294         Attrs.addAttribute(llvm::Attribute::NonNull);
2295       if (PTy->isObjectType()) {
2296         llvm::Align Alignment =
2297             getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
2298         Attrs.addAlignmentAttr(Alignment);
2299       }
2300     }
2301 
2302     switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2303     case ParameterABI::Ordinary:
2304       break;
2305 
2306     case ParameterABI::SwiftIndirectResult: {
2307       // Add 'sret' if we haven't already used it for something, but
2308       // only if the result is void.
2309       if (!hasUsedSRet && RetTy->isVoidType()) {
2310         Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));
2311         hasUsedSRet = true;
2312       }
2313 
2314       // Add 'noalias' in either case.
2315       Attrs.addAttribute(llvm::Attribute::NoAlias);
2316 
2317       // Add 'dereferenceable' and 'alignment'.
2318       auto PTy = ParamType->getPointeeType();
2319       if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2320         auto info = getContext().getTypeInfoInChars(PTy);
2321         Attrs.addDereferenceableAttr(info.Width.getQuantity());
2322         Attrs.addAlignmentAttr(info.Align.getAsAlign());
2323       }
2324       break;
2325     }
2326 
2327     case ParameterABI::SwiftErrorResult:
2328       Attrs.addAttribute(llvm::Attribute::SwiftError);
2329       break;
2330 
2331     case ParameterABI::SwiftContext:
2332       Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2333       break;
2334     }
2335 
2336     if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2337       Attrs.addAttribute(llvm::Attribute::NoCapture);
2338 
2339     if (Attrs.hasAttributes()) {
2340       unsigned FirstIRArg, NumIRArgs;
2341       std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2342       for (unsigned i = 0; i < NumIRArgs; i++)
2343         ArgAttrs[FirstIRArg + i] =
2344             llvm::AttributeSet::get(getLLVMContext(), Attrs);
2345     }
2346   }
2347   assert(ArgNo == FI.arg_size());
2348 
2349   AttrList = llvm::AttributeList::get(
2350       getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2351       llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2352 }
2353 
2354 /// An argument came in as a promoted argument; demote it back to its
2355 /// declared type.
2356 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2357                                          const VarDecl *var,
2358                                          llvm::Value *value) {
2359   llvm::Type *varType = CGF.ConvertType(var->getType());
2360 
2361   // This can happen with promotions that actually don't change the
2362   // underlying type, like the enum promotions.
2363   if (value->getType() == varType) return value;
2364 
2365   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2366          && "unexpected promotion type");
2367 
2368   if (isa<llvm::IntegerType>(varType))
2369     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2370 
2371   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2372 }
2373 
2374 /// Returns the attribute (either parameter attribute, or function
2375 /// attribute), which declares argument ArgNo to be non-null.
2376 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2377                                          QualType ArgType, unsigned ArgNo) {
2378   // FIXME: __attribute__((nonnull)) can also be applied to:
2379   //   - references to pointers, where the pointee is known to be
2380   //     nonnull (apparently a Clang extension)
2381   //   - transparent unions containing pointers
2382   // In the former case, LLVM IR cannot represent the constraint. In
2383   // the latter case, we have no guarantee that the transparent union
2384   // is in fact passed as a pointer.
2385   if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2386     return nullptr;
2387   // First, check attribute on parameter itself.
2388   if (PVD) {
2389     if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2390       return ParmNNAttr;
2391   }
2392   // Check function attributes.
2393   if (!FD)
2394     return nullptr;
2395   for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2396     if (NNAttr->isNonNull(ArgNo))
2397       return NNAttr;
2398   }
2399   return nullptr;
2400 }
2401 
2402 namespace {
2403   struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2404     Address Temp;
2405     Address Arg;
2406     CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2407     void Emit(CodeGenFunction &CGF, Flags flags) override {
2408       llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2409       CGF.Builder.CreateStore(errorValue, Arg);
2410     }
2411   };
2412 }
2413 
2414 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2415                                          llvm::Function *Fn,
2416                                          const FunctionArgList &Args) {
2417   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2418     // Naked functions don't have prologues.
2419     return;
2420 
2421   // If this is an implicit-return-zero function, go ahead and
2422   // initialize the return value.  TODO: it might be nice to have
2423   // a more general mechanism for this that didn't require synthesized
2424   // return statements.
2425   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2426     if (FD->hasImplicitReturnZero()) {
2427       QualType RetTy = FD->getReturnType().getUnqualifiedType();
2428       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2429       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2430       Builder.CreateStore(Zero, ReturnValue);
2431     }
2432   }
2433 
2434   // FIXME: We no longer need the types from FunctionArgList; lift up and
2435   // simplify.
2436 
2437   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2438   assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
2439 
2440   // If we're using inalloca, all the memory arguments are GEPs off of the last
2441   // parameter, which is a pointer to the complete memory area.
2442   Address ArgStruct = Address::invalid();
2443   if (IRFunctionArgs.hasInallocaArg()) {
2444     ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
2445                         FI.getArgStructAlignment());
2446 
2447     assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2448   }
2449 
2450   // Name the struct return parameter.
2451   if (IRFunctionArgs.hasSRetArg()) {
2452     auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
2453     AI->setName("agg.result");
2454     AI->addAttr(llvm::Attribute::NoAlias);
2455   }
2456 
2457   // Track if we received the parameter as a pointer (indirect, byval, or
2458   // inalloca).  If already have a pointer, EmitParmDecl doesn't need to copy it
2459   // into a local alloca for us.
2460   SmallVector<ParamValue, 16> ArgVals;
2461   ArgVals.reserve(Args.size());
2462 
2463   // Create a pointer value for every parameter declaration.  This usually
2464   // entails copying one or more LLVM IR arguments into an alloca.  Don't push
2465   // any cleanups or do anything that might unwind.  We do that separately, so
2466   // we can push the cleanups in the correct order for the ABI.
2467   assert(FI.arg_size() == Args.size() &&
2468          "Mismatch between function signature & arguments.");
2469   unsigned ArgNo = 0;
2470   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2471   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2472        i != e; ++i, ++info_it, ++ArgNo) {
2473     const VarDecl *Arg = *i;
2474     const ABIArgInfo &ArgI = info_it->info;
2475 
2476     bool isPromoted =
2477       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2478     // We are converting from ABIArgInfo type to VarDecl type directly, unless
2479     // the parameter is promoted. In this case we convert to
2480     // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2481     QualType Ty = isPromoted ? info_it->type : Arg->getType();
2482     assert(hasScalarEvaluationKind(Ty) ==
2483            hasScalarEvaluationKind(Arg->getType()));
2484 
2485     unsigned FirstIRArg, NumIRArgs;
2486     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2487 
2488     switch (ArgI.getKind()) {
2489     case ABIArgInfo::InAlloca: {
2490       assert(NumIRArgs == 0);
2491       auto FieldIndex = ArgI.getInAllocaFieldIndex();
2492       Address V =
2493           Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2494       if (ArgI.getInAllocaIndirect())
2495         V = Address(Builder.CreateLoad(V),
2496                     getContext().getTypeAlignInChars(Ty));
2497       ArgVals.push_back(ParamValue::forIndirect(V));
2498       break;
2499     }
2500 
2501     case ABIArgInfo::Indirect:
2502     case ABIArgInfo::IndirectAliased: {
2503       assert(NumIRArgs == 1);
2504       Address ParamAddr =
2505           Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());
2506 
2507       if (!hasScalarEvaluationKind(Ty)) {
2508         // Aggregates and complex variables are accessed by reference. All we
2509         // need to do is realign the value, if requested. Also, if the address
2510         // may be aliased, copy it to ensure that the parameter variable is
2511         // mutable and has a unique adress, as C requires.
2512         Address V = ParamAddr;
2513         if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
2514           Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2515 
2516           // Copy from the incoming argument pointer to the temporary with the
2517           // appropriate alignment.
2518           //
2519           // FIXME: We should have a common utility for generating an aggregate
2520           // copy.
2521           CharUnits Size = getContext().getTypeSizeInChars(Ty);
2522           Builder.CreateMemCpy(
2523               AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
2524               ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
2525               llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
2526           V = AlignedTemp;
2527         }
2528         ArgVals.push_back(ParamValue::forIndirect(V));
2529       } else {
2530         // Load scalar value from indirect argument.
2531         llvm::Value *V =
2532             EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2533 
2534         if (isPromoted)
2535           V = emitArgumentDemotion(*this, Arg, V);
2536         ArgVals.push_back(ParamValue::forDirect(V));
2537       }
2538       break;
2539     }
2540 
2541     case ABIArgInfo::Extend:
2542     case ABIArgInfo::Direct: {
2543       auto AI = Fn->getArg(FirstIRArg);
2544       llvm::Type *LTy = ConvertType(Arg->getType());
2545 
2546       // Prepare parameter attributes. So far, only attributes for pointer
2547       // parameters are prepared. See
2548       // http://llvm.org/docs/LangRef.html#paramattrs.
2549       if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
2550           ArgI.getCoerceToType()->isPointerTy()) {
2551         assert(NumIRArgs == 1);
2552 
2553         if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2554           // Set `nonnull` attribute if any.
2555           if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2556                              PVD->getFunctionScopeIndex()) &&
2557               !CGM.getCodeGenOpts().NullPointerIsValid)
2558             AI->addAttr(llvm::Attribute::NonNull);
2559 
2560           QualType OTy = PVD->getOriginalType();
2561           if (const auto *ArrTy =
2562               getContext().getAsConstantArrayType(OTy)) {
2563             // A C99 array parameter declaration with the static keyword also
2564             // indicates dereferenceability, and if the size is constant we can
2565             // use the dereferenceable attribute (which requires the size in
2566             // bytes).
2567             if (ArrTy->getSizeModifier() == ArrayType::Static) {
2568               QualType ETy = ArrTy->getElementType();
2569               llvm::Align Alignment =
2570                   CGM.getNaturalTypeAlignment(ETy).getAsAlign();
2571               AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2572               uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2573               if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2574                   ArrSize) {
2575                 llvm::AttrBuilder Attrs;
2576                 Attrs.addDereferenceableAttr(
2577                     getContext().getTypeSizeInChars(ETy).getQuantity() *
2578                     ArrSize);
2579                 AI->addAttrs(Attrs);
2580               } else if (getContext().getTargetInfo().getNullPointerValue(
2581                              ETy.getAddressSpace()) == 0 &&
2582                          !CGM.getCodeGenOpts().NullPointerIsValid) {
2583                 AI->addAttr(llvm::Attribute::NonNull);
2584               }
2585             }
2586           } else if (const auto *ArrTy =
2587                      getContext().getAsVariableArrayType(OTy)) {
2588             // For C99 VLAs with the static keyword, we don't know the size so
2589             // we can't use the dereferenceable attribute, but in addrspace(0)
2590             // we know that it must be nonnull.
2591             if (ArrTy->getSizeModifier() == VariableArrayType::Static) {
2592               QualType ETy = ArrTy->getElementType();
2593               llvm::Align Alignment =
2594                   CGM.getNaturalTypeAlignment(ETy).getAsAlign();
2595               AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2596               if (!getContext().getTargetAddressSpace(ETy) &&
2597                   !CGM.getCodeGenOpts().NullPointerIsValid)
2598                 AI->addAttr(llvm::Attribute::NonNull);
2599             }
2600           }
2601 
2602           // Set `align` attribute if any.
2603           const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2604           if (!AVAttr)
2605             if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2606               AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2607           if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2608             // If alignment-assumption sanitizer is enabled, we do *not* add
2609             // alignment attribute here, but emit normal alignment assumption,
2610             // so the UBSAN check could function.
2611             llvm::ConstantInt *AlignmentCI =
2612                 cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
2613             unsigned AlignmentInt =
2614                 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
2615             if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
2616               AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
2617               AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(
2618                   llvm::Align(AlignmentInt)));
2619             }
2620           }
2621         }
2622 
2623         // Set 'noalias' if an argument type has the `restrict` qualifier.
2624         if (Arg->getType().isRestrictQualified())
2625           AI->addAttr(llvm::Attribute::NoAlias);
2626       }
2627 
2628       // Prepare the argument value. If we have the trivial case, handle it
2629       // with no muss and fuss.
2630       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2631           ArgI.getCoerceToType() == ConvertType(Ty) &&
2632           ArgI.getDirectOffset() == 0) {
2633         assert(NumIRArgs == 1);
2634 
2635         // LLVM expects swifterror parameters to be used in very restricted
2636         // ways.  Copy the value into a less-restricted temporary.
2637         llvm::Value *V = AI;
2638         if (FI.getExtParameterInfo(ArgNo).getABI()
2639               == ParameterABI::SwiftErrorResult) {
2640           QualType pointeeTy = Ty->getPointeeType();
2641           assert(pointeeTy->isPointerType());
2642           Address temp =
2643             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2644           Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2645           llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2646           Builder.CreateStore(incomingErrorValue, temp);
2647           V = temp.getPointer();
2648 
2649           // Push a cleanup to copy the value back at the end of the function.
2650           // The convention does not guarantee that the value will be written
2651           // back if the function exits with an unwind exception.
2652           EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2653         }
2654 
2655         // Ensure the argument is the correct type.
2656         if (V->getType() != ArgI.getCoerceToType())
2657           V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2658 
2659         if (isPromoted)
2660           V = emitArgumentDemotion(*this, Arg, V);
2661 
2662         // Because of merging of function types from multiple decls it is
2663         // possible for the type of an argument to not match the corresponding
2664         // type in the function type. Since we are codegening the callee
2665         // in here, add a cast to the argument type.
2666         llvm::Type *LTy = ConvertType(Arg->getType());
2667         if (V->getType() != LTy)
2668           V = Builder.CreateBitCast(V, LTy);
2669 
2670         ArgVals.push_back(ParamValue::forDirect(V));
2671         break;
2672       }
2673 
2674       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2675                                      Arg->getName());
2676 
2677       // Pointer to store into.
2678       Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2679 
2680       // Fast-isel and the optimizer generally like scalar values better than
2681       // FCAs, so we flatten them if this is safe to do for this argument.
2682       llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2683       if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2684           STy->getNumElements() > 1) {
2685         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2686         llvm::Type *DstTy = Ptr.getElementType();
2687         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2688 
2689         Address AddrToStoreInto = Address::invalid();
2690         if (SrcSize <= DstSize) {
2691           AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2692         } else {
2693           AddrToStoreInto =
2694             CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2695         }
2696 
2697         assert(STy->getNumElements() == NumIRArgs);
2698         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2699           auto AI = Fn->getArg(FirstIRArg + i);
2700           AI->setName(Arg->getName() + ".coerce" + Twine(i));
2701           Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
2702           Builder.CreateStore(AI, EltPtr);
2703         }
2704 
2705         if (SrcSize > DstSize) {
2706           Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2707         }
2708 
2709       } else {
2710         // Simple case, just do a coerced store of the argument into the alloca.
2711         assert(NumIRArgs == 1);
2712         auto AI = Fn->getArg(FirstIRArg);
2713         AI->setName(Arg->getName() + ".coerce");
2714         CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
2715       }
2716 
2717       // Match to what EmitParmDecl is expecting for this type.
2718       if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2719         llvm::Value *V =
2720             EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2721         if (isPromoted)
2722           V = emitArgumentDemotion(*this, Arg, V);
2723         ArgVals.push_back(ParamValue::forDirect(V));
2724       } else {
2725         ArgVals.push_back(ParamValue::forIndirect(Alloca));
2726       }
2727       break;
2728     }
2729 
2730     case ABIArgInfo::CoerceAndExpand: {
2731       // Reconstruct into a temporary.
2732       Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2733       ArgVals.push_back(ParamValue::forIndirect(alloca));
2734 
2735       auto coercionType = ArgI.getCoerceAndExpandType();
2736       alloca = Builder.CreateElementBitCast(alloca, coercionType);
2737 
2738       unsigned argIndex = FirstIRArg;
2739       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2740         llvm::Type *eltType = coercionType->getElementType(i);
2741         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2742           continue;
2743 
2744         auto eltAddr = Builder.CreateStructGEP(alloca, i);
2745         auto elt = Fn->getArg(argIndex++);
2746         Builder.CreateStore(elt, eltAddr);
2747       }
2748       assert(argIndex == FirstIRArg + NumIRArgs);
2749       break;
2750     }
2751 
2752     case ABIArgInfo::Expand: {
2753       // If this structure was expanded into multiple arguments then
2754       // we need to create a temporary and reconstruct it from the
2755       // arguments.
2756       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2757       LValue LV = MakeAddrLValue(Alloca, Ty);
2758       ArgVals.push_back(ParamValue::forIndirect(Alloca));
2759 
2760       auto FnArgIter = Fn->arg_begin() + FirstIRArg;
2761       ExpandTypeFromArgs(Ty, LV, FnArgIter);
2762       assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
2763       for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2764         auto AI = Fn->getArg(FirstIRArg + i);
2765         AI->setName(Arg->getName() + "." + Twine(i));
2766       }
2767       break;
2768     }
2769 
2770     case ABIArgInfo::Ignore:
2771       assert(NumIRArgs == 0);
2772       // Initialize the local variable appropriately.
2773       if (!hasScalarEvaluationKind(Ty)) {
2774         ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2775       } else {
2776         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2777         ArgVals.push_back(ParamValue::forDirect(U));
2778       }
2779       break;
2780     }
2781   }
2782 
2783   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2784     for (int I = Args.size() - 1; I >= 0; --I)
2785       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2786   } else {
2787     for (unsigned I = 0, E = Args.size(); I != E; ++I)
2788       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2789   }
2790 }
2791 
2792 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2793   while (insn->use_empty()) {
2794     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2795     if (!bitcast) return;
2796 
2797     // This is "safe" because we would have used a ConstantExpr otherwise.
2798     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2799     bitcast->eraseFromParent();
2800   }
2801 }
2802 
2803 /// Try to emit a fused autorelease of a return result.
2804 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2805                                                     llvm::Value *result) {
2806   // We must be immediately followed the cast.
2807   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2808   if (BB->empty()) return nullptr;
2809   if (&BB->back() != result) return nullptr;
2810 
2811   llvm::Type *resultType = result->getType();
2812 
2813   // result is in a BasicBlock and is therefore an Instruction.
2814   llvm::Instruction *generator = cast<llvm::Instruction>(result);
2815 
2816   SmallVector<llvm::Instruction *, 4> InstsToKill;
2817 
2818   // Look for:
2819   //  %generator = bitcast %type1* %generator2 to %type2*
2820   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2821     // We would have emitted this as a constant if the operand weren't
2822     // an Instruction.
2823     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2824 
2825     // Require the generator to be immediately followed by the cast.
2826     if (generator->getNextNode() != bitcast)
2827       return nullptr;
2828 
2829     InstsToKill.push_back(bitcast);
2830   }
2831 
2832   // Look for:
2833   //   %generator = call i8* @objc_retain(i8* %originalResult)
2834   // or
2835   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2836   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2837   if (!call) return nullptr;
2838 
2839   bool doRetainAutorelease;
2840 
2841   if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2842     doRetainAutorelease = true;
2843   } else if (call->getCalledOperand() ==
2844              CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
2845     doRetainAutorelease = false;
2846 
2847     // If we emitted an assembly marker for this call (and the
2848     // ARCEntrypoints field should have been set if so), go looking
2849     // for that call.  If we can't find it, we can't do this
2850     // optimization.  But it should always be the immediately previous
2851     // instruction, unless we needed bitcasts around the call.
2852     if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2853       llvm::Instruction *prev = call->getPrevNode();
2854       assert(prev);
2855       if (isa<llvm::BitCastInst>(prev)) {
2856         prev = prev->getPrevNode();
2857         assert(prev);
2858       }
2859       assert(isa<llvm::CallInst>(prev));
2860       assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
2861              CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2862       InstsToKill.push_back(prev);
2863     }
2864   } else {
2865     return nullptr;
2866   }
2867 
2868   result = call->getArgOperand(0);
2869   InstsToKill.push_back(call);
2870 
2871   // Keep killing bitcasts, for sanity.  Note that we no longer care
2872   // about precise ordering as long as there's exactly one use.
2873   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2874     if (!bitcast->hasOneUse()) break;
2875     InstsToKill.push_back(bitcast);
2876     result = bitcast->getOperand(0);
2877   }
2878 
2879   // Delete all the unnecessary instructions, from latest to earliest.
2880   for (auto *I : InstsToKill)
2881     I->eraseFromParent();
2882 
2883   // Do the fused retain/autorelease if we were asked to.
2884   if (doRetainAutorelease)
2885     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2886 
2887   // Cast back to the result type.
2888   return CGF.Builder.CreateBitCast(result, resultType);
2889 }
2890 
2891 /// If this is a +1 of the value of an immutable 'self', remove it.
2892 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2893                                           llvm::Value *result) {
2894   // This is only applicable to a method with an immutable 'self'.
2895   const ObjCMethodDecl *method =
2896     dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2897   if (!method) return nullptr;
2898   const VarDecl *self = method->getSelfDecl();
2899   if (!self->getType().isConstQualified()) return nullptr;
2900 
2901   // Look for a retain call.
2902   llvm::CallInst *retainCall =
2903     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2904   if (!retainCall || retainCall->getCalledOperand() !=
2905                          CGF.CGM.getObjCEntrypoints().objc_retain)
2906     return nullptr;
2907 
2908   // Look for an ordinary load of 'self'.
2909   llvm::Value *retainedValue = retainCall->getArgOperand(0);
2910   llvm::LoadInst *load =
2911     dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2912   if (!load || load->isAtomic() || load->isVolatile() ||
2913       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2914     return nullptr;
2915 
2916   // Okay!  Burn it all down.  This relies for correctness on the
2917   // assumption that the retain is emitted as part of the return and
2918   // that thereafter everything is used "linearly".
2919   llvm::Type *resultType = result->getType();
2920   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2921   assert(retainCall->use_empty());
2922   retainCall->eraseFromParent();
2923   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2924 
2925   return CGF.Builder.CreateBitCast(load, resultType);
2926 }
2927 
2928 /// Emit an ARC autorelease of the result of a function.
2929 ///
2930 /// \return the value to actually return from the function
2931 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2932                                             llvm::Value *result) {
2933   // If we're returning 'self', kill the initial retain.  This is a
2934   // heuristic attempt to "encourage correctness" in the really unfortunate
2935   // case where we have a return of self during a dealloc and we desperately
2936   // need to avoid the possible autorelease.
2937   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2938     return self;
2939 
2940   // At -O0, try to emit a fused retain/autorelease.
2941   if (CGF.shouldUseFusedARCCalls())
2942     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2943       return fused;
2944 
2945   return CGF.EmitARCAutoreleaseReturnValue(result);
2946 }
2947 
2948 /// Heuristically search for a dominating store to the return-value slot.
2949 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2950   // Check if a User is a store which pointerOperand is the ReturnValue.
2951   // We are looking for stores to the ReturnValue, not for stores of the
2952   // ReturnValue to some other location.
2953   auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2954     auto *SI = dyn_cast<llvm::StoreInst>(U);
2955     if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2956       return nullptr;
2957     // These aren't actually possible for non-coerced returns, and we
2958     // only care about non-coerced returns on this code path.
2959     assert(!SI->isAtomic() && !SI->isVolatile());
2960     return SI;
2961   };
2962   // If there are multiple uses of the return-value slot, just check
2963   // for something immediately preceding the IP.  Sometimes this can
2964   // happen with how we generate implicit-returns; it can also happen
2965   // with noreturn cleanups.
2966   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2967     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2968     if (IP->empty()) return nullptr;
2969     llvm::Instruction *I = &IP->back();
2970 
2971     // Skip lifetime markers
2972     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2973                                             IE = IP->rend();
2974          II != IE; ++II) {
2975       if (llvm::IntrinsicInst *Intrinsic =
2976               dyn_cast<llvm::IntrinsicInst>(&*II)) {
2977         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2978           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2979           ++II;
2980           if (II == IE)
2981             break;
2982           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2983             continue;
2984         }
2985       }
2986       I = &*II;
2987       break;
2988     }
2989 
2990     return GetStoreIfValid(I);
2991   }
2992 
2993   llvm::StoreInst *store =
2994       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2995   if (!store) return nullptr;
2996 
2997   // Now do a first-and-dirty dominance check: just walk up the
2998   // single-predecessors chain from the current insertion point.
2999   llvm::BasicBlock *StoreBB = store->getParent();
3000   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3001   while (IP != StoreBB) {
3002     if (!(IP = IP->getSinglePredecessor()))
3003       return nullptr;
3004   }
3005 
3006   // Okay, the store's basic block dominates the insertion point; we
3007   // can do our thing.
3008   return store;
3009 }
3010 
3011 // Helper functions for EmitCMSEClearRecord
3012 
3013 // Set the bits corresponding to a field having width `BitWidth` and located at
3014 // offset `BitOffset` (from the least significant bit) within a storage unit of
3015 // `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
3016 // Use little-endian layout, i.e.`Bits[0]` is the LSB.
3017 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset,
3018                         int BitWidth, int CharWidth) {
3019   assert(CharWidth <= 64);
3020   assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3021 
3022   int Pos = 0;
3023   if (BitOffset >= CharWidth) {
3024     Pos += BitOffset / CharWidth;
3025     BitOffset = BitOffset % CharWidth;
3026   }
3027 
3028   const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
3029   if (BitOffset + BitWidth >= CharWidth) {
3030     Bits[Pos++] |= (Used << BitOffset) & Used;
3031     BitWidth -= CharWidth - BitOffset;
3032     BitOffset = 0;
3033   }
3034 
3035   while (BitWidth >= CharWidth) {
3036     Bits[Pos++] = Used;
3037     BitWidth -= CharWidth;
3038   }
3039 
3040   if (BitWidth > 0)
3041     Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
3042 }
3043 
3044 // Set the bits corresponding to a field having width `BitWidth` and located at
3045 // offset `BitOffset` (from the least significant bit) within a storage unit of
3046 // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of
3047 // `Bits` corresponds to one target byte. Use target endian layout.
3048 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset,
3049                         int StorageSize, int BitOffset, int BitWidth,
3050                         int CharWidth, bool BigEndian) {
3051 
3052   SmallVector<uint64_t, 8> TmpBits(StorageSize);
3053   setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3054 
3055   if (BigEndian)
3056     std::reverse(TmpBits.begin(), TmpBits.end());
3057 
3058   for (uint64_t V : TmpBits)
3059     Bits[StorageOffset++] |= V;
3060 }
3061 
3062 static void setUsedBits(CodeGenModule &, QualType, int,
3063                         SmallVectorImpl<uint64_t> &);
3064 
3065 // Set the bits in `Bits`, which correspond to the value representations of
3066 // the actual members of the record type `RTy`. Note that this function does
3067 // not handle base classes, virtual tables, etc, since they cannot happen in
3068 // CMSE function arguments or return. The bit mask corresponds to the target
3069 // memory layout, i.e. it's endian dependent.
3070 static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
3071                         SmallVectorImpl<uint64_t> &Bits) {
3072   ASTContext &Context = CGM.getContext();
3073   int CharWidth = Context.getCharWidth();
3074   const RecordDecl *RD = RTy->getDecl()->getDefinition();
3075   const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
3076   const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
3077 
3078   int Idx = 0;
3079   for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
3080     const FieldDecl *F = *I;
3081 
3082     if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
3083         F->getType()->isIncompleteArrayType())
3084       continue;
3085 
3086     if (F->isBitField()) {
3087       const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
3088       setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
3089                   BFI.StorageSize / CharWidth, BFI.Offset,
3090                   BFI.Size, CharWidth,
3091                   CGM.getDataLayout().isBigEndian());
3092       continue;
3093     }
3094 
3095     setUsedBits(CGM, F->getType(),
3096                 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
3097   }
3098 }
3099 
3100 // Set the bits in `Bits`, which correspond to the value representations of
3101 // the elements of an array type `ATy`.
3102 static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
3103                         int Offset, SmallVectorImpl<uint64_t> &Bits) {
3104   const ASTContext &Context = CGM.getContext();
3105 
3106   QualType ETy = Context.getBaseElementType(ATy);
3107   int Size = Context.getTypeSizeInChars(ETy).getQuantity();
3108   SmallVector<uint64_t, 4> TmpBits(Size);
3109   setUsedBits(CGM, ETy, 0, TmpBits);
3110 
3111   for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
3112     auto Src = TmpBits.begin();
3113     auto Dst = Bits.begin() + Offset + I * Size;
3114     for (int J = 0; J < Size; ++J)
3115       *Dst++ |= *Src++;
3116   }
3117 }
3118 
3119 // Set the bits in `Bits`, which correspond to the value representations of
3120 // the type `QTy`.
3121 static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
3122                         SmallVectorImpl<uint64_t> &Bits) {
3123   if (const auto *RTy = QTy->getAs<RecordType>())
3124     return setUsedBits(CGM, RTy, Offset, Bits);
3125 
3126   ASTContext &Context = CGM.getContext();
3127   if (const auto *ATy = Context.getAsConstantArrayType(QTy))
3128     return setUsedBits(CGM, ATy, Offset, Bits);
3129 
3130   int Size = Context.getTypeSizeInChars(QTy).getQuantity();
3131   if (Size <= 0)
3132     return;
3133 
3134   std::fill_n(Bits.begin() + Offset, Size,
3135               (uint64_t(1) << Context.getCharWidth()) - 1);
3136 }
3137 
3138 static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
3139                                    int Pos, int Size, int CharWidth,
3140                                    bool BigEndian) {
3141   assert(Size > 0);
3142   uint64_t Mask = 0;
3143   if (BigEndian) {
3144     for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
3145          ++P)
3146       Mask = (Mask << CharWidth) | *P;
3147   } else {
3148     auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3149     do
3150       Mask = (Mask << CharWidth) | *--P;
3151     while (P != End);
3152   }
3153   return Mask;
3154 }
3155 
3156 // Emit code to clear the bits in a record, which aren't a part of any user
3157 // declared member, when the record is a function return.
3158 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3159                                                   llvm::IntegerType *ITy,
3160                                                   QualType QTy) {
3161   assert(Src->getType() == ITy);
3162   assert(ITy->getScalarSizeInBits() <= 64);
3163 
3164   const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3165   int Size = DataLayout.getTypeStoreSize(ITy);
3166   SmallVector<uint64_t, 4> Bits(Size);
3167   setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3168 
3169   int CharWidth = CGM.getContext().getCharWidth();
3170   uint64_t Mask =
3171       buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());
3172 
3173   return Builder.CreateAnd(Src, Mask, "cmse.clear");
3174 }
3175 
3176 // Emit code to clear the bits in a record, which aren't a part of any user
3177 // declared member, when the record is a function argument.
3178 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3179                                                   llvm::ArrayType *ATy,
3180                                                   QualType QTy) {
3181   const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3182   int Size = DataLayout.getTypeStoreSize(ATy);
3183   SmallVector<uint64_t, 16> Bits(Size);
3184   setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3185 
3186   // Clear each element of the LLVM array.
3187   int CharWidth = CGM.getContext().getCharWidth();
3188   int CharsPerElt =
3189       ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3190   int MaskIndex = 0;
3191   llvm::Value *R = llvm::UndefValue::get(ATy);
3192   for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3193     uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
3194                                        DataLayout.isBigEndian());
3195     MaskIndex += CharsPerElt;
3196     llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3197     llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3198     R = Builder.CreateInsertValue(R, T1, I);
3199   }
3200 
3201   return R;
3202 }
3203 
3204 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
3205                                          bool EmitRetDbgLoc,
3206                                          SourceLocation EndLoc) {
3207   if (FI.isNoReturn()) {
3208     // Noreturn functions don't return.
3209     EmitUnreachable(EndLoc);
3210     return;
3211   }
3212 
3213   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3214     // Naked functions don't have epilogues.
3215     Builder.CreateUnreachable();
3216     return;
3217   }
3218 
3219   // Functions with no result always return void.
3220   if (!ReturnValue.isValid()) {
3221     Builder.CreateRetVoid();
3222     return;
3223   }
3224 
3225   llvm::DebugLoc RetDbgLoc;
3226   llvm::Value *RV = nullptr;
3227   QualType RetTy = FI.getReturnType();
3228   const ABIArgInfo &RetAI = FI.getReturnInfo();
3229 
3230   switch (RetAI.getKind()) {
3231   case ABIArgInfo::InAlloca:
3232     // Aggregrates get evaluated directly into the destination.  Sometimes we
3233     // need to return the sret value in a register, though.
3234     assert(hasAggregateEvaluationKind(RetTy));
3235     if (RetAI.getInAllocaSRet()) {
3236       llvm::Function::arg_iterator EI = CurFn->arg_end();
3237       --EI;
3238       llvm::Value *ArgStruct = &*EI;
3239       llvm::Value *SRet = Builder.CreateStructGEP(
3240           nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
3241       RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
3242     }
3243     break;
3244 
3245   case ABIArgInfo::Indirect: {
3246     auto AI = CurFn->arg_begin();
3247     if (RetAI.isSRetAfterThis())
3248       ++AI;
3249     switch (getEvaluationKind(RetTy)) {
3250     case TEK_Complex: {
3251       ComplexPairTy RT =
3252         EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
3253       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
3254                          /*isInit*/ true);
3255       break;
3256     }
3257     case TEK_Aggregate:
3258       // Do nothing; aggregrates get evaluated directly into the destination.
3259       break;
3260     case TEK_Scalar:
3261       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
3262                         MakeNaturalAlignAddrLValue(&*AI, RetTy),
3263                         /*isInit*/ true);
3264       break;
3265     }
3266     break;
3267   }
3268 
3269   case ABIArgInfo::Extend:
3270   case ABIArgInfo::Direct:
3271     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
3272         RetAI.getDirectOffset() == 0) {
3273       // The internal return value temp always will have pointer-to-return-type
3274       // type, just do a load.
3275 
3276       // If there is a dominating store to ReturnValue, we can elide
3277       // the load, zap the store, and usually zap the alloca.
3278       if (llvm::StoreInst *SI =
3279               findDominatingStoreToReturnValue(*this)) {
3280         // Reuse the debug location from the store unless there is
3281         // cleanup code to be emitted between the store and return
3282         // instruction.
3283         if (EmitRetDbgLoc && !AutoreleaseResult)
3284           RetDbgLoc = SI->getDebugLoc();
3285         // Get the stored value and nuke the now-dead store.
3286         RV = SI->getValueOperand();
3287         SI->eraseFromParent();
3288 
3289       // Otherwise, we have to do a simple load.
3290       } else {
3291         RV = Builder.CreateLoad(ReturnValue);
3292       }
3293     } else {
3294       // If the value is offset in memory, apply the offset now.
3295       Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
3296 
3297       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
3298     }
3299 
3300     // In ARC, end functions that return a retainable type with a call
3301     // to objc_autoreleaseReturnValue.
3302     if (AutoreleaseResult) {
3303 #ifndef NDEBUG
3304       // Type::isObjCRetainabletype has to be called on a QualType that hasn't
3305       // been stripped of the typedefs, so we cannot use RetTy here. Get the
3306       // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
3307       // CurCodeDecl or BlockInfo.
3308       QualType RT;
3309 
3310       if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3311         RT = FD->getReturnType();
3312       else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3313         RT = MD->getReturnType();
3314       else if (isa<BlockDecl>(CurCodeDecl))
3315         RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3316       else
3317         llvm_unreachable("Unexpected function/method type");
3318 
3319       assert(getLangOpts().ObjCAutoRefCount &&
3320              !FI.isReturnsRetained() &&
3321              RT->isObjCRetainableType());
3322 #endif
3323       RV = emitAutoreleaseOfResult(*this, RV);
3324     }
3325 
3326     break;
3327 
3328   case ABIArgInfo::Ignore:
3329     break;
3330 
3331   case ABIArgInfo::CoerceAndExpand: {
3332     auto coercionType = RetAI.getCoerceAndExpandType();
3333 
3334     // Load all of the coerced elements out into results.
3335     llvm::SmallVector<llvm::Value*, 4> results;
3336     Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
3337     for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3338       auto coercedEltType = coercionType->getElementType(i);
3339       if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3340         continue;
3341 
3342       auto eltAddr = Builder.CreateStructGEP(addr, i);
3343       auto elt = Builder.CreateLoad(eltAddr);
3344       results.push_back(elt);
3345     }
3346 
3347     // If we have one result, it's the single direct result type.
3348     if (results.size() == 1) {
3349       RV = results[0];
3350 
3351     // Otherwise, we need to make a first-class aggregate.
3352     } else {
3353       // Construct a return type that lacks padding elements.
3354       llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3355 
3356       RV = llvm::UndefValue::get(returnType);
3357       for (unsigned i = 0, e = results.size(); i != e; ++i) {
3358         RV = Builder.CreateInsertValue(RV, results[i], i);
3359       }
3360     }
3361     break;
3362   }
3363   case ABIArgInfo::Expand:
3364   case ABIArgInfo::IndirectAliased:
3365     llvm_unreachable("Invalid ABI kind for return argument");
3366   }
3367 
3368   llvm::Instruction *Ret;
3369   if (RV) {
3370     if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
3371       // For certain return types, clear padding bits, as they may reveal
3372       // sensitive information.
3373       // Small struct/union types are passed as integers.
3374       auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
3375       if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType()))
3376         RV = EmitCMSEClearRecord(RV, ITy, RetTy);
3377     }
3378     EmitReturnValueCheck(RV);
3379     Ret = Builder.CreateRet(RV);
3380   } else {
3381     Ret = Builder.CreateRetVoid();
3382   }
3383 
3384   if (RetDbgLoc)
3385     Ret->setDebugLoc(std::move(RetDbgLoc));
3386 }
3387 
3388 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
3389   // A current decl may not be available when emitting vtable thunks.
3390   if (!CurCodeDecl)
3391     return;
3392 
3393   // If the return block isn't reachable, neither is this check, so don't emit
3394   // it.
3395   if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
3396     return;
3397 
3398   ReturnsNonNullAttr *RetNNAttr = nullptr;
3399   if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
3400     RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3401 
3402   if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3403     return;
3404 
3405   // Prefer the returns_nonnull attribute if it's present.
3406   SourceLocation AttrLoc;
3407   SanitizerMask CheckKind;
3408   SanitizerHandler Handler;
3409   if (RetNNAttr) {
3410     assert(!requiresReturnValueNullabilityCheck() &&
3411            "Cannot check nullability and the nonnull attribute");
3412     AttrLoc = RetNNAttr->getLocation();
3413     CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3414     Handler = SanitizerHandler::NonnullReturn;
3415   } else {
3416     if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3417       if (auto *TSI = DD->getTypeSourceInfo())
3418         if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
3419           AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3420     CheckKind = SanitizerKind::NullabilityReturn;
3421     Handler = SanitizerHandler::NullabilityReturn;
3422   }
3423 
3424   SanitizerScope SanScope(this);
3425 
3426   // Make sure the "return" source location is valid. If we're checking a
3427   // nullability annotation, make sure the preconditions for the check are met.
3428   llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3429   llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3430   llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3431   llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3432   if (requiresReturnValueNullabilityCheck())
3433     CanNullCheck =
3434         Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3435   Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3436   EmitBlock(Check);
3437 
3438   // Now do the null check.
3439   llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3440   llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3441   llvm::Value *DynamicData[] = {SLocPtr};
3442   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3443 
3444   EmitBlock(NoCheck);
3445 
3446 #ifndef NDEBUG
3447   // The return location should not be used after the check has been emitted.
3448   ReturnLocation = Address::invalid();
3449 #endif
3450 }
3451 
3452 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3453   const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3454   return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3455 }
3456 
3457 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3458                                           QualType Ty) {
3459   // FIXME: Generate IR in one pass, rather than going back and fixing up these
3460   // placeholders.
3461   llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3462   llvm::Type *IRPtrTy = IRTy->getPointerTo();
3463   llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3464 
3465   // FIXME: When we generate this IR in one pass, we shouldn't need
3466   // this win32-specific alignment hack.
3467   CharUnits Align = CharUnits::fromQuantity(4);
3468   Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3469 
3470   return AggValueSlot::forAddr(Address(Placeholder, Align),
3471                                Ty.getQualifiers(),
3472                                AggValueSlot::IsNotDestructed,
3473                                AggValueSlot::DoesNotNeedGCBarriers,
3474                                AggValueSlot::IsNotAliased,
3475                                AggValueSlot::DoesNotOverlap);
3476 }
3477 
3478 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3479                                           const VarDecl *param,
3480                                           SourceLocation loc) {
3481   // StartFunction converted the ABI-lowered parameter(s) into a
3482   // local alloca.  We need to turn that into an r-value suitable
3483   // for EmitCall.
3484   Address local = GetAddrOfLocalVar(param);
3485 
3486   QualType type = param->getType();
3487 
3488   if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3489     CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3490   }
3491 
3492   // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3493   // but the argument needs to be the original pointer.
3494   if (type->isReferenceType()) {
3495     args.add(RValue::get(Builder.CreateLoad(local)), type);
3496 
3497   // In ARC, move out of consumed arguments so that the release cleanup
3498   // entered by StartFunction doesn't cause an over-release.  This isn't
3499   // optimal -O0 code generation, but it should get cleaned up when
3500   // optimization is enabled.  This also assumes that delegate calls are
3501   // performed exactly once for a set of arguments, but that should be safe.
3502   } else if (getLangOpts().ObjCAutoRefCount &&
3503              param->hasAttr<NSConsumedAttr>() &&
3504              type->isObjCRetainableType()) {
3505     llvm::Value *ptr = Builder.CreateLoad(local);
3506     auto null =
3507       llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3508     Builder.CreateStore(null, local);
3509     args.add(RValue::get(ptr), type);
3510 
3511   // For the most part, we just need to load the alloca, except that
3512   // aggregate r-values are actually pointers to temporaries.
3513   } else {
3514     args.add(convertTempToRValue(local, type, loc), type);
3515   }
3516 
3517   // Deactivate the cleanup for the callee-destructed param that was pushed.
3518   if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3519       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3520       param->needsDestruction(getContext())) {
3521     EHScopeStack::stable_iterator cleanup =
3522         CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3523     assert(cleanup.isValid() &&
3524            "cleanup for callee-destructed param not recorded");
3525     // This unreachable is a temporary marker which will be removed later.
3526     llvm::Instruction *isActive = Builder.CreateUnreachable();
3527     args.addArgCleanupDeactivation(cleanup, isActive);
3528   }
3529 }
3530 
3531 static bool isProvablyNull(llvm::Value *addr) {
3532   return isa<llvm::ConstantPointerNull>(addr);
3533 }
3534 
3535 /// Emit the actual writing-back of a writeback.
3536 static void emitWriteback(CodeGenFunction &CGF,
3537                           const CallArgList::Writeback &writeback) {
3538   const LValue &srcLV = writeback.Source;
3539   Address srcAddr = srcLV.getAddress(CGF);
3540   assert(!isProvablyNull(srcAddr.getPointer()) &&
3541          "shouldn't have writeback for provably null argument");
3542 
3543   llvm::BasicBlock *contBB = nullptr;
3544 
3545   // If the argument wasn't provably non-null, we need to null check
3546   // before doing the store.
3547   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3548                                               CGF.CGM.getDataLayout());
3549   if (!provablyNonNull) {
3550     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3551     contBB = CGF.createBasicBlock("icr.done");
3552 
3553     llvm::Value *isNull =
3554       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3555     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3556     CGF.EmitBlock(writebackBB);
3557   }
3558 
3559   // Load the value to writeback.
3560   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3561 
3562   // Cast it back, in case we're writing an id to a Foo* or something.
3563   value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3564                                     "icr.writeback-cast");
3565 
3566   // Perform the writeback.
3567 
3568   // If we have a "to use" value, it's something we need to emit a use
3569   // of.  This has to be carefully threaded in: if it's done after the
3570   // release it's potentially undefined behavior (and the optimizer
3571   // will ignore it), and if it happens before the retain then the
3572   // optimizer could move the release there.
3573   if (writeback.ToUse) {
3574     assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3575 
3576     // Retain the new value.  No need to block-copy here:  the block's
3577     // being passed up the stack.
3578     value = CGF.EmitARCRetainNonBlock(value);
3579 
3580     // Emit the intrinsic use here.
3581     CGF.EmitARCIntrinsicUse(writeback.ToUse);
3582 
3583     // Load the old value (primitively).
3584     llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3585 
3586     // Put the new value in place (primitively).
3587     CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3588 
3589     // Release the old value.
3590     CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3591 
3592   // Otherwise, we can just do a normal lvalue store.
3593   } else {
3594     CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3595   }
3596 
3597   // Jump to the continuation block.
3598   if (!provablyNonNull)
3599     CGF.EmitBlock(contBB);
3600 }
3601 
3602 static void emitWritebacks(CodeGenFunction &CGF,
3603                            const CallArgList &args) {
3604   for (const auto &I : args.writebacks())
3605     emitWriteback(CGF, I);
3606 }
3607 
3608 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3609                                             const CallArgList &CallArgs) {
3610   ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3611     CallArgs.getCleanupsToDeactivate();
3612   // Iterate in reverse to increase the likelihood of popping the cleanup.
3613   for (const auto &I : llvm::reverse(Cleanups)) {
3614     CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3615     I.IsActiveIP->eraseFromParent();
3616   }
3617 }
3618 
3619 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3620   if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3621     if (uop->getOpcode() == UO_AddrOf)
3622       return uop->getSubExpr();
3623   return nullptr;
3624 }
3625 
3626 /// Emit an argument that's being passed call-by-writeback.  That is,
3627 /// we are passing the address of an __autoreleased temporary; it
3628 /// might be copy-initialized with the current value of the given
3629 /// address, but it will definitely be copied out of after the call.
3630 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3631                              const ObjCIndirectCopyRestoreExpr *CRE) {
3632   LValue srcLV;
3633 
3634   // Make an optimistic effort to emit the address as an l-value.
3635   // This can fail if the argument expression is more complicated.
3636   if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3637     srcLV = CGF.EmitLValue(lvExpr);
3638 
3639   // Otherwise, just emit it as a scalar.
3640   } else {
3641     Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3642 
3643     QualType srcAddrType =
3644       CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3645     srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3646   }
3647   Address srcAddr = srcLV.getAddress(CGF);
3648 
3649   // The dest and src types don't necessarily match in LLVM terms
3650   // because of the crazy ObjC compatibility rules.
3651 
3652   llvm::PointerType *destType =
3653     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3654 
3655   // If the address is a constant null, just pass the appropriate null.
3656   if (isProvablyNull(srcAddr.getPointer())) {
3657     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3658              CRE->getType());
3659     return;
3660   }
3661 
3662   // Create the temporary.
3663   Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3664                                       CGF.getPointerAlign(),
3665                                       "icr.temp");
3666   // Loading an l-value can introduce a cleanup if the l-value is __weak,
3667   // and that cleanup will be conditional if we can't prove that the l-value
3668   // isn't null, so we need to register a dominating point so that the cleanups
3669   // system will make valid IR.
3670   CodeGenFunction::ConditionalEvaluation condEval(CGF);
3671 
3672   // Zero-initialize it if we're not doing a copy-initialization.
3673   bool shouldCopy = CRE->shouldCopy();
3674   if (!shouldCopy) {
3675     llvm::Value *null =
3676       llvm::ConstantPointerNull::get(
3677         cast<llvm::PointerType>(destType->getElementType()));
3678     CGF.Builder.CreateStore(null, temp);
3679   }
3680 
3681   llvm::BasicBlock *contBB = nullptr;
3682   llvm::BasicBlock *originBB = nullptr;
3683 
3684   // If the address is *not* known to be non-null, we need to switch.
3685   llvm::Value *finalArgument;
3686 
3687   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3688                                               CGF.CGM.getDataLayout());
3689   if (provablyNonNull) {
3690     finalArgument = temp.getPointer();
3691   } else {
3692     llvm::Value *isNull =
3693       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3694 
3695     finalArgument = CGF.Builder.CreateSelect(isNull,
3696                                    llvm::ConstantPointerNull::get(destType),
3697                                              temp.getPointer(), "icr.argument");
3698 
3699     // If we need to copy, then the load has to be conditional, which
3700     // means we need control flow.
3701     if (shouldCopy) {
3702       originBB = CGF.Builder.GetInsertBlock();
3703       contBB = CGF.createBasicBlock("icr.cont");
3704       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3705       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3706       CGF.EmitBlock(copyBB);
3707       condEval.begin(CGF);
3708     }
3709   }
3710 
3711   llvm::Value *valueToUse = nullptr;
3712 
3713   // Perform a copy if necessary.
3714   if (shouldCopy) {
3715     RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3716     assert(srcRV.isScalar());
3717 
3718     llvm::Value *src = srcRV.getScalarVal();
3719     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3720                                     "icr.cast");
3721 
3722     // Use an ordinary store, not a store-to-lvalue.
3723     CGF.Builder.CreateStore(src, temp);
3724 
3725     // If optimization is enabled, and the value was held in a
3726     // __strong variable, we need to tell the optimizer that this
3727     // value has to stay alive until we're doing the store back.
3728     // This is because the temporary is effectively unretained,
3729     // and so otherwise we can violate the high-level semantics.
3730     if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3731         srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3732       valueToUse = src;
3733     }
3734   }
3735 
3736   // Finish the control flow if we needed it.
3737   if (shouldCopy && !provablyNonNull) {
3738     llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3739     CGF.EmitBlock(contBB);
3740 
3741     // Make a phi for the value to intrinsically use.
3742     if (valueToUse) {
3743       llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3744                                                       "icr.to-use");
3745       phiToUse->addIncoming(valueToUse, copyBB);
3746       phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3747                             originBB);
3748       valueToUse = phiToUse;
3749     }
3750 
3751     condEval.end(CGF);
3752   }
3753 
3754   args.addWriteback(srcLV, temp, valueToUse);
3755   args.add(RValue::get(finalArgument), CRE->getType());
3756 }
3757 
3758 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3759   assert(!StackBase);
3760 
3761   // Save the stack.
3762   llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3763   StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3764 }
3765 
3766 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3767   if (StackBase) {
3768     // Restore the stack after the call.
3769     llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3770     CGF.Builder.CreateCall(F, StackBase);
3771   }
3772 }
3773 
3774 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3775                                           SourceLocation ArgLoc,
3776                                           AbstractCallee AC,
3777                                           unsigned ParmNum) {
3778   if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3779                          SanOpts.has(SanitizerKind::NullabilityArg)))
3780     return;
3781 
3782   // The param decl may be missing in a variadic function.
3783   auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3784   unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3785 
3786   // Prefer the nonnull attribute if it's present.
3787   const NonNullAttr *NNAttr = nullptr;
3788   if (SanOpts.has(SanitizerKind::NonnullAttribute))
3789     NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3790 
3791   bool CanCheckNullability = false;
3792   if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3793     auto Nullability = PVD->getType()->getNullability(getContext());
3794     CanCheckNullability = Nullability &&
3795                           *Nullability == NullabilityKind::NonNull &&
3796                           PVD->getTypeSourceInfo();
3797   }
3798 
3799   if (!NNAttr && !CanCheckNullability)
3800     return;
3801 
3802   SourceLocation AttrLoc;
3803   SanitizerMask CheckKind;
3804   SanitizerHandler Handler;
3805   if (NNAttr) {
3806     AttrLoc = NNAttr->getLocation();
3807     CheckKind = SanitizerKind::NonnullAttribute;
3808     Handler = SanitizerHandler::NonnullArg;
3809   } else {
3810     AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3811     CheckKind = SanitizerKind::NullabilityArg;
3812     Handler = SanitizerHandler::NullabilityArg;
3813   }
3814 
3815   SanitizerScope SanScope(this);
3816   llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType);
3817   llvm::Constant *StaticData[] = {
3818       EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3819       llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3820   };
3821   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3822 }
3823 
3824 // Check if the call is going to use the inalloca convention. This needs to
3825 // agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged
3826 // later, so we can't check it directly.
3827 static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC,
3828                             ArrayRef<QualType> ArgTypes) {
3829   // The Swift calling convention doesn't go through the target-specific
3830   // argument classification, so it never uses inalloca.
3831   // TODO: Consider limiting inalloca use to only calling conventions supported
3832   // by MSVC.
3833   if (ExplicitCC == CC_Swift)
3834     return false;
3835   if (!CGM.getTarget().getCXXABI().isMicrosoft())
3836     return false;
3837   return llvm::any_of(ArgTypes, [&](QualType Ty) {
3838     return isInAllocaArgument(CGM.getCXXABI(), Ty);
3839   });
3840 }
3841 
3842 #ifndef NDEBUG
3843 // Determine whether the given argument is an Objective-C method
3844 // that may have type parameters in its signature.
3845 static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
3846   const DeclContext *dc = method->getDeclContext();
3847   if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) {
3848     return classDecl->getTypeParamListAsWritten();
3849   }
3850 
3851   if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
3852     return catDecl->getTypeParamList();
3853   }
3854 
3855   return false;
3856 }
3857 #endif
3858 
3859 /// EmitCallArgs - Emit call arguments for a function.
3860 void CodeGenFunction::EmitCallArgs(
3861     CallArgList &Args, PrototypeWrapper Prototype,
3862     llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3863     AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3864   SmallVector<QualType, 16> ArgTypes;
3865 
3866   assert((ParamsToSkip == 0 || Prototype.P) &&
3867          "Can't skip parameters if type info is not provided");
3868 
3869   // This variable only captures *explicitly* written conventions, not those
3870   // applied by default via command line flags or target defaults, such as
3871   // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would
3872   // require knowing if this is a C++ instance method or being able to see
3873   // unprototyped FunctionTypes.
3874   CallingConv ExplicitCC = CC_C;
3875 
3876   // First, if a prototype was provided, use those argument types.
3877   bool IsVariadic = false;
3878   if (Prototype.P) {
3879     const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>();
3880     if (MD) {
3881       IsVariadic = MD->isVariadic();
3882       ExplicitCC = getCallingConventionForDecl(
3883           MD, CGM.getTarget().getTriple().isOSWindows());
3884       ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
3885                       MD->param_type_end());
3886     } else {
3887       const auto *FPT = Prototype.P.get<const FunctionProtoType *>();
3888       IsVariadic = FPT->isVariadic();
3889       ExplicitCC = FPT->getExtInfo().getCC();
3890       ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
3891                       FPT->param_type_end());
3892     }
3893 
3894 #ifndef NDEBUG
3895     // Check that the prototyped types match the argument expression types.
3896     bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD);
3897     CallExpr::const_arg_iterator Arg = ArgRange.begin();
3898     for (QualType Ty : ArgTypes) {
3899       assert(Arg != ArgRange.end() && "Running over edge of argument list!");
3900       assert(
3901           (isGenericMethod || Ty->isVariablyModifiedType() ||
3902            Ty.getNonReferenceType()->isObjCRetainableType() ||
3903            getContext()
3904                    .getCanonicalType(Ty.getNonReferenceType())
3905                    .getTypePtr() ==
3906                getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
3907           "type mismatch in call argument!");
3908       ++Arg;
3909     }
3910 
3911     // Either we've emitted all the call args, or we have a call to variadic
3912     // function.
3913     assert((Arg == ArgRange.end() || IsVariadic) &&
3914            "Extra arguments in non-variadic function!");
3915 #endif
3916   }
3917 
3918   // If we still have any arguments, emit them using the type of the argument.
3919   for (auto *A : llvm::make_range(std::next(ArgRange.begin(), ArgTypes.size()),
3920                                   ArgRange.end()))
3921     ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
3922   assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3923 
3924   // We must evaluate arguments from right to left in the MS C++ ABI,
3925   // because arguments are destroyed left to right in the callee. As a special
3926   // case, there are certain language constructs that require left-to-right
3927   // evaluation, and in those cases we consider the evaluation order requirement
3928   // to trump the "destruction order is reverse construction order" guarantee.
3929   bool LeftToRight =
3930       CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3931           ? Order == EvaluationOrder::ForceLeftToRight
3932           : Order != EvaluationOrder::ForceRightToLeft;
3933 
3934   auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3935                                          RValue EmittedArg) {
3936     if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3937       return;
3938     auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3939     if (PS == nullptr)
3940       return;
3941 
3942     const auto &Context = getContext();
3943     auto SizeTy = Context.getSizeType();
3944     auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3945     assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3946     llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3947                                                      EmittedArg.getScalarVal(),
3948                                                      PS->isDynamic());
3949     Args.add(RValue::get(V), SizeTy);
3950     // If we're emitting args in reverse, be sure to do so with
3951     // pass_object_size, as well.
3952     if (!LeftToRight)
3953       std::swap(Args.back(), *(&Args.back() - 1));
3954   };
3955 
3956   // Insert a stack save if we're going to need any inalloca args.
3957   if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
3958     assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
3959            "inalloca only supported on x86");
3960     Args.allocateArgumentMemory(*this);
3961   }
3962 
3963   // Evaluate each argument in the appropriate order.
3964   size_t CallArgsStart = Args.size();
3965   for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3966     unsigned Idx = LeftToRight ? I : E - I - 1;
3967     CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3968     unsigned InitialArgSize = Args.size();
3969     // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3970     // the argument and parameter match or the objc method is parameterized.
3971     assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3972             getContext().hasSameUnqualifiedType((*Arg)->getType(),
3973                                                 ArgTypes[Idx]) ||
3974             (isa<ObjCMethodDecl>(AC.getDecl()) &&
3975              isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3976            "Argument and parameter types don't match");
3977     EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3978     // In particular, we depend on it being the last arg in Args, and the
3979     // objectsize bits depend on there only being one arg if !LeftToRight.
3980     assert(InitialArgSize + 1 == Args.size() &&
3981            "The code below depends on only adding one arg per EmitCallArg");
3982     (void)InitialArgSize;
3983     // Since pointer argument are never emitted as LValue, it is safe to emit
3984     // non-null argument check for r-value only.
3985     if (!Args.back().hasLValue()) {
3986       RValue RVArg = Args.back().getKnownRValue();
3987       EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3988                           ParamsToSkip + Idx);
3989       // @llvm.objectsize should never have side-effects and shouldn't need
3990       // destruction/cleanups, so we can safely "emit" it after its arg,
3991       // regardless of right-to-leftness
3992       MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3993     }
3994   }
3995 
3996   if (!LeftToRight) {
3997     // Un-reverse the arguments we just evaluated so they match up with the LLVM
3998     // IR function.
3999     std::reverse(Args.begin() + CallArgsStart, Args.end());
4000   }
4001 }
4002 
4003 namespace {
4004 
4005 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
4006   DestroyUnpassedArg(Address Addr, QualType Ty)
4007       : Addr(Addr), Ty(Ty) {}
4008 
4009   Address Addr;
4010   QualType Ty;
4011 
4012   void Emit(CodeGenFunction &CGF, Flags flags) override {
4013     QualType::DestructionKind DtorKind = Ty.isDestructedType();
4014     if (DtorKind == QualType::DK_cxx_destructor) {
4015       const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
4016       assert(!Dtor->isTrivial());
4017       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
4018                                 /*Delegating=*/false, Addr, Ty);
4019     } else {
4020       CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
4021     }
4022   }
4023 };
4024 
4025 struct DisableDebugLocationUpdates {
4026   CodeGenFunction &CGF;
4027   bool disabledDebugInfo;
4028   DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
4029     if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
4030       CGF.disableDebugInfo();
4031   }
4032   ~DisableDebugLocationUpdates() {
4033     if (disabledDebugInfo)
4034       CGF.enableDebugInfo();
4035   }
4036 };
4037 
4038 } // end anonymous namespace
4039 
4040 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
4041   if (!HasLV)
4042     return RV;
4043   LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
4044   CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
4045                         LV.isVolatile());
4046   IsUsed = true;
4047   return RValue::getAggregate(Copy.getAddress(CGF));
4048 }
4049 
4050 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
4051   LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
4052   if (!HasLV && RV.isScalar())
4053     CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
4054   else if (!HasLV && RV.isComplex())
4055     CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
4056   else {
4057     auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
4058     LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
4059     // We assume that call args are never copied into subobjects.
4060     CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
4061                           HasLV ? LV.isVolatileQualified()
4062                                 : RV.isVolatileQualified());
4063   }
4064   IsUsed = true;
4065 }
4066 
4067 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
4068                                   QualType type) {
4069   DisableDebugLocationUpdates Dis(*this, E);
4070   if (const ObjCIndirectCopyRestoreExpr *CRE
4071         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
4072     assert(getLangOpts().ObjCAutoRefCount);
4073     return emitWritebackArg(*this, args, CRE);
4074   }
4075 
4076   assert(type->isReferenceType() == E->isGLValue() &&
4077          "reference binding to unmaterialized r-value!");
4078 
4079   if (E->isGLValue()) {
4080     assert(E->getObjectKind() == OK_Ordinary);
4081     return args.add(EmitReferenceBindingToExpr(E), type);
4082   }
4083 
4084   bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
4085 
4086   // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
4087   // However, we still have to push an EH-only cleanup in case we unwind before
4088   // we make it to the call.
4089   if (HasAggregateEvalKind &&
4090       type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
4091     // If we're using inalloca, use the argument memory.  Otherwise, use a
4092     // temporary.
4093     AggValueSlot Slot;
4094     if (args.isUsingInAlloca())
4095       Slot = createPlaceholderSlot(*this, type);
4096     else
4097       Slot = CreateAggTemp(type, "agg.tmp");
4098 
4099     bool DestroyedInCallee = true, NeedsEHCleanup = true;
4100     if (const auto *RD = type->getAsCXXRecordDecl())
4101       DestroyedInCallee = RD->hasNonTrivialDestructor();
4102     else
4103       NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
4104 
4105     if (DestroyedInCallee)
4106       Slot.setExternallyDestructed();
4107 
4108     EmitAggExpr(E, Slot);
4109     RValue RV = Slot.asRValue();
4110     args.add(RV, type);
4111 
4112     if (DestroyedInCallee && NeedsEHCleanup) {
4113       // Create a no-op GEP between the placeholder and the cleanup so we can
4114       // RAUW it successfully.  It also serves as a marker of the first
4115       // instruction where the cleanup is active.
4116       pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
4117                                               type);
4118       // This unreachable is a temporary marker which will be removed later.
4119       llvm::Instruction *IsActive = Builder.CreateUnreachable();
4120       args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
4121     }
4122     return;
4123   }
4124 
4125   if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
4126       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
4127     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
4128     assert(L.isSimple());
4129     args.addUncopiedAggregate(L, type);
4130     return;
4131   }
4132 
4133   args.add(EmitAnyExprToTemp(E), type);
4134 }
4135 
4136 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
4137   // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
4138   // implicitly widens null pointer constants that are arguments to varargs
4139   // functions to pointer-sized ints.
4140   if (!getTarget().getTriple().isOSWindows())
4141     return Arg->getType();
4142 
4143   if (Arg->getType()->isIntegerType() &&
4144       getContext().getTypeSize(Arg->getType()) <
4145           getContext().getTargetInfo().getPointerWidth(0) &&
4146       Arg->isNullPointerConstant(getContext(),
4147                                  Expr::NPC_ValueDependentIsNotNull)) {
4148     return getContext().getIntPtrType();
4149   }
4150 
4151   return Arg->getType();
4152 }
4153 
4154 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4155 // optimizer it can aggressively ignore unwind edges.
4156 void
4157 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
4158   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
4159       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
4160     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
4161                       CGM.getNoObjCARCExceptionsMetadata());
4162 }
4163 
4164 /// Emits a call to the given no-arguments nounwind runtime function.
4165 llvm::CallInst *
4166 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4167                                          const llvm::Twine &name) {
4168   return EmitNounwindRuntimeCall(callee, None, name);
4169 }
4170 
4171 /// Emits a call to the given nounwind runtime function.
4172 llvm::CallInst *
4173 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4174                                          ArrayRef<llvm::Value *> args,
4175                                          const llvm::Twine &name) {
4176   llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
4177   call->setDoesNotThrow();
4178   return call;
4179 }
4180 
4181 /// Emits a simple call (never an invoke) to the given no-arguments
4182 /// runtime function.
4183 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4184                                                  const llvm::Twine &name) {
4185   return EmitRuntimeCall(callee, None, name);
4186 }
4187 
4188 // Calls which may throw must have operand bundles indicating which funclet
4189 // they are nested within.
4190 SmallVector<llvm::OperandBundleDef, 1>
4191 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
4192   SmallVector<llvm::OperandBundleDef, 1> BundleList;
4193   // There is no need for a funclet operand bundle if we aren't inside a
4194   // funclet.
4195   if (!CurrentFuncletPad)
4196     return BundleList;
4197 
4198   // Skip intrinsics which cannot throw.
4199   auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
4200   if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
4201     return BundleList;
4202 
4203   BundleList.emplace_back("funclet", CurrentFuncletPad);
4204   return BundleList;
4205 }
4206 
4207 /// Emits a simple call (never an invoke) to the given runtime function.
4208 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4209                                                  ArrayRef<llvm::Value *> args,
4210                                                  const llvm::Twine &name) {
4211   llvm::CallInst *call = Builder.CreateCall(
4212       callee, args, getBundlesForFunclet(callee.getCallee()), name);
4213   call->setCallingConv(getRuntimeCC());
4214   return call;
4215 }
4216 
4217 /// Emits a call or invoke to the given noreturn runtime function.
4218 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
4219     llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
4220   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4221       getBundlesForFunclet(callee.getCallee());
4222 
4223   if (getInvokeDest()) {
4224     llvm::InvokeInst *invoke =
4225       Builder.CreateInvoke(callee,
4226                            getUnreachableBlock(),
4227                            getInvokeDest(),
4228                            args,
4229                            BundleList);
4230     invoke->setDoesNotReturn();
4231     invoke->setCallingConv(getRuntimeCC());
4232   } else {
4233     llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
4234     call->setDoesNotReturn();
4235     call->setCallingConv(getRuntimeCC());
4236     Builder.CreateUnreachable();
4237   }
4238 }
4239 
4240 /// Emits a call or invoke instruction to the given nullary runtime function.
4241 llvm::CallBase *
4242 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4243                                          const Twine &name) {
4244   return EmitRuntimeCallOrInvoke(callee, None, name);
4245 }
4246 
4247 /// Emits a call or invoke instruction to the given runtime function.
4248 llvm::CallBase *
4249 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4250                                          ArrayRef<llvm::Value *> args,
4251                                          const Twine &name) {
4252   llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
4253   call->setCallingConv(getRuntimeCC());
4254   return call;
4255 }
4256 
4257 /// Emits a call or invoke instruction to the given function, depending
4258 /// on the current state of the EH stack.
4259 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
4260                                                   ArrayRef<llvm::Value *> Args,
4261                                                   const Twine &Name) {
4262   llvm::BasicBlock *InvokeDest = getInvokeDest();
4263   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4264       getBundlesForFunclet(Callee.getCallee());
4265 
4266   llvm::CallBase *Inst;
4267   if (!InvokeDest)
4268     Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
4269   else {
4270     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
4271     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4272                                 Name);
4273     EmitBlock(ContBB);
4274   }
4275 
4276   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4277   // optimizer it can aggressively ignore unwind edges.
4278   if (CGM.getLangOpts().ObjCAutoRefCount)
4279     AddObjCARCExceptionMetadata(Inst);
4280 
4281   return Inst;
4282 }
4283 
4284 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4285                                                   llvm::Value *New) {
4286   DeferredReplacements.push_back(std::make_pair(Old, New));
4287 }
4288 
4289 namespace {
4290 
4291 /// Specify given \p NewAlign as the alignment of return value attribute. If
4292 /// such attribute already exists, re-set it to the maximal one of two options.
4293 LLVM_NODISCARD llvm::AttributeList
4294 maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4295                                 const llvm::AttributeList &Attrs,
4296                                 llvm::Align NewAlign) {
4297   llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4298   if (CurAlign >= NewAlign)
4299     return Attrs;
4300   llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4301   return Attrs
4302       .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex,
4303                        llvm::Attribute::AttrKind::Alignment)
4304       .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr);
4305 }
4306 
4307 template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
4308 protected:
4309   CodeGenFunction &CGF;
4310 
4311   /// We do nothing if this is, or becomes, nullptr.
4312   const AlignedAttrTy *AA = nullptr;
4313 
4314   llvm::Value *Alignment = nullptr;      // May or may not be a constant.
4315   llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.
4316 
4317   AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4318       : CGF(CGF_) {
4319     if (!FuncDecl)
4320       return;
4321     AA = FuncDecl->getAttr<AlignedAttrTy>();
4322   }
4323 
4324 public:
4325   /// If we can, materialize the alignment as an attribute on return value.
4326   LLVM_NODISCARD llvm::AttributeList
4327   TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
4328     if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
4329       return Attrs;
4330     const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
4331     if (!AlignmentCI)
4332       return Attrs;
4333     // We may legitimately have non-power-of-2 alignment here.
4334     // If so, this is UB land, emit it via `@llvm.assume` instead.
4335     if (!AlignmentCI->getValue().isPowerOf2())
4336       return Attrs;
4337     llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
4338         CGF.getLLVMContext(), Attrs,
4339         llvm::Align(
4340             AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
4341     AA = nullptr; // We're done. Disallow doing anything else.
4342     return NewAttrs;
4343   }
4344 
4345   /// Emit alignment assumption.
4346   /// This is a general fallback that we take if either there is an offset,
4347   /// or the alignment is variable or we are sanitizing for alignment.
4348   void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
4349     if (!AA)
4350       return;
4351     CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
4352                                 AA->getLocation(), Alignment, OffsetCI);
4353     AA = nullptr; // We're done. Disallow doing anything else.
4354   }
4355 };
4356 
4357 /// Helper data structure to emit `AssumeAlignedAttr`.
4358 class AssumeAlignedAttrEmitter final
4359     : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
4360 public:
4361   AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4362       : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4363     if (!AA)
4364       return;
4365     // It is guaranteed that the alignment/offset are constants.
4366     Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
4367     if (Expr *Offset = AA->getOffset()) {
4368       OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
4369       if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
4370         OffsetCI = nullptr;
4371     }
4372   }
4373 };
4374 
4375 /// Helper data structure to emit `AllocAlignAttr`.
4376 class AllocAlignAttrEmitter final
4377     : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
4378 public:
4379   AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
4380                         const CallArgList &CallArgs)
4381       : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4382     if (!AA)
4383       return;
4384     // Alignment may or may not be a constant, and that is okay.
4385     Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
4386                     .getRValue(CGF)
4387                     .getScalarVal();
4388   }
4389 };
4390 
4391 } // namespace
4392 
4393 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
4394                                  const CGCallee &Callee,
4395                                  ReturnValueSlot ReturnValue,
4396                                  const CallArgList &CallArgs,
4397                                  llvm::CallBase **callOrInvoke,
4398                                  SourceLocation Loc) {
4399   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
4400 
4401   assert(Callee.isOrdinary() || Callee.isVirtual());
4402 
4403   // Handle struct-return functions by passing a pointer to the
4404   // location that we would like to return into.
4405   QualType RetTy = CallInfo.getReturnType();
4406   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
4407 
4408   llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
4409 
4410   const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4411   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
4412     // We can only guarantee that a function is called from the correct
4413     // context/function based on the appropriate target attributes,
4414     // so only check in the case where we have both always_inline and target
4415     // since otherwise we could be making a conditional call after a check for
4416     // the proper cpu features (and it won't cause code generation issues due to
4417     // function based code generation).
4418     if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
4419         TargetDecl->hasAttr<TargetAttr>())
4420       checkTargetFeatures(Loc, FD);
4421 
4422     // Some architectures (such as x86-64) have the ABI changed based on
4423     // attribute-target/features. Give them a chance to diagnose.
4424     CGM.getTargetCodeGenInfo().checkFunctionCallABI(
4425         CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
4426   }
4427 
4428 #ifndef NDEBUG
4429   if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
4430     // For an inalloca varargs function, we don't expect CallInfo to match the
4431     // function pointer's type, because the inalloca struct a will have extra
4432     // fields in it for the varargs parameters.  Code later in this function
4433     // bitcasts the function pointer to the type derived from CallInfo.
4434     //
4435     // In other cases, we assert that the types match up (until pointers stop
4436     // having pointee types).
4437     llvm::Type *TypeFromVal;
4438     if (Callee.isVirtual())
4439       TypeFromVal = Callee.getVirtualFunctionType();
4440     else
4441       TypeFromVal =
4442           Callee.getFunctionPointer()->getType()->getPointerElementType();
4443     assert(IRFuncTy == TypeFromVal);
4444   }
4445 #endif
4446 
4447   // 1. Set up the arguments.
4448 
4449   // If we're using inalloca, insert the allocation after the stack save.
4450   // FIXME: Do this earlier rather than hacking it in here!
4451   Address ArgMemory = Address::invalid();
4452   if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
4453     const llvm::DataLayout &DL = CGM.getDataLayout();
4454     llvm::Instruction *IP = CallArgs.getStackBase();
4455     llvm::AllocaInst *AI;
4456     if (IP) {
4457       IP = IP->getNextNode();
4458       AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
4459                                 "argmem", IP);
4460     } else {
4461       AI = CreateTempAlloca(ArgStruct, "argmem");
4462     }
4463     auto Align = CallInfo.getArgStructAlignment();
4464     AI->setAlignment(Align.getAsAlign());
4465     AI->setUsedWithInAlloca(true);
4466     assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
4467     ArgMemory = Address(AI, Align);
4468   }
4469 
4470   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
4471   SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
4472 
4473   // If the call returns a temporary with struct return, create a temporary
4474   // alloca to hold the result, unless one is given to us.
4475   Address SRetPtr = Address::invalid();
4476   Address SRetAlloca = Address::invalid();
4477   llvm::Value *UnusedReturnSizePtr = nullptr;
4478   if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
4479     if (!ReturnValue.isNull()) {
4480       SRetPtr = ReturnValue.getValue();
4481     } else {
4482       SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
4483       if (HaveInsertPoint() && ReturnValue.isUnused()) {
4484         uint64_t size =
4485             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
4486         UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
4487       }
4488     }
4489     if (IRFunctionArgs.hasSRetArg()) {
4490       IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
4491     } else if (RetAI.isInAlloca()) {
4492       Address Addr =
4493           Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
4494       Builder.CreateStore(SRetPtr.getPointer(), Addr);
4495     }
4496   }
4497 
4498   Address swiftErrorTemp = Address::invalid();
4499   Address swiftErrorArg = Address::invalid();
4500 
4501   // When passing arguments using temporary allocas, we need to add the
4502   // appropriate lifetime markers. This vector keeps track of all the lifetime
4503   // markers that need to be ended right after the call.
4504   SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;
4505 
4506   // Translate all of the arguments as necessary to match the IR lowering.
4507   assert(CallInfo.arg_size() == CallArgs.size() &&
4508          "Mismatch between function signature & arguments.");
4509   unsigned ArgNo = 0;
4510   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
4511   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
4512        I != E; ++I, ++info_it, ++ArgNo) {
4513     const ABIArgInfo &ArgInfo = info_it->info;
4514 
4515     // Insert a padding argument to ensure proper alignment.
4516     if (IRFunctionArgs.hasPaddingArg(ArgNo))
4517       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
4518           llvm::UndefValue::get(ArgInfo.getPaddingType());
4519 
4520     unsigned FirstIRArg, NumIRArgs;
4521     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
4522 
4523     switch (ArgInfo.getKind()) {
4524     case ABIArgInfo::InAlloca: {
4525       assert(NumIRArgs == 0);
4526       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
4527       if (I->isAggregate()) {
4528         Address Addr = I->hasLValue()
4529                            ? I->getKnownLValue().getAddress(*this)
4530                            : I->getKnownRValue().getAggregateAddress();
4531         llvm::Instruction *Placeholder =
4532             cast<llvm::Instruction>(Addr.getPointer());
4533 
4534         if (!ArgInfo.getInAllocaIndirect()) {
4535           // Replace the placeholder with the appropriate argument slot GEP.
4536           CGBuilderTy::InsertPoint IP = Builder.saveIP();
4537           Builder.SetInsertPoint(Placeholder);
4538           Addr = Builder.CreateStructGEP(ArgMemory,
4539                                          ArgInfo.getInAllocaFieldIndex());
4540           Builder.restoreIP(IP);
4541         } else {
4542           // For indirect things such as overaligned structs, replace the
4543           // placeholder with a regular aggregate temporary alloca. Store the
4544           // address of this alloca into the struct.
4545           Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
4546           Address ArgSlot = Builder.CreateStructGEP(
4547               ArgMemory, ArgInfo.getInAllocaFieldIndex());
4548           Builder.CreateStore(Addr.getPointer(), ArgSlot);
4549         }
4550         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
4551       } else if (ArgInfo.getInAllocaIndirect()) {
4552         // Make a temporary alloca and store the address of it into the argument
4553         // struct.
4554         Address Addr = CreateMemTempWithoutCast(
4555             I->Ty, getContext().getTypeAlignInChars(I->Ty),
4556             "indirect-arg-temp");
4557         I->copyInto(*this, Addr);
4558         Address ArgSlot =
4559             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4560         Builder.CreateStore(Addr.getPointer(), ArgSlot);
4561       } else {
4562         // Store the RValue into the argument struct.
4563         Address Addr =
4564             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4565         unsigned AS = Addr.getType()->getPointerAddressSpace();
4566         llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
4567         // There are some cases where a trivial bitcast is not avoidable.  The
4568         // definition of a type later in a translation unit may change it's type
4569         // from {}* to (%struct.foo*)*.
4570         if (Addr.getType() != MemType)
4571           Addr = Builder.CreateBitCast(Addr, MemType);
4572         I->copyInto(*this, Addr);
4573       }
4574       break;
4575     }
4576 
4577     case ABIArgInfo::Indirect:
4578     case ABIArgInfo::IndirectAliased: {
4579       assert(NumIRArgs == 1);
4580       if (!I->isAggregate()) {
4581         // Make a temporary alloca to pass the argument.
4582         Address Addr = CreateMemTempWithoutCast(
4583             I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
4584         IRCallArgs[FirstIRArg] = Addr.getPointer();
4585 
4586         I->copyInto(*this, Addr);
4587       } else {
4588         // We want to avoid creating an unnecessary temporary+copy here;
4589         // however, we need one in three cases:
4590         // 1. If the argument is not byval, and we are required to copy the
4591         //    source.  (This case doesn't occur on any common architecture.)
4592         // 2. If the argument is byval, RV is not sufficiently aligned, and
4593         //    we cannot force it to be sufficiently aligned.
4594         // 3. If the argument is byval, but RV is not located in default
4595         //    or alloca address space.
4596         Address Addr = I->hasLValue()
4597                            ? I->getKnownLValue().getAddress(*this)
4598                            : I->getKnownRValue().getAggregateAddress();
4599         llvm::Value *V = Addr.getPointer();
4600         CharUnits Align = ArgInfo.getIndirectAlign();
4601         const llvm::DataLayout *TD = &CGM.getDataLayout();
4602 
4603         assert((FirstIRArg >= IRFuncTy->getNumParams() ||
4604                 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
4605                     TD->getAllocaAddrSpace()) &&
4606                "indirect argument must be in alloca address space");
4607 
4608         bool NeedCopy = false;
4609 
4610         if (Addr.getAlignment() < Align &&
4611             llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
4612                 Align.getAsAlign()) {
4613           NeedCopy = true;
4614         } else if (I->hasLValue()) {
4615           auto LV = I->getKnownLValue();
4616           auto AS = LV.getAddressSpace();
4617 
4618           if (!ArgInfo.getIndirectByVal() ||
4619               (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
4620             NeedCopy = true;
4621           }
4622           if (!getLangOpts().OpenCL) {
4623             if ((ArgInfo.getIndirectByVal() &&
4624                 (AS != LangAS::Default &&
4625                  AS != CGM.getASTAllocaAddressSpace()))) {
4626               NeedCopy = true;
4627             }
4628           }
4629           // For OpenCL even if RV is located in default or alloca address space
4630           // we don't want to perform address space cast for it.
4631           else if ((ArgInfo.getIndirectByVal() &&
4632                     Addr.getType()->getAddressSpace() != IRFuncTy->
4633                       getParamType(FirstIRArg)->getPointerAddressSpace())) {
4634             NeedCopy = true;
4635           }
4636         }
4637 
4638         if (NeedCopy) {
4639           // Create an aligned temporary, and copy to it.
4640           Address AI = CreateMemTempWithoutCast(
4641               I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
4642           IRCallArgs[FirstIRArg] = AI.getPointer();
4643 
4644           // Emit lifetime markers for the temporary alloca.
4645           uint64_t ByvalTempElementSize =
4646               CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
4647           llvm::Value *LifetimeSize =
4648               EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
4649 
4650           // Add cleanup code to emit the end lifetime marker after the call.
4651           if (LifetimeSize) // In case we disabled lifetime markers.
4652             CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
4653 
4654           // Generate the copy.
4655           I->copyInto(*this, AI);
4656         } else {
4657           // Skip the extra memcpy call.
4658           auto *T = V->getType()->getPointerElementType()->getPointerTo(
4659               CGM.getDataLayout().getAllocaAddrSpace());
4660           IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
4661               *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
4662               true);
4663         }
4664       }
4665       break;
4666     }
4667 
4668     case ABIArgInfo::Ignore:
4669       assert(NumIRArgs == 0);
4670       break;
4671 
4672     case ABIArgInfo::Extend:
4673     case ABIArgInfo::Direct: {
4674       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4675           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4676           ArgInfo.getDirectOffset() == 0) {
4677         assert(NumIRArgs == 1);
4678         llvm::Value *V;
4679         if (!I->isAggregate())
4680           V = I->getKnownRValue().getScalarVal();
4681         else
4682           V = Builder.CreateLoad(
4683               I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4684                              : I->getKnownRValue().getAggregateAddress());
4685 
4686         // Implement swifterror by copying into a new swifterror argument.
4687         // We'll write back in the normal path out of the call.
4688         if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4689               == ParameterABI::SwiftErrorResult) {
4690           assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4691 
4692           QualType pointeeTy = I->Ty->getPointeeType();
4693           swiftErrorArg =
4694             Address(V, getContext().getTypeAlignInChars(pointeeTy));
4695 
4696           swiftErrorTemp =
4697             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4698           V = swiftErrorTemp.getPointer();
4699           cast<llvm::AllocaInst>(V)->setSwiftError(true);
4700 
4701           llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4702           Builder.CreateStore(errorValue, swiftErrorTemp);
4703         }
4704 
4705         // We might have to widen integers, but we should never truncate.
4706         if (ArgInfo.getCoerceToType() != V->getType() &&
4707             V->getType()->isIntegerTy())
4708           V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4709 
4710         // If the argument doesn't match, perform a bitcast to coerce it.  This
4711         // can happen due to trivial type mismatches.
4712         if (FirstIRArg < IRFuncTy->getNumParams() &&
4713             V->getType() != IRFuncTy->getParamType(FirstIRArg))
4714           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4715 
4716         IRCallArgs[FirstIRArg] = V;
4717         break;
4718       }
4719 
4720       // FIXME: Avoid the conversion through memory if possible.
4721       Address Src = Address::invalid();
4722       if (!I->isAggregate()) {
4723         Src = CreateMemTemp(I->Ty, "coerce");
4724         I->copyInto(*this, Src);
4725       } else {
4726         Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4727                              : I->getKnownRValue().getAggregateAddress();
4728       }
4729 
4730       // If the value is offset in memory, apply the offset now.
4731       Src = emitAddressAtOffset(*this, Src, ArgInfo);
4732 
4733       // Fast-isel and the optimizer generally like scalar values better than
4734       // FCAs, so we flatten them if this is safe to do for this argument.
4735       llvm::StructType *STy =
4736             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4737       if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4738         llvm::Type *SrcTy = Src.getElementType();
4739         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4740         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4741 
4742         // If the source type is smaller than the destination type of the
4743         // coerce-to logic, copy the source value into a temp alloca the size
4744         // of the destination type to allow loading all of it. The bits past
4745         // the source value are left undef.
4746         if (SrcSize < DstSize) {
4747           Address TempAlloca
4748             = CreateTempAlloca(STy, Src.getAlignment(),
4749                                Src.getName() + ".coerce");
4750           Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4751           Src = TempAlloca;
4752         } else {
4753           Src = Builder.CreateBitCast(Src,
4754                                       STy->getPointerTo(Src.getAddressSpace()));
4755         }
4756 
4757         assert(NumIRArgs == STy->getNumElements());
4758         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4759           Address EltPtr = Builder.CreateStructGEP(Src, i);
4760           llvm::Value *LI = Builder.CreateLoad(EltPtr);
4761           IRCallArgs[FirstIRArg + i] = LI;
4762         }
4763       } else {
4764         // In the simple case, just pass the coerced loaded value.
4765         assert(NumIRArgs == 1);
4766         llvm::Value *Load =
4767             CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4768 
4769         if (CallInfo.isCmseNSCall()) {
4770           // For certain parameter types, clear padding bits, as they may reveal
4771           // sensitive information.
4772           // Small struct/union types are passed as integer arrays.
4773           auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
4774           if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
4775             Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
4776         }
4777         IRCallArgs[FirstIRArg] = Load;
4778       }
4779 
4780       break;
4781     }
4782 
4783     case ABIArgInfo::CoerceAndExpand: {
4784       auto coercionType = ArgInfo.getCoerceAndExpandType();
4785       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4786 
4787       llvm::Value *tempSize = nullptr;
4788       Address addr = Address::invalid();
4789       Address AllocaAddr = Address::invalid();
4790       if (I->isAggregate()) {
4791         addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4792                               : I->getKnownRValue().getAggregateAddress();
4793 
4794       } else {
4795         RValue RV = I->getKnownRValue();
4796         assert(RV.isScalar()); // complex should always just be direct
4797 
4798         llvm::Type *scalarType = RV.getScalarVal()->getType();
4799         auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4800         auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4801 
4802         // Materialize to a temporary.
4803         addr = CreateTempAlloca(
4804             RV.getScalarVal()->getType(),
4805             CharUnits::fromQuantity(std::max(
4806                 (unsigned)layout->getAlignment().value(), scalarAlign)),
4807             "tmp",
4808             /*ArraySize=*/nullptr, &AllocaAddr);
4809         tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4810 
4811         Builder.CreateStore(RV.getScalarVal(), addr);
4812       }
4813 
4814       addr = Builder.CreateElementBitCast(addr, coercionType);
4815 
4816       unsigned IRArgPos = FirstIRArg;
4817       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4818         llvm::Type *eltType = coercionType->getElementType(i);
4819         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4820         Address eltAddr = Builder.CreateStructGEP(addr, i);
4821         llvm::Value *elt = Builder.CreateLoad(eltAddr);
4822         IRCallArgs[IRArgPos++] = elt;
4823       }
4824       assert(IRArgPos == FirstIRArg + NumIRArgs);
4825 
4826       if (tempSize) {
4827         EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4828       }
4829 
4830       break;
4831     }
4832 
4833     case ABIArgInfo::Expand: {
4834       unsigned IRArgPos = FirstIRArg;
4835       ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4836       assert(IRArgPos == FirstIRArg + NumIRArgs);
4837       break;
4838     }
4839     }
4840   }
4841 
4842   const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4843   llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4844 
4845   // If we're using inalloca, set up that argument.
4846   if (ArgMemory.isValid()) {
4847     llvm::Value *Arg = ArgMemory.getPointer();
4848     if (CallInfo.isVariadic()) {
4849       // When passing non-POD arguments by value to variadic functions, we will
4850       // end up with a variadic prototype and an inalloca call site.  In such
4851       // cases, we can't do any parameter mismatch checks.  Give up and bitcast
4852       // the callee.
4853       unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4854       CalleePtr =
4855           Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
4856     } else {
4857       llvm::Type *LastParamTy =
4858           IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4859       if (Arg->getType() != LastParamTy) {
4860 #ifndef NDEBUG
4861         // Assert that these structs have equivalent element types.
4862         llvm::StructType *FullTy = CallInfo.getArgStruct();
4863         llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4864             cast<llvm::PointerType>(LastParamTy)->getElementType());
4865         assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4866         for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4867                                                 DE = DeclaredTy->element_end(),
4868                                                 FI = FullTy->element_begin();
4869              DI != DE; ++DI, ++FI)
4870           assert(*DI == *FI);
4871 #endif
4872         Arg = Builder.CreateBitCast(Arg, LastParamTy);
4873       }
4874     }
4875     assert(IRFunctionArgs.hasInallocaArg());
4876     IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4877   }
4878 
4879   // 2. Prepare the function pointer.
4880 
4881   // If the callee is a bitcast of a non-variadic function to have a
4882   // variadic function pointer type, check to see if we can remove the
4883   // bitcast.  This comes up with unprototyped functions.
4884   //
4885   // This makes the IR nicer, but more importantly it ensures that we
4886   // can inline the function at -O0 if it is marked always_inline.
4887   auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
4888                                    llvm::Value *Ptr) -> llvm::Function * {
4889     if (!CalleeFT->isVarArg())
4890       return nullptr;
4891 
4892     // Get underlying value if it's a bitcast
4893     if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
4894       if (CE->getOpcode() == llvm::Instruction::BitCast)
4895         Ptr = CE->getOperand(0);
4896     }
4897 
4898     llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
4899     if (!OrigFn)
4900       return nullptr;
4901 
4902     llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4903 
4904     // If the original type is variadic, or if any of the component types
4905     // disagree, we cannot remove the cast.
4906     if (OrigFT->isVarArg() ||
4907         OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4908         OrigFT->getReturnType() != CalleeFT->getReturnType())
4909       return nullptr;
4910 
4911     for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4912       if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4913         return nullptr;
4914 
4915     return OrigFn;
4916   };
4917 
4918   if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
4919     CalleePtr = OrigFn;
4920     IRFuncTy = OrigFn->getFunctionType();
4921   }
4922 
4923   // 3. Perform the actual call.
4924 
4925   // Deactivate any cleanups that we're supposed to do immediately before
4926   // the call.
4927   if (!CallArgs.getCleanupsToDeactivate().empty())
4928     deactivateArgCleanupsBeforeCall(*this, CallArgs);
4929 
4930   // Assert that the arguments we computed match up.  The IR verifier
4931   // will catch this, but this is a common enough source of problems
4932   // during IRGen changes that it's way better for debugging to catch
4933   // it ourselves here.
4934 #ifndef NDEBUG
4935   assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4936   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4937     // Inalloca argument can have different type.
4938     if (IRFunctionArgs.hasInallocaArg() &&
4939         i == IRFunctionArgs.getInallocaArgNo())
4940       continue;
4941     if (i < IRFuncTy->getNumParams())
4942       assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4943   }
4944 #endif
4945 
4946   // Update the largest vector width if any arguments have vector types.
4947   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4948     if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
4949       LargestVectorWidth =
4950           std::max((uint64_t)LargestVectorWidth,
4951                    VT->getPrimitiveSizeInBits().getKnownMinSize());
4952   }
4953 
4954   // Compute the calling convention and attributes.
4955   unsigned CallingConv;
4956   llvm::AttributeList Attrs;
4957   CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4958                              Callee.getAbstractInfo(), Attrs, CallingConv,
4959                              /*AttrOnCallSite=*/true);
4960 
4961   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
4962     if (FD->hasAttr<StrictFPAttr>())
4963       // All calls within a strictfp function are marked strictfp
4964       Attrs =
4965         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4966                            llvm::Attribute::StrictFP);
4967 
4968   // Add call-site nomerge attribute if exists.
4969   if (InNoMergeAttributedStmt)
4970     Attrs =
4971       Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4972                          llvm::Attribute::NoMerge);
4973 
4974   // Apply some call-site-specific attributes.
4975   // TODO: work this into building the attribute set.
4976 
4977   // Apply always_inline to all calls within flatten functions.
4978   // FIXME: should this really take priority over __try, below?
4979   if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4980       !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
4981     Attrs =
4982         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4983                            llvm::Attribute::AlwaysInline);
4984   }
4985 
4986   // Disable inlining inside SEH __try blocks.
4987   if (isSEHTryScope()) {
4988     Attrs =
4989         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4990                            llvm::Attribute::NoInline);
4991   }
4992 
4993   // Decide whether to use a call or an invoke.
4994   bool CannotThrow;
4995   if (currentFunctionUsesSEHTry()) {
4996     // SEH cares about asynchronous exceptions, so everything can "throw."
4997     CannotThrow = false;
4998   } else if (isCleanupPadScope() &&
4999              EHPersonality::get(*this).isMSVCXXPersonality()) {
5000     // The MSVC++ personality will implicitly terminate the program if an
5001     // exception is thrown during a cleanup outside of a try/catch.
5002     // We don't need to model anything in IR to get this behavior.
5003     CannotThrow = true;
5004   } else {
5005     // Otherwise, nounwind call sites will never throw.
5006     CannotThrow = Attrs.hasFnAttribute(llvm::Attribute::NoUnwind);
5007 
5008     if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5009       if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5010         CannotThrow = true;
5011   }
5012 
5013   // If we made a temporary, be sure to clean up after ourselves. Note that we
5014   // can't depend on being inside of an ExprWithCleanups, so we need to manually
5015   // pop this cleanup later on. Being eager about this is OK, since this
5016   // temporary is 'invisible' outside of the callee.
5017   if (UnusedReturnSizePtr)
5018     pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
5019                                          UnusedReturnSizePtr);
5020 
5021   llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
5022 
5023   SmallVector<llvm::OperandBundleDef, 1> BundleList =
5024       getBundlesForFunclet(CalleePtr);
5025 
5026   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
5027     if (FD->hasAttr<StrictFPAttr>())
5028       // All calls within a strictfp function are marked strictfp
5029       Attrs =
5030         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
5031                            llvm::Attribute::StrictFP);
5032 
5033   AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
5034   Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5035 
5036   AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
5037   Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5038 
5039   // Emit the actual call/invoke instruction.
5040   llvm::CallBase *CI;
5041   if (!InvokeDest) {
5042     CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
5043   } else {
5044     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
5045     CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
5046                               BundleList);
5047     EmitBlock(Cont);
5048   }
5049   if (callOrInvoke)
5050     *callOrInvoke = CI;
5051 
5052   // If this is within a function that has the guard(nocf) attribute and is an
5053   // indirect call, add the "guard_nocf" attribute to this call to indicate that
5054   // Control Flow Guard checks should not be added, even if the call is inlined.
5055   if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
5056     if (const auto *A = FD->getAttr<CFGuardAttr>()) {
5057       if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
5058         Attrs = Attrs.addAttribute(
5059             getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
5060     }
5061   }
5062 
5063   // Apply the attributes and calling convention.
5064   CI->setAttributes(Attrs);
5065   CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
5066 
5067   // Apply various metadata.
5068 
5069   if (!CI->getType()->isVoidTy())
5070     CI->setName("call");
5071 
5072   // Update largest vector width from the return type.
5073   if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
5074     LargestVectorWidth =
5075         std::max((uint64_t)LargestVectorWidth,
5076                  VT->getPrimitiveSizeInBits().getKnownMinSize());
5077 
5078   // Insert instrumentation or attach profile metadata at indirect call sites.
5079   // For more details, see the comment before the definition of
5080   // IPVK_IndirectCallTarget in InstrProfData.inc.
5081   if (!CI->getCalledFunction())
5082     PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
5083                      CI, CalleePtr);
5084 
5085   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
5086   // optimizer it can aggressively ignore unwind edges.
5087   if (CGM.getLangOpts().ObjCAutoRefCount)
5088     AddObjCARCExceptionMetadata(CI);
5089 
5090   // Suppress tail calls if requested.
5091   if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
5092     if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
5093       Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
5094   }
5095 
5096   // Add metadata for calls to MSAllocator functions
5097   if (getDebugInfo() && TargetDecl &&
5098       TargetDecl->hasAttr<MSAllocatorAttr>())
5099     getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);
5100 
5101   // 4. Finish the call.
5102 
5103   // If the call doesn't return, finish the basic block and clear the
5104   // insertion point; this allows the rest of IRGen to discard
5105   // unreachable code.
5106   if (CI->doesNotReturn()) {
5107     if (UnusedReturnSizePtr)
5108       PopCleanupBlock();
5109 
5110     // Strip away the noreturn attribute to better diagnose unreachable UB.
5111     if (SanOpts.has(SanitizerKind::Unreachable)) {
5112       // Also remove from function since CallBase::hasFnAttr additionally checks
5113       // attributes of the called function.
5114       if (auto *F = CI->getCalledFunction())
5115         F->removeFnAttr(llvm::Attribute::NoReturn);
5116       CI->removeAttribute(llvm::AttributeList::FunctionIndex,
5117                           llvm::Attribute::NoReturn);
5118 
5119       // Avoid incompatibility with ASan which relies on the `noreturn`
5120       // attribute to insert handler calls.
5121       if (SanOpts.hasOneOf(SanitizerKind::Address |
5122                            SanitizerKind::KernelAddress)) {
5123         SanitizerScope SanScope(this);
5124         llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
5125         Builder.SetInsertPoint(CI);
5126         auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
5127         llvm::FunctionCallee Fn =
5128             CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
5129         EmitNounwindRuntimeCall(Fn);
5130       }
5131     }
5132 
5133     EmitUnreachable(Loc);
5134     Builder.ClearInsertionPoint();
5135 
5136     // FIXME: For now, emit a dummy basic block because expr emitters in
5137     // generally are not ready to handle emitting expressions at unreachable
5138     // points.
5139     EnsureInsertPoint();
5140 
5141     // Return a reasonable RValue.
5142     return GetUndefRValue(RetTy);
5143   }
5144 
5145   // Perform the swifterror writeback.
5146   if (swiftErrorTemp.isValid()) {
5147     llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
5148     Builder.CreateStore(errorResult, swiftErrorArg);
5149   }
5150 
5151   // Emit any call-associated writebacks immediately.  Arguably this
5152   // should happen after any return-value munging.
5153   if (CallArgs.hasWritebacks())
5154     emitWritebacks(*this, CallArgs);
5155 
5156   // The stack cleanup for inalloca arguments has to run out of the normal
5157   // lexical order, so deactivate it and run it manually here.
5158   CallArgs.freeArgumentMemory(*this);
5159 
5160   // Extract the return value.
5161   RValue Ret = [&] {
5162     switch (RetAI.getKind()) {
5163     case ABIArgInfo::CoerceAndExpand: {
5164       auto coercionType = RetAI.getCoerceAndExpandType();
5165 
5166       Address addr = SRetPtr;
5167       addr = Builder.CreateElementBitCast(addr, coercionType);
5168 
5169       assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
5170       bool requiresExtract = isa<llvm::StructType>(CI->getType());
5171 
5172       unsigned unpaddedIndex = 0;
5173       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5174         llvm::Type *eltType = coercionType->getElementType(i);
5175         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
5176         Address eltAddr = Builder.CreateStructGEP(addr, i);
5177         llvm::Value *elt = CI;
5178         if (requiresExtract)
5179           elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
5180         else
5181           assert(unpaddedIndex == 0);
5182         Builder.CreateStore(elt, eltAddr);
5183       }
5184       // FALLTHROUGH
5185       LLVM_FALLTHROUGH;
5186     }
5187 
5188     case ABIArgInfo::InAlloca:
5189     case ABIArgInfo::Indirect: {
5190       RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
5191       if (UnusedReturnSizePtr)
5192         PopCleanupBlock();
5193       return ret;
5194     }
5195 
5196     case ABIArgInfo::Ignore:
5197       // If we are ignoring an argument that had a result, make sure to
5198       // construct the appropriate return value for our caller.
5199       return GetUndefRValue(RetTy);
5200 
5201     case ABIArgInfo::Extend:
5202     case ABIArgInfo::Direct: {
5203       llvm::Type *RetIRTy = ConvertType(RetTy);
5204       if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
5205         switch (getEvaluationKind(RetTy)) {
5206         case TEK_Complex: {
5207           llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
5208           llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
5209           return RValue::getComplex(std::make_pair(Real, Imag));
5210         }
5211         case TEK_Aggregate: {
5212           Address DestPtr = ReturnValue.getValue();
5213           bool DestIsVolatile = ReturnValue.isVolatile();
5214 
5215           if (!DestPtr.isValid()) {
5216             DestPtr = CreateMemTemp(RetTy, "agg.tmp");
5217             DestIsVolatile = false;
5218           }
5219           EmitAggregateStore(CI, DestPtr, DestIsVolatile);
5220           return RValue::getAggregate(DestPtr);
5221         }
5222         case TEK_Scalar: {
5223           // If the argument doesn't match, perform a bitcast to coerce it.  This
5224           // can happen due to trivial type mismatches.
5225           llvm::Value *V = CI;
5226           if (V->getType() != RetIRTy)
5227             V = Builder.CreateBitCast(V, RetIRTy);
5228           return RValue::get(V);
5229         }
5230         }
5231         llvm_unreachable("bad evaluation kind");
5232       }
5233 
5234       Address DestPtr = ReturnValue.getValue();
5235       bool DestIsVolatile = ReturnValue.isVolatile();
5236 
5237       if (!DestPtr.isValid()) {
5238         DestPtr = CreateMemTemp(RetTy, "coerce");
5239         DestIsVolatile = false;
5240       }
5241 
5242       // If the value is offset in memory, apply the offset now.
5243       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
5244       CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
5245 
5246       return convertTempToRValue(DestPtr, RetTy, SourceLocation());
5247     }
5248 
5249     case ABIArgInfo::Expand:
5250     case ABIArgInfo::IndirectAliased:
5251       llvm_unreachable("Invalid ABI kind for return argument");
5252     }
5253 
5254     llvm_unreachable("Unhandled ABIArgInfo::Kind");
5255   } ();
5256 
5257   // Emit the assume_aligned check on the return value.
5258   if (Ret.isScalar() && TargetDecl) {
5259     AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5260     AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5261   }
5262 
5263   // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
5264   // we can't use the full cleanup mechanism.
5265   for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
5266     LifetimeEnd.Emit(*this, /*Flags=*/{});
5267 
5268   if (!ReturnValue.isExternallyDestructed() &&
5269       RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
5270     pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
5271                 RetTy);
5272 
5273   return Ret;
5274 }
5275 
5276 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
5277   if (isVirtual()) {
5278     const CallExpr *CE = getVirtualCallExpr();
5279     return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
5280         CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
5281         CE ? CE->getBeginLoc() : SourceLocation());
5282   }
5283 
5284   return *this;
5285 }
5286 
5287 /* VarArg handling */
5288 
5289 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
5290   VAListAddr = VE->isMicrosoftABI()
5291                  ? EmitMSVAListRef(VE->getSubExpr())
5292                  : EmitVAListRef(VE->getSubExpr());
5293   QualType Ty = VE->getType();
5294   if (VE->isMicrosoftABI())
5295     return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
5296   return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
5297 }
5298