1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliancy.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "CGCall.h"
16 #include "ABIInfo.h"
17 #include "CGBlocks.h"
18 #include "CGCXXABI.h"
19 #include "CGCleanup.h"
20 #include "CodeGenFunction.h"
21 #include "CodeGenModule.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/Decl.h"
24 #include "clang/AST/DeclCXX.h"
25 #include "clang/AST/DeclObjC.h"
26 #include "clang/Basic/TargetBuiltins.h"
27 #include "clang/Basic/TargetInfo.h"
28 #include "clang/CodeGen/CGFunctionInfo.h"
29 #include "clang/CodeGen/SwiftCallingConv.h"
30 #include "clang/Frontend/CodeGenOptions.h"
31 #include "llvm/ADT/StringExtras.h"
32 #include "llvm/Analysis/Utils/Local.h"
33 #include "llvm/Analysis/ValueTracking.h"
34 #include "llvm/IR/Attributes.h"
35 #include "llvm/IR/CallSite.h"
36 #include "llvm/IR/CallingConv.h"
37 #include "llvm/IR/DataLayout.h"
38 #include "llvm/IR/InlineAsm.h"
39 #include "llvm/IR/IntrinsicInst.h"
40 #include "llvm/IR/Intrinsics.h"
41 using namespace clang;
42 using namespace CodeGen;
43 
44 /***/
45 
46 unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
47   switch (CC) {
48   default: return llvm::CallingConv::C;
49   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
50   case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
51   case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
52   case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
53   case CC_Win64: return llvm::CallingConv::Win64;
54   case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
55   case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
56   case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
57   case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
58   // TODO: Add support for __pascal to LLVM.
59   case CC_X86Pascal: return llvm::CallingConv::C;
60   // TODO: Add support for __vectorcall to LLVM.
61   case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
62   case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
63   case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
64   case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
65   case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
66   case CC_Swift: return llvm::CallingConv::Swift;
67   }
68 }
69 
70 /// Derives the 'this' type for codegen purposes, i.e. ignoring method
71 /// qualification.
72 /// FIXME: address space qualification?
73 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
74   QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
75   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
76 }
77 
78 /// Returns the canonical formal type of the given C++ method.
79 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
80   return MD->getType()->getCanonicalTypeUnqualified()
81            .getAs<FunctionProtoType>();
82 }
83 
84 /// Returns the "extra-canonicalized" return type, which discards
85 /// qualifiers on the return type.  Codegen doesn't care about them,
86 /// and it makes ABI code a little easier to be able to assume that
87 /// all parameter and return types are top-level unqualified.
88 static CanQualType GetReturnType(QualType RetTy) {
89   return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
90 }
91 
92 /// Arrange the argument and result information for a value of the given
93 /// unprototyped freestanding function type.
94 const CGFunctionInfo &
95 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
96   // When translating an unprototyped function type, always use a
97   // variadic type.
98   return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
99                                  /*instanceMethod=*/false,
100                                  /*chainCall=*/false, None,
101                                  FTNP->getExtInfo(), {}, RequiredArgs(0));
102 }
103 
104 static void addExtParameterInfosForCall(
105          llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
106                                         const FunctionProtoType *proto,
107                                         unsigned prefixArgs,
108                                         unsigned totalArgs) {
109   assert(proto->hasExtParameterInfos());
110   assert(paramInfos.size() <= prefixArgs);
111   assert(proto->getNumParams() + prefixArgs <= totalArgs);
112 
113   paramInfos.reserve(totalArgs);
114 
115   // Add default infos for any prefix args that don't already have infos.
116   paramInfos.resize(prefixArgs);
117 
118   // Add infos for the prototype.
119   for (const auto &ParamInfo : proto->getExtParameterInfos()) {
120     paramInfos.push_back(ParamInfo);
121     // pass_object_size params have no parameter info.
122     if (ParamInfo.hasPassObjectSize())
123       paramInfos.emplace_back();
124   }
125 
126   assert(paramInfos.size() <= totalArgs &&
127          "Did we forget to insert pass_object_size args?");
128   // Add default infos for the variadic and/or suffix arguments.
129   paramInfos.resize(totalArgs);
130 }
131 
132 /// Adds the formal parameters in FPT to the given prefix. If any parameter in
133 /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
134 static void appendParameterTypes(const CodeGenTypes &CGT,
135                                  SmallVectorImpl<CanQualType> &prefix,
136               SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
137                                  CanQual<FunctionProtoType> FPT) {
138   // Fast path: don't touch param info if we don't need to.
139   if (!FPT->hasExtParameterInfos()) {
140     assert(paramInfos.empty() &&
141            "We have paramInfos, but the prototype doesn't?");
142     prefix.append(FPT->param_type_begin(), FPT->param_type_end());
143     return;
144   }
145 
146   unsigned PrefixSize = prefix.size();
147   // In the vast majority of cases, we'll have precisely FPT->getNumParams()
148   // parameters; the only thing that can change this is the presence of
149   // pass_object_size. So, we preallocate for the common case.
150   prefix.reserve(prefix.size() + FPT->getNumParams());
151 
152   auto ExtInfos = FPT->getExtParameterInfos();
153   assert(ExtInfos.size() == FPT->getNumParams());
154   for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
155     prefix.push_back(FPT->getParamType(I));
156     if (ExtInfos[I].hasPassObjectSize())
157       prefix.push_back(CGT.getContext().getSizeType());
158   }
159 
160   addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
161                               prefix.size());
162 }
163 
164 /// Arrange the LLVM function layout for a value of the given function
165 /// type, on top of any implicit parameters already stored.
166 static const CGFunctionInfo &
167 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
168                         SmallVectorImpl<CanQualType> &prefix,
169                         CanQual<FunctionProtoType> FTP,
170                         const FunctionDecl *FD) {
171   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
172   RequiredArgs Required =
173       RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
174   // FIXME: Kill copy.
175   appendParameterTypes(CGT, prefix, paramInfos, FTP);
176   CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
177 
178   return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
179                                      /*chainCall=*/false, prefix,
180                                      FTP->getExtInfo(), paramInfos,
181                                      Required);
182 }
183 
184 /// Arrange the argument and result information for a value of the
185 /// given freestanding function type.
186 const CGFunctionInfo &
187 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
188                                       const FunctionDecl *FD) {
189   SmallVector<CanQualType, 16> argTypes;
190   return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
191                                    FTP, FD);
192 }
193 
194 static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
195   // Set the appropriate calling convention for the Function.
196   if (D->hasAttr<StdCallAttr>())
197     return CC_X86StdCall;
198 
199   if (D->hasAttr<FastCallAttr>())
200     return CC_X86FastCall;
201 
202   if (D->hasAttr<RegCallAttr>())
203     return CC_X86RegCall;
204 
205   if (D->hasAttr<ThisCallAttr>())
206     return CC_X86ThisCall;
207 
208   if (D->hasAttr<VectorCallAttr>())
209     return CC_X86VectorCall;
210 
211   if (D->hasAttr<PascalAttr>())
212     return CC_X86Pascal;
213 
214   if (PcsAttr *PCS = D->getAttr<PcsAttr>())
215     return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
216 
217   if (D->hasAttr<IntelOclBiccAttr>())
218     return CC_IntelOclBicc;
219 
220   if (D->hasAttr<MSABIAttr>())
221     return IsWindows ? CC_C : CC_Win64;
222 
223   if (D->hasAttr<SysVABIAttr>())
224     return IsWindows ? CC_X86_64SysV : CC_C;
225 
226   if (D->hasAttr<PreserveMostAttr>())
227     return CC_PreserveMost;
228 
229   if (D->hasAttr<PreserveAllAttr>())
230     return CC_PreserveAll;
231 
232   return CC_C;
233 }
234 
235 /// Arrange the argument and result information for a call to an
236 /// unknown C++ non-static member function of the given abstract type.
237 /// (Zero value of RD means we don't have any meaningful "this" argument type,
238 ///  so fall back to a generic pointer type).
239 /// The member function must be an ordinary function, i.e. not a
240 /// constructor or destructor.
241 const CGFunctionInfo &
242 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
243                                    const FunctionProtoType *FTP,
244                                    const CXXMethodDecl *MD) {
245   SmallVector<CanQualType, 16> argTypes;
246 
247   // Add the 'this' pointer.
248   if (RD)
249     argTypes.push_back(GetThisType(Context, RD));
250   else
251     argTypes.push_back(Context.VoidPtrTy);
252 
253   return ::arrangeLLVMFunctionInfo(
254       *this, true, argTypes,
255       FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
256 }
257 
258 /// Arrange the argument and result information for a declaration or
259 /// definition of the given C++ non-static member function.  The
260 /// member function must be an ordinary function, i.e. not a
261 /// constructor or destructor.
262 const CGFunctionInfo &
263 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
264   assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
265   assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
266 
267   CanQual<FunctionProtoType> prototype = GetFormalType(MD);
268 
269   if (MD->isInstance()) {
270     // The abstract case is perfectly fine.
271     const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
272     return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
273   }
274 
275   return arrangeFreeFunctionType(prototype, MD);
276 }
277 
278 bool CodeGenTypes::inheritingCtorHasParams(
279     const InheritedConstructor &Inherited, CXXCtorType Type) {
280   // Parameters are unnecessary if we're constructing a base class subobject
281   // and the inherited constructor lives in a virtual base.
282   return Type == Ctor_Complete ||
283          !Inherited.getShadowDecl()->constructsVirtualBase() ||
284          !Target.getCXXABI().hasConstructorVariants();
285   }
286 
287 const CGFunctionInfo &
288 CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
289                                             StructorType Type) {
290 
291   SmallVector<CanQualType, 16> argTypes;
292   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
293   argTypes.push_back(GetThisType(Context, MD->getParent()));
294 
295   bool PassParams = true;
296 
297   GlobalDecl GD;
298   if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
299     GD = GlobalDecl(CD, toCXXCtorType(Type));
300 
301     // A base class inheriting constructor doesn't get forwarded arguments
302     // needed to construct a virtual base (or base class thereof).
303     if (auto Inherited = CD->getInheritedConstructor())
304       PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
305   } else {
306     auto *DD = dyn_cast<CXXDestructorDecl>(MD);
307     GD = GlobalDecl(DD, toCXXDtorType(Type));
308   }
309 
310   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
311 
312   // Add the formal parameters.
313   if (PassParams)
314     appendParameterTypes(*this, argTypes, paramInfos, FTP);
315 
316   CGCXXABI::AddedStructorArgs AddedArgs =
317       TheCXXABI.buildStructorSignature(MD, Type, argTypes);
318   if (!paramInfos.empty()) {
319     // Note: prefix implies after the first param.
320     if (AddedArgs.Prefix)
321       paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
322                         FunctionProtoType::ExtParameterInfo{});
323     if (AddedArgs.Suffix)
324       paramInfos.append(AddedArgs.Suffix,
325                         FunctionProtoType::ExtParameterInfo{});
326   }
327 
328   RequiredArgs required =
329       (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
330                                       : RequiredArgs::All);
331 
332   FunctionType::ExtInfo extInfo = FTP->getExtInfo();
333   CanQualType resultType = TheCXXABI.HasThisReturn(GD)
334                                ? argTypes.front()
335                                : TheCXXABI.hasMostDerivedReturn(GD)
336                                      ? CGM.getContext().VoidPtrTy
337                                      : Context.VoidTy;
338   return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
339                                  /*chainCall=*/false, argTypes, extInfo,
340                                  paramInfos, required);
341 }
342 
343 static SmallVector<CanQualType, 16>
344 getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
345   SmallVector<CanQualType, 16> argTypes;
346   for (auto &arg : args)
347     argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
348   return argTypes;
349 }
350 
351 static SmallVector<CanQualType, 16>
352 getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
353   SmallVector<CanQualType, 16> argTypes;
354   for (auto &arg : args)
355     argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
356   return argTypes;
357 }
358 
359 static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
360 getExtParameterInfosForCall(const FunctionProtoType *proto,
361                             unsigned prefixArgs, unsigned totalArgs) {
362   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
363   if (proto->hasExtParameterInfos()) {
364     addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
365   }
366   return result;
367 }
368 
369 /// Arrange a call to a C++ method, passing the given arguments.
370 ///
371 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
372 /// parameter.
373 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
374 /// args.
375 /// PassProtoArgs indicates whether `args` has args for the parameters in the
376 /// given CXXConstructorDecl.
377 const CGFunctionInfo &
378 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
379                                         const CXXConstructorDecl *D,
380                                         CXXCtorType CtorKind,
381                                         unsigned ExtraPrefixArgs,
382                                         unsigned ExtraSuffixArgs,
383                                         bool PassProtoArgs) {
384   // FIXME: Kill copy.
385   SmallVector<CanQualType, 16> ArgTypes;
386   for (const auto &Arg : args)
387     ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
388 
389   // +1 for implicit this, which should always be args[0].
390   unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
391 
392   CanQual<FunctionProtoType> FPT = GetFormalType(D);
393   RequiredArgs Required =
394       RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
395   GlobalDecl GD(D, CtorKind);
396   CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
397                                ? ArgTypes.front()
398                                : TheCXXABI.hasMostDerivedReturn(GD)
399                                      ? CGM.getContext().VoidPtrTy
400                                      : Context.VoidTy;
401 
402   FunctionType::ExtInfo Info = FPT->getExtInfo();
403   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
404   // If the prototype args are elided, we should only have ABI-specific args,
405   // which never have param info.
406   if (PassProtoArgs && FPT->hasExtParameterInfos()) {
407     // ABI-specific suffix arguments are treated the same as variadic arguments.
408     addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
409                                 ArgTypes.size());
410   }
411   return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
412                                  /*chainCall=*/false, ArgTypes, Info,
413                                  ParamInfos, Required);
414 }
415 
416 /// Arrange the argument and result information for the declaration or
417 /// definition of the given function.
418 const CGFunctionInfo &
419 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
420   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
421     if (MD->isInstance())
422       return arrangeCXXMethodDeclaration(MD);
423 
424   CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
425 
426   assert(isa<FunctionType>(FTy));
427 
428   // When declaring a function without a prototype, always use a
429   // non-variadic type.
430   if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
431     return arrangeLLVMFunctionInfo(
432         noProto->getReturnType(), /*instanceMethod=*/false,
433         /*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All);
434   }
435 
436   return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
437 }
438 
439 /// Arrange the argument and result information for the declaration or
440 /// definition of an Objective-C method.
441 const CGFunctionInfo &
442 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
443   // It happens that this is the same as a call with no optional
444   // arguments, except also using the formal 'self' type.
445   return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
446 }
447 
448 /// Arrange the argument and result information for the function type
449 /// through which to perform a send to the given Objective-C method,
450 /// using the given receiver type.  The receiver type is not always
451 /// the 'self' type of the method or even an Objective-C pointer type.
452 /// This is *not* the right method for actually performing such a
453 /// message send, due to the possibility of optional arguments.
454 const CGFunctionInfo &
455 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
456                                               QualType receiverType) {
457   SmallVector<CanQualType, 16> argTys;
458   SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
459   argTys.push_back(Context.getCanonicalParamType(receiverType));
460   argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
461   // FIXME: Kill copy?
462   for (const auto *I : MD->parameters()) {
463     argTys.push_back(Context.getCanonicalParamType(I->getType()));
464     auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
465         I->hasAttr<NoEscapeAttr>());
466     extParamInfos.push_back(extParamInfo);
467   }
468 
469   FunctionType::ExtInfo einfo;
470   bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
471   einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
472 
473   if (getContext().getLangOpts().ObjCAutoRefCount &&
474       MD->hasAttr<NSReturnsRetainedAttr>())
475     einfo = einfo.withProducesResult(true);
476 
477   RequiredArgs required =
478     (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
479 
480   return arrangeLLVMFunctionInfo(
481       GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
482       /*chainCall=*/false, argTys, einfo, extParamInfos, required);
483 }
484 
485 const CGFunctionInfo &
486 CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
487                                                  const CallArgList &args) {
488   auto argTypes = getArgTypesForCall(Context, args);
489   FunctionType::ExtInfo einfo;
490 
491   return arrangeLLVMFunctionInfo(
492       GetReturnType(returnType), /*instanceMethod=*/false,
493       /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
494 }
495 
496 const CGFunctionInfo &
497 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
498   // FIXME: Do we need to handle ObjCMethodDecl?
499   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
500 
501   if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
502     return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
503 
504   if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
505     return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
506 
507   return arrangeFunctionDeclaration(FD);
508 }
509 
510 /// Arrange a thunk that takes 'this' as the first parameter followed by
511 /// varargs.  Return a void pointer, regardless of the actual return type.
512 /// The body of the thunk will end in a musttail call to a function of the
513 /// correct type, and the caller will bitcast the function to the correct
514 /// prototype.
515 const CGFunctionInfo &
516 CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
517   assert(MD->isVirtual() && "only methods have thunks");
518   CanQual<FunctionProtoType> FTP = GetFormalType(MD);
519   CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
520   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
521                                  /*chainCall=*/false, ArgTys,
522                                  FTP->getExtInfo(), {}, RequiredArgs(1));
523 }
524 
525 const CGFunctionInfo &
526 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
527                                    CXXCtorType CT) {
528   assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
529 
530   CanQual<FunctionProtoType> FTP = GetFormalType(CD);
531   SmallVector<CanQualType, 2> ArgTys;
532   const CXXRecordDecl *RD = CD->getParent();
533   ArgTys.push_back(GetThisType(Context, RD));
534   if (CT == Ctor_CopyingClosure)
535     ArgTys.push_back(*FTP->param_type_begin());
536   if (RD->getNumVBases() > 0)
537     ArgTys.push_back(Context.IntTy);
538   CallingConv CC = Context.getDefaultCallingConvention(
539       /*IsVariadic=*/false, /*IsCXXMethod=*/true);
540   return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
541                                  /*chainCall=*/false, ArgTys,
542                                  FunctionType::ExtInfo(CC), {},
543                                  RequiredArgs::All);
544 }
545 
546 /// Arrange a call as unto a free function, except possibly with an
547 /// additional number of formal parameters considered required.
548 static const CGFunctionInfo &
549 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
550                             CodeGenModule &CGM,
551                             const CallArgList &args,
552                             const FunctionType *fnType,
553                             unsigned numExtraRequiredArgs,
554                             bool chainCall) {
555   assert(args.size() >= numExtraRequiredArgs);
556 
557   llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
558 
559   // In most cases, there are no optional arguments.
560   RequiredArgs required = RequiredArgs::All;
561 
562   // If we have a variadic prototype, the required arguments are the
563   // extra prefix plus the arguments in the prototype.
564   if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
565     if (proto->isVariadic())
566       required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
567 
568     if (proto->hasExtParameterInfos())
569       addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
570                                   args.size());
571 
572   // If we don't have a prototype at all, but we're supposed to
573   // explicitly use the variadic convention for unprototyped calls,
574   // treat all of the arguments as required but preserve the nominal
575   // possibility of variadics.
576   } else if (CGM.getTargetCodeGenInfo()
577                 .isNoProtoCallVariadic(args,
578                                        cast<FunctionNoProtoType>(fnType))) {
579     required = RequiredArgs(args.size());
580   }
581 
582   // FIXME: Kill copy.
583   SmallVector<CanQualType, 16> argTypes;
584   for (const auto &arg : args)
585     argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
586   return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
587                                      /*instanceMethod=*/false, chainCall,
588                                      argTypes, fnType->getExtInfo(), paramInfos,
589                                      required);
590 }
591 
592 /// Figure out the rules for calling a function with the given formal
593 /// type using the given arguments.  The arguments are necessary
594 /// because the function might be unprototyped, in which case it's
595 /// target-dependent in crazy ways.
596 const CGFunctionInfo &
597 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
598                                       const FunctionType *fnType,
599                                       bool chainCall) {
600   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
601                                      chainCall ? 1 : 0, chainCall);
602 }
603 
604 /// A block function is essentially a free function with an
605 /// extra implicit argument.
606 const CGFunctionInfo &
607 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
608                                        const FunctionType *fnType) {
609   return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
610                                      /*chainCall=*/false);
611 }
612 
613 const CGFunctionInfo &
614 CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
615                                               const FunctionArgList &params) {
616   auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
617   auto argTypes = getArgTypesForDeclaration(Context, params);
618 
619   return arrangeLLVMFunctionInfo(
620       GetReturnType(proto->getReturnType()),
621       /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
622       proto->getExtInfo(), paramInfos,
623       RequiredArgs::forPrototypePlus(proto, 1, nullptr));
624 }
625 
626 const CGFunctionInfo &
627 CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
628                                          const CallArgList &args) {
629   // FIXME: Kill copy.
630   SmallVector<CanQualType, 16> argTypes;
631   for (const auto &Arg : args)
632     argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
633   return arrangeLLVMFunctionInfo(
634       GetReturnType(resultType), /*instanceMethod=*/false,
635       /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
636       /*paramInfos=*/ {}, RequiredArgs::All);
637 }
638 
639 const CGFunctionInfo &
640 CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
641                                                 const FunctionArgList &args) {
642   auto argTypes = getArgTypesForDeclaration(Context, args);
643 
644   return arrangeLLVMFunctionInfo(
645       GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
646       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
647 }
648 
649 const CGFunctionInfo &
650 CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
651                                               ArrayRef<CanQualType> argTypes) {
652   return arrangeLLVMFunctionInfo(
653       resultType, /*instanceMethod=*/false, /*chainCall=*/false,
654       argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
655 }
656 
657 /// Arrange a call to a C++ method, passing the given arguments.
658 ///
659 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
660 /// does not count `this`.
661 const CGFunctionInfo &
662 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
663                                    const FunctionProtoType *proto,
664                                    RequiredArgs required,
665                                    unsigned numPrefixArgs) {
666   assert(numPrefixArgs + 1 <= args.size() &&
667          "Emitting a call with less args than the required prefix?");
668   // Add one to account for `this`. It's a bit awkward here, but we don't count
669   // `this` in similar places elsewhere.
670   auto paramInfos =
671     getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
672 
673   // FIXME: Kill copy.
674   auto argTypes = getArgTypesForCall(Context, args);
675 
676   FunctionType::ExtInfo info = proto->getExtInfo();
677   return arrangeLLVMFunctionInfo(
678       GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
679       /*chainCall=*/false, argTypes, info, paramInfos, required);
680 }
681 
682 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
683   return arrangeLLVMFunctionInfo(
684       getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
685       None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
686 }
687 
688 const CGFunctionInfo &
689 CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
690                           const CallArgList &args) {
691   assert(signature.arg_size() <= args.size());
692   if (signature.arg_size() == args.size())
693     return signature;
694 
695   SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
696   auto sigParamInfos = signature.getExtParameterInfos();
697   if (!sigParamInfos.empty()) {
698     paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
699     paramInfos.resize(args.size());
700   }
701 
702   auto argTypes = getArgTypesForCall(Context, args);
703 
704   assert(signature.getRequiredArgs().allowsOptionalArgs());
705   return arrangeLLVMFunctionInfo(signature.getReturnType(),
706                                  signature.isInstanceMethod(),
707                                  signature.isChainCall(),
708                                  argTypes,
709                                  signature.getExtInfo(),
710                                  paramInfos,
711                                  signature.getRequiredArgs());
712 }
713 
714 namespace clang {
715 namespace CodeGen {
716 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
717 }
718 }
719 
720 /// Arrange the argument and result information for an abstract value
721 /// of a given function type.  This is the method which all of the
722 /// above functions ultimately defer to.
723 const CGFunctionInfo &
724 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
725                                       bool instanceMethod,
726                                       bool chainCall,
727                                       ArrayRef<CanQualType> argTypes,
728                                       FunctionType::ExtInfo info,
729                      ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
730                                       RequiredArgs required) {
731   assert(std::all_of(argTypes.begin(), argTypes.end(),
732                      [](CanQualType T) { return T.isCanonicalAsParam(); }));
733 
734   // Lookup or create unique function info.
735   llvm::FoldingSetNodeID ID;
736   CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
737                           required, resultType, argTypes);
738 
739   void *insertPos = nullptr;
740   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
741   if (FI)
742     return *FI;
743 
744   unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
745 
746   // Construct the function info.  We co-allocate the ArgInfos.
747   FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
748                               paramInfos, resultType, argTypes, required);
749   FunctionInfos.InsertNode(FI, insertPos);
750 
751   bool inserted = FunctionsBeingProcessed.insert(FI).second;
752   (void)inserted;
753   assert(inserted && "Recursively being processed?");
754 
755   // Compute ABI information.
756   if (CC == llvm::CallingConv::SPIR_KERNEL) {
757     // Force target independent argument handling for the host visible
758     // kernel functions.
759     computeSPIRKernelABIInfo(CGM, *FI);
760   } else if (info.getCC() == CC_Swift) {
761     swiftcall::computeABIInfo(CGM, *FI);
762   } else {
763     getABIInfo().computeInfo(*FI);
764   }
765 
766   // Loop over all of the computed argument and return value info.  If any of
767   // them are direct or extend without a specified coerce type, specify the
768   // default now.
769   ABIArgInfo &retInfo = FI->getReturnInfo();
770   if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
771     retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
772 
773   for (auto &I : FI->arguments())
774     if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
775       I.info.setCoerceToType(ConvertType(I.type));
776 
777   bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
778   assert(erased && "Not in set?");
779 
780   return *FI;
781 }
782 
783 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
784                                        bool instanceMethod,
785                                        bool chainCall,
786                                        const FunctionType::ExtInfo &info,
787                                        ArrayRef<ExtParameterInfo> paramInfos,
788                                        CanQualType resultType,
789                                        ArrayRef<CanQualType> argTypes,
790                                        RequiredArgs required) {
791   assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
792 
793   void *buffer =
794     operator new(totalSizeToAlloc<ArgInfo,             ExtParameterInfo>(
795                                   argTypes.size() + 1, paramInfos.size()));
796 
797   CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
798   FI->CallingConvention = llvmCC;
799   FI->EffectiveCallingConvention = llvmCC;
800   FI->ASTCallingConvention = info.getCC();
801   FI->InstanceMethod = instanceMethod;
802   FI->ChainCall = chainCall;
803   FI->NoReturn = info.getNoReturn();
804   FI->ReturnsRetained = info.getProducesResult();
805   FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
806   FI->NoCfCheck = info.getNoCfCheck();
807   FI->Required = required;
808   FI->HasRegParm = info.getHasRegParm();
809   FI->RegParm = info.getRegParm();
810   FI->ArgStruct = nullptr;
811   FI->ArgStructAlign = 0;
812   FI->NumArgs = argTypes.size();
813   FI->HasExtParameterInfos = !paramInfos.empty();
814   FI->getArgsBuffer()[0].type = resultType;
815   for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
816     FI->getArgsBuffer()[i + 1].type = argTypes[i];
817   for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
818     FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
819   return FI;
820 }
821 
822 /***/
823 
824 namespace {
825 // ABIArgInfo::Expand implementation.
826 
827 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
828 struct TypeExpansion {
829   enum TypeExpansionKind {
830     // Elements of constant arrays are expanded recursively.
831     TEK_ConstantArray,
832     // Record fields are expanded recursively (but if record is a union, only
833     // the field with the largest size is expanded).
834     TEK_Record,
835     // For complex types, real and imaginary parts are expanded recursively.
836     TEK_Complex,
837     // All other types are not expandable.
838     TEK_None
839   };
840 
841   const TypeExpansionKind Kind;
842 
843   TypeExpansion(TypeExpansionKind K) : Kind(K) {}
844   virtual ~TypeExpansion() {}
845 };
846 
847 struct ConstantArrayExpansion : TypeExpansion {
848   QualType EltTy;
849   uint64_t NumElts;
850 
851   ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
852       : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
853   static bool classof(const TypeExpansion *TE) {
854     return TE->Kind == TEK_ConstantArray;
855   }
856 };
857 
858 struct RecordExpansion : TypeExpansion {
859   SmallVector<const CXXBaseSpecifier *, 1> Bases;
860 
861   SmallVector<const FieldDecl *, 1> Fields;
862 
863   RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
864                   SmallVector<const FieldDecl *, 1> &&Fields)
865       : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
866         Fields(std::move(Fields)) {}
867   static bool classof(const TypeExpansion *TE) {
868     return TE->Kind == TEK_Record;
869   }
870 };
871 
872 struct ComplexExpansion : TypeExpansion {
873   QualType EltTy;
874 
875   ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
876   static bool classof(const TypeExpansion *TE) {
877     return TE->Kind == TEK_Complex;
878   }
879 };
880 
881 struct NoExpansion : TypeExpansion {
882   NoExpansion() : TypeExpansion(TEK_None) {}
883   static bool classof(const TypeExpansion *TE) {
884     return TE->Kind == TEK_None;
885   }
886 };
887 }  // namespace
888 
889 static std::unique_ptr<TypeExpansion>
890 getTypeExpansion(QualType Ty, const ASTContext &Context) {
891   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
892     return llvm::make_unique<ConstantArrayExpansion>(
893         AT->getElementType(), AT->getSize().getZExtValue());
894   }
895   if (const RecordType *RT = Ty->getAs<RecordType>()) {
896     SmallVector<const CXXBaseSpecifier *, 1> Bases;
897     SmallVector<const FieldDecl *, 1> Fields;
898     const RecordDecl *RD = RT->getDecl();
899     assert(!RD->hasFlexibleArrayMember() &&
900            "Cannot expand structure with flexible array.");
901     if (RD->isUnion()) {
902       // Unions can be here only in degenerative cases - all the fields are same
903       // after flattening. Thus we have to use the "largest" field.
904       const FieldDecl *LargestFD = nullptr;
905       CharUnits UnionSize = CharUnits::Zero();
906 
907       for (const auto *FD : RD->fields()) {
908         if (FD->isZeroLengthBitField(Context))
909           continue;
910         assert(!FD->isBitField() &&
911                "Cannot expand structure with bit-field members.");
912         CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
913         if (UnionSize < FieldSize) {
914           UnionSize = FieldSize;
915           LargestFD = FD;
916         }
917       }
918       if (LargestFD)
919         Fields.push_back(LargestFD);
920     } else {
921       if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
922         assert(!CXXRD->isDynamicClass() &&
923                "cannot expand vtable pointers in dynamic classes");
924         for (const CXXBaseSpecifier &BS : CXXRD->bases())
925           Bases.push_back(&BS);
926       }
927 
928       for (const auto *FD : RD->fields()) {
929         if (FD->isZeroLengthBitField(Context))
930           continue;
931         assert(!FD->isBitField() &&
932                "Cannot expand structure with bit-field members.");
933         Fields.push_back(FD);
934       }
935     }
936     return llvm::make_unique<RecordExpansion>(std::move(Bases),
937                                               std::move(Fields));
938   }
939   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
940     return llvm::make_unique<ComplexExpansion>(CT->getElementType());
941   }
942   return llvm::make_unique<NoExpansion>();
943 }
944 
945 static int getExpansionSize(QualType Ty, const ASTContext &Context) {
946   auto Exp = getTypeExpansion(Ty, Context);
947   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
948     return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
949   }
950   if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
951     int Res = 0;
952     for (auto BS : RExp->Bases)
953       Res += getExpansionSize(BS->getType(), Context);
954     for (auto FD : RExp->Fields)
955       Res += getExpansionSize(FD->getType(), Context);
956     return Res;
957   }
958   if (isa<ComplexExpansion>(Exp.get()))
959     return 2;
960   assert(isa<NoExpansion>(Exp.get()));
961   return 1;
962 }
963 
964 void
965 CodeGenTypes::getExpandedTypes(QualType Ty,
966                                SmallVectorImpl<llvm::Type *>::iterator &TI) {
967   auto Exp = getTypeExpansion(Ty, Context);
968   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
969     for (int i = 0, n = CAExp->NumElts; i < n; i++) {
970       getExpandedTypes(CAExp->EltTy, TI);
971     }
972   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
973     for (auto BS : RExp->Bases)
974       getExpandedTypes(BS->getType(), TI);
975     for (auto FD : RExp->Fields)
976       getExpandedTypes(FD->getType(), TI);
977   } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
978     llvm::Type *EltTy = ConvertType(CExp->EltTy);
979     *TI++ = EltTy;
980     *TI++ = EltTy;
981   } else {
982     assert(isa<NoExpansion>(Exp.get()));
983     *TI++ = ConvertType(Ty);
984   }
985 }
986 
987 static void forConstantArrayExpansion(CodeGenFunction &CGF,
988                                       ConstantArrayExpansion *CAE,
989                                       Address BaseAddr,
990                                       llvm::function_ref<void(Address)> Fn) {
991   CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
992   CharUnits EltAlign =
993     BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
994 
995   for (int i = 0, n = CAE->NumElts; i < n; i++) {
996     llvm::Value *EltAddr =
997       CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
998     Fn(Address(EltAddr, EltAlign));
999   }
1000 }
1001 
1002 void CodeGenFunction::ExpandTypeFromArgs(
1003     QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
1004   assert(LV.isSimple() &&
1005          "Unexpected non-simple lvalue during struct expansion.");
1006 
1007   auto Exp = getTypeExpansion(Ty, getContext());
1008   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1009     forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
1010                               [&](Address EltAddr) {
1011       LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1012       ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1013     });
1014   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1015     Address This = LV.getAddress();
1016     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1017       // Perform a single step derived-to-base conversion.
1018       Address Base =
1019           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1020                                 /*NullCheckValue=*/false, SourceLocation());
1021       LValue SubLV = MakeAddrLValue(Base, BS->getType());
1022 
1023       // Recurse onto bases.
1024       ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1025     }
1026     for (auto FD : RExp->Fields) {
1027       // FIXME: What are the right qualifiers here?
1028       LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1029       ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1030     }
1031   } else if (isa<ComplexExpansion>(Exp.get())) {
1032     auto realValue = *AI++;
1033     auto imagValue = *AI++;
1034     EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1035   } else {
1036     assert(isa<NoExpansion>(Exp.get()));
1037     EmitStoreThroughLValue(RValue::get(*AI++), LV);
1038   }
1039 }
1040 
1041 void CodeGenFunction::ExpandTypeToArgs(
1042     QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1043     SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1044   auto Exp = getTypeExpansion(Ty, getContext());
1045   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1046     Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1047                                    : Arg.getKnownRValue().getAggregateAddress();
1048     forConstantArrayExpansion(
1049         *this, CAExp, Addr, [&](Address EltAddr) {
1050           CallArg EltArg = CallArg(
1051               convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1052               CAExp->EltTy);
1053           ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1054                            IRCallArgPos);
1055         });
1056   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1057     Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
1058                                    : Arg.getKnownRValue().getAggregateAddress();
1059     for (const CXXBaseSpecifier *BS : RExp->Bases) {
1060       // Perform a single step derived-to-base conversion.
1061       Address Base =
1062           GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1063                                 /*NullCheckValue=*/false, SourceLocation());
1064       CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1065 
1066       // Recurse onto bases.
1067       ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1068                        IRCallArgPos);
1069     }
1070 
1071     LValue LV = MakeAddrLValue(This, Ty);
1072     for (auto FD : RExp->Fields) {
1073       CallArg FldArg =
1074           CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1075       ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1076                        IRCallArgPos);
1077     }
1078   } else if (isa<ComplexExpansion>(Exp.get())) {
1079     ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1080     IRCallArgs[IRCallArgPos++] = CV.first;
1081     IRCallArgs[IRCallArgPos++] = CV.second;
1082   } else {
1083     assert(isa<NoExpansion>(Exp.get()));
1084     auto RV = Arg.getKnownRValue();
1085     assert(RV.isScalar() &&
1086            "Unexpected non-scalar rvalue during struct expansion.");
1087 
1088     // Insert a bitcast as needed.
1089     llvm::Value *V = RV.getScalarVal();
1090     if (IRCallArgPos < IRFuncTy->getNumParams() &&
1091         V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1092       V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1093 
1094     IRCallArgs[IRCallArgPos++] = V;
1095   }
1096 }
1097 
1098 /// Create a temporary allocation for the purposes of coercion.
1099 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1100                                            CharUnits MinAlign) {
1101   // Don't use an alignment that's worse than what LLVM would prefer.
1102   auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1103   CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1104 
1105   return CGF.CreateTempAlloca(Ty, Align);
1106 }
1107 
1108 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1109 /// accessing some number of bytes out of it, try to gep into the struct to get
1110 /// at its inner goodness.  Dive as deep as possible without entering an element
1111 /// with an in-memory size smaller than DstSize.
1112 static Address
1113 EnterStructPointerForCoercedAccess(Address SrcPtr,
1114                                    llvm::StructType *SrcSTy,
1115                                    uint64_t DstSize, CodeGenFunction &CGF) {
1116   // We can't dive into a zero-element struct.
1117   if (SrcSTy->getNumElements() == 0) return SrcPtr;
1118 
1119   llvm::Type *FirstElt = SrcSTy->getElementType(0);
1120 
1121   // If the first elt is at least as large as what we're looking for, or if the
1122   // first element is the same size as the whole struct, we can enter it. The
1123   // comparison must be made on the store size and not the alloca size. Using
1124   // the alloca size may overstate the size of the load.
1125   uint64_t FirstEltSize =
1126     CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1127   if (FirstEltSize < DstSize &&
1128       FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1129     return SrcPtr;
1130 
1131   // GEP into the first element.
1132   SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");
1133 
1134   // If the first element is a struct, recurse.
1135   llvm::Type *SrcTy = SrcPtr.getElementType();
1136   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1137     return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1138 
1139   return SrcPtr;
1140 }
1141 
1142 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1143 /// are either integers or pointers.  This does a truncation of the value if it
1144 /// is too large or a zero extension if it is too small.
1145 ///
1146 /// This behaves as if the value were coerced through memory, so on big-endian
1147 /// targets the high bits are preserved in a truncation, while little-endian
1148 /// targets preserve the low bits.
1149 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1150                                              llvm::Type *Ty,
1151                                              CodeGenFunction &CGF) {
1152   if (Val->getType() == Ty)
1153     return Val;
1154 
1155   if (isa<llvm::PointerType>(Val->getType())) {
1156     // If this is Pointer->Pointer avoid conversion to and from int.
1157     if (isa<llvm::PointerType>(Ty))
1158       return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1159 
1160     // Convert the pointer to an integer so we can play with its width.
1161     Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1162   }
1163 
1164   llvm::Type *DestIntTy = Ty;
1165   if (isa<llvm::PointerType>(DestIntTy))
1166     DestIntTy = CGF.IntPtrTy;
1167 
1168   if (Val->getType() != DestIntTy) {
1169     const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1170     if (DL.isBigEndian()) {
1171       // Preserve the high bits on big-endian targets.
1172       // That is what memory coercion does.
1173       uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1174       uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1175 
1176       if (SrcSize > DstSize) {
1177         Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1178         Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1179       } else {
1180         Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1181         Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1182       }
1183     } else {
1184       // Little-endian targets preserve the low bits. No shifts required.
1185       Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1186     }
1187   }
1188 
1189   if (isa<llvm::PointerType>(Ty))
1190     Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1191   return Val;
1192 }
1193 
1194 
1195 
1196 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1197 /// a pointer to an object of type \arg Ty, known to be aligned to
1198 /// \arg SrcAlign bytes.
1199 ///
1200 /// This safely handles the case when the src type is smaller than the
1201 /// destination type; in this situation the values of bits which not
1202 /// present in the src are undefined.
1203 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1204                                       CodeGenFunction &CGF) {
1205   llvm::Type *SrcTy = Src.getElementType();
1206 
1207   // If SrcTy and Ty are the same, just do a load.
1208   if (SrcTy == Ty)
1209     return CGF.Builder.CreateLoad(Src);
1210 
1211   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1212 
1213   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1214     Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
1215     SrcTy = Src.getType()->getElementType();
1216   }
1217 
1218   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1219 
1220   // If the source and destination are integer or pointer types, just do an
1221   // extension or truncation to the desired type.
1222   if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1223       (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1224     llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1225     return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1226   }
1227 
1228   // If load is legal, just bitcast the src pointer.
1229   if (SrcSize >= DstSize) {
1230     // Generally SrcSize is never greater than DstSize, since this means we are
1231     // losing bits. However, this can happen in cases where the structure has
1232     // additional padding, for example due to a user specified alignment.
1233     //
1234     // FIXME: Assert that we aren't truncating non-padding bits when have access
1235     // to that information.
1236     Src = CGF.Builder.CreateBitCast(Src,
1237                                     Ty->getPointerTo(Src.getAddressSpace()));
1238     return CGF.Builder.CreateLoad(Src);
1239   }
1240 
1241   // Otherwise do coercion through memory. This is stupid, but simple.
1242   Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
1243   Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
1244   Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.AllocaInt8PtrTy);
1245   CGF.Builder.CreateMemCpy(Casted, SrcCasted,
1246       llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
1247       false);
1248   return CGF.Builder.CreateLoad(Tmp);
1249 }
1250 
1251 // Function to store a first-class aggregate into memory.  We prefer to
1252 // store the elements rather than the aggregate to be more friendly to
1253 // fast-isel.
1254 // FIXME: Do we need to recurse here?
1255 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
1256                           Address Dest, bool DestIsVolatile) {
1257   // Prefer scalar stores to first-class aggregate stores.
1258   if (llvm::StructType *STy =
1259         dyn_cast<llvm::StructType>(Val->getType())) {
1260     const llvm::StructLayout *Layout =
1261       CGF.CGM.getDataLayout().getStructLayout(STy);
1262 
1263     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1264       auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
1265       Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
1266       llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
1267       CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1268     }
1269   } else {
1270     CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
1271   }
1272 }
1273 
1274 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1275 /// where the source and destination may have different types.  The
1276 /// destination is known to be aligned to \arg DstAlign bytes.
1277 ///
1278 /// This safely handles the case when the src type is larger than the
1279 /// destination type; the upper bits of the src will be lost.
1280 static void CreateCoercedStore(llvm::Value *Src,
1281                                Address Dst,
1282                                bool DstIsVolatile,
1283                                CodeGenFunction &CGF) {
1284   llvm::Type *SrcTy = Src->getType();
1285   llvm::Type *DstTy = Dst.getType()->getElementType();
1286   if (SrcTy == DstTy) {
1287     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1288     return;
1289   }
1290 
1291   uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1292 
1293   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1294     Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
1295     DstTy = Dst.getType()->getElementType();
1296   }
1297 
1298   // If the source and destination are integer or pointer types, just do an
1299   // extension or truncation to the desired type.
1300   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1301       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1302     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1303     CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1304     return;
1305   }
1306 
1307   uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1308 
1309   // If store is legal, just bitcast the src pointer.
1310   if (SrcSize <= DstSize) {
1311     Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1312     BuildAggStore(CGF, Src, Dst, DstIsVolatile);
1313   } else {
1314     // Otherwise do coercion through memory. This is stupid, but
1315     // simple.
1316 
1317     // Generally SrcSize is never greater than DstSize, since this means we are
1318     // losing bits. However, this can happen in cases where the structure has
1319     // additional padding, for example due to a user specified alignment.
1320     //
1321     // FIXME: Assert that we aren't truncating non-padding bits when have access
1322     // to that information.
1323     Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1324     CGF.Builder.CreateStore(Src, Tmp);
1325     Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
1326     Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.AllocaInt8PtrTy);
1327     CGF.Builder.CreateMemCpy(DstCasted, Casted,
1328         llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
1329         false);
1330   }
1331 }
1332 
1333 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1334                                    const ABIArgInfo &info) {
1335   if (unsigned offset = info.getDirectOffset()) {
1336     addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1337     addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1338                                              CharUnits::fromQuantity(offset));
1339     addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1340   }
1341   return addr;
1342 }
1343 
1344 namespace {
1345 
1346 /// Encapsulates information about the way function arguments from
1347 /// CGFunctionInfo should be passed to actual LLVM IR function.
1348 class ClangToLLVMArgMapping {
1349   static const unsigned InvalidIndex = ~0U;
1350   unsigned InallocaArgNo;
1351   unsigned SRetArgNo;
1352   unsigned TotalIRArgs;
1353 
1354   /// Arguments of LLVM IR function corresponding to single Clang argument.
1355   struct IRArgs {
1356     unsigned PaddingArgIndex;
1357     // Argument is expanded to IR arguments at positions
1358     // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1359     unsigned FirstArgIndex;
1360     unsigned NumberOfArgs;
1361 
1362     IRArgs()
1363         : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1364           NumberOfArgs(0) {}
1365   };
1366 
1367   SmallVector<IRArgs, 8> ArgInfo;
1368 
1369 public:
1370   ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1371                         bool OnlyRequiredArgs = false)
1372       : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1373         ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1374     construct(Context, FI, OnlyRequiredArgs);
1375   }
1376 
1377   bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1378   unsigned getInallocaArgNo() const {
1379     assert(hasInallocaArg());
1380     return InallocaArgNo;
1381   }
1382 
1383   bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1384   unsigned getSRetArgNo() const {
1385     assert(hasSRetArg());
1386     return SRetArgNo;
1387   }
1388 
1389   unsigned totalIRArgs() const { return TotalIRArgs; }
1390 
1391   bool hasPaddingArg(unsigned ArgNo) const {
1392     assert(ArgNo < ArgInfo.size());
1393     return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1394   }
1395   unsigned getPaddingArgNo(unsigned ArgNo) const {
1396     assert(hasPaddingArg(ArgNo));
1397     return ArgInfo[ArgNo].PaddingArgIndex;
1398   }
1399 
1400   /// Returns index of first IR argument corresponding to ArgNo, and their
1401   /// quantity.
1402   std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1403     assert(ArgNo < ArgInfo.size());
1404     return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1405                           ArgInfo[ArgNo].NumberOfArgs);
1406   }
1407 
1408 private:
1409   void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1410                  bool OnlyRequiredArgs);
1411 };
1412 
1413 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1414                                       const CGFunctionInfo &FI,
1415                                       bool OnlyRequiredArgs) {
1416   unsigned IRArgNo = 0;
1417   bool SwapThisWithSRet = false;
1418   const ABIArgInfo &RetAI = FI.getReturnInfo();
1419 
1420   if (RetAI.getKind() == ABIArgInfo::Indirect) {
1421     SwapThisWithSRet = RetAI.isSRetAfterThis();
1422     SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1423   }
1424 
1425   unsigned ArgNo = 0;
1426   unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1427   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1428        ++I, ++ArgNo) {
1429     assert(I != FI.arg_end());
1430     QualType ArgType = I->type;
1431     const ABIArgInfo &AI = I->info;
1432     // Collect data about IR arguments corresponding to Clang argument ArgNo.
1433     auto &IRArgs = ArgInfo[ArgNo];
1434 
1435     if (AI.getPaddingType())
1436       IRArgs.PaddingArgIndex = IRArgNo++;
1437 
1438     switch (AI.getKind()) {
1439     case ABIArgInfo::Extend:
1440     case ABIArgInfo::Direct: {
1441       // FIXME: handle sseregparm someday...
1442       llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1443       if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1444         IRArgs.NumberOfArgs = STy->getNumElements();
1445       } else {
1446         IRArgs.NumberOfArgs = 1;
1447       }
1448       break;
1449     }
1450     case ABIArgInfo::Indirect:
1451       IRArgs.NumberOfArgs = 1;
1452       break;
1453     case ABIArgInfo::Ignore:
1454     case ABIArgInfo::InAlloca:
1455       // ignore and inalloca doesn't have matching LLVM parameters.
1456       IRArgs.NumberOfArgs = 0;
1457       break;
1458     case ABIArgInfo::CoerceAndExpand:
1459       IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1460       break;
1461     case ABIArgInfo::Expand:
1462       IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1463       break;
1464     }
1465 
1466     if (IRArgs.NumberOfArgs > 0) {
1467       IRArgs.FirstArgIndex = IRArgNo;
1468       IRArgNo += IRArgs.NumberOfArgs;
1469     }
1470 
1471     // Skip over the sret parameter when it comes second.  We already handled it
1472     // above.
1473     if (IRArgNo == 1 && SwapThisWithSRet)
1474       IRArgNo++;
1475   }
1476   assert(ArgNo == ArgInfo.size());
1477 
1478   if (FI.usesInAlloca())
1479     InallocaArgNo = IRArgNo++;
1480 
1481   TotalIRArgs = IRArgNo;
1482 }
1483 }  // namespace
1484 
1485 /***/
1486 
1487 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1488   const auto &RI = FI.getReturnInfo();
1489   return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1490 }
1491 
1492 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1493   return ReturnTypeUsesSRet(FI) &&
1494          getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1495 }
1496 
1497 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1498   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1499     switch (BT->getKind()) {
1500     default:
1501       return false;
1502     case BuiltinType::Float:
1503       return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1504     case BuiltinType::Double:
1505       return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1506     case BuiltinType::LongDouble:
1507       return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1508     }
1509   }
1510 
1511   return false;
1512 }
1513 
1514 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1515   if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1516     if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1517       if (BT->getKind() == BuiltinType::LongDouble)
1518         return getTarget().useObjCFP2RetForComplexLongDouble();
1519     }
1520   }
1521 
1522   return false;
1523 }
1524 
1525 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1526   const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1527   return GetFunctionType(FI);
1528 }
1529 
1530 llvm::FunctionType *
1531 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1532 
1533   bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1534   (void)Inserted;
1535   assert(Inserted && "Recursively being processed?");
1536 
1537   llvm::Type *resultType = nullptr;
1538   const ABIArgInfo &retAI = FI.getReturnInfo();
1539   switch (retAI.getKind()) {
1540   case ABIArgInfo::Expand:
1541     llvm_unreachable("Invalid ABI kind for return argument");
1542 
1543   case ABIArgInfo::Extend:
1544   case ABIArgInfo::Direct:
1545     resultType = retAI.getCoerceToType();
1546     break;
1547 
1548   case ABIArgInfo::InAlloca:
1549     if (retAI.getInAllocaSRet()) {
1550       // sret things on win32 aren't void, they return the sret pointer.
1551       QualType ret = FI.getReturnType();
1552       llvm::Type *ty = ConvertType(ret);
1553       unsigned addressSpace = Context.getTargetAddressSpace(ret);
1554       resultType = llvm::PointerType::get(ty, addressSpace);
1555     } else {
1556       resultType = llvm::Type::getVoidTy(getLLVMContext());
1557     }
1558     break;
1559 
1560   case ABIArgInfo::Indirect:
1561   case ABIArgInfo::Ignore:
1562     resultType = llvm::Type::getVoidTy(getLLVMContext());
1563     break;
1564 
1565   case ABIArgInfo::CoerceAndExpand:
1566     resultType = retAI.getUnpaddedCoerceAndExpandType();
1567     break;
1568   }
1569 
1570   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1571   SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1572 
1573   // Add type for sret argument.
1574   if (IRFunctionArgs.hasSRetArg()) {
1575     QualType Ret = FI.getReturnType();
1576     llvm::Type *Ty = ConvertType(Ret);
1577     unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1578     ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1579         llvm::PointerType::get(Ty, AddressSpace);
1580   }
1581 
1582   // Add type for inalloca argument.
1583   if (IRFunctionArgs.hasInallocaArg()) {
1584     auto ArgStruct = FI.getArgStruct();
1585     assert(ArgStruct);
1586     ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1587   }
1588 
1589   // Add in all of the required arguments.
1590   unsigned ArgNo = 0;
1591   CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1592                                      ie = it + FI.getNumRequiredArgs();
1593   for (; it != ie; ++it, ++ArgNo) {
1594     const ABIArgInfo &ArgInfo = it->info;
1595 
1596     // Insert a padding type to ensure proper alignment.
1597     if (IRFunctionArgs.hasPaddingArg(ArgNo))
1598       ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1599           ArgInfo.getPaddingType();
1600 
1601     unsigned FirstIRArg, NumIRArgs;
1602     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1603 
1604     switch (ArgInfo.getKind()) {
1605     case ABIArgInfo::Ignore:
1606     case ABIArgInfo::InAlloca:
1607       assert(NumIRArgs == 0);
1608       break;
1609 
1610     case ABIArgInfo::Indirect: {
1611       assert(NumIRArgs == 1);
1612       // indirect arguments are always on the stack, which is alloca addr space.
1613       llvm::Type *LTy = ConvertTypeForMem(it->type);
1614       ArgTypes[FirstIRArg] = LTy->getPointerTo(
1615           CGM.getDataLayout().getAllocaAddrSpace());
1616       break;
1617     }
1618 
1619     case ABIArgInfo::Extend:
1620     case ABIArgInfo::Direct: {
1621       // Fast-isel and the optimizer generally like scalar values better than
1622       // FCAs, so we flatten them if this is safe to do for this argument.
1623       llvm::Type *argType = ArgInfo.getCoerceToType();
1624       llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1625       if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1626         assert(NumIRArgs == st->getNumElements());
1627         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1628           ArgTypes[FirstIRArg + i] = st->getElementType(i);
1629       } else {
1630         assert(NumIRArgs == 1);
1631         ArgTypes[FirstIRArg] = argType;
1632       }
1633       break;
1634     }
1635 
1636     case ABIArgInfo::CoerceAndExpand: {
1637       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1638       for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1639         *ArgTypesIter++ = EltTy;
1640       }
1641       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1642       break;
1643     }
1644 
1645     case ABIArgInfo::Expand:
1646       auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1647       getExpandedTypes(it->type, ArgTypesIter);
1648       assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1649       break;
1650     }
1651   }
1652 
1653   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1654   assert(Erased && "Not in set?");
1655 
1656   return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1657 }
1658 
1659 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1660   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1661   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1662 
1663   if (!isFuncTypeConvertible(FPT))
1664     return llvm::StructType::get(getLLVMContext());
1665 
1666   const CGFunctionInfo *Info;
1667   if (isa<CXXDestructorDecl>(MD))
1668     Info =
1669         &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
1670   else
1671     Info = &arrangeCXXMethodDeclaration(MD);
1672   return GetFunctionType(*Info);
1673 }
1674 
1675 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1676                                                llvm::AttrBuilder &FuncAttrs,
1677                                                const FunctionProtoType *FPT) {
1678   if (!FPT)
1679     return;
1680 
1681   if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1682       FPT->isNothrow(Ctx))
1683     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1684 }
1685 
1686 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1687                                                bool AttrOnCallSite,
1688                                                llvm::AttrBuilder &FuncAttrs) {
1689   // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1690   if (!HasOptnone) {
1691     if (CodeGenOpts.OptimizeSize)
1692       FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1693     if (CodeGenOpts.OptimizeSize == 2)
1694       FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1695   }
1696 
1697   if (CodeGenOpts.DisableRedZone)
1698     FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1699   if (CodeGenOpts.NoImplicitFloat)
1700     FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1701 
1702   if (AttrOnCallSite) {
1703     // Attributes that should go on the call site only.
1704     if (!CodeGenOpts.SimplifyLibCalls ||
1705         CodeGenOpts.isNoBuiltinFunc(Name.data()))
1706       FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1707     if (!CodeGenOpts.TrapFuncName.empty())
1708       FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1709   } else {
1710     // Attributes that should go on the function, but not the call site.
1711     if (!CodeGenOpts.DisableFPElim) {
1712       FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1713     } else if (CodeGenOpts.OmitLeafFramePointer) {
1714       FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1715       FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1716     } else {
1717       FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1718       FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1719     }
1720 
1721     FuncAttrs.addAttribute("less-precise-fpmad",
1722                            llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1723 
1724     if (!CodeGenOpts.FPDenormalMode.empty())
1725       FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1726 
1727     FuncAttrs.addAttribute("no-trapping-math",
1728                            llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1729 
1730     // TODO: Are these all needed?
1731     // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1732     FuncAttrs.addAttribute("no-infs-fp-math",
1733                            llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1734     FuncAttrs.addAttribute("no-nans-fp-math",
1735                            llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1736     FuncAttrs.addAttribute("unsafe-fp-math",
1737                            llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1738     FuncAttrs.addAttribute("use-soft-float",
1739                            llvm::toStringRef(CodeGenOpts.SoftFloat));
1740     FuncAttrs.addAttribute("stack-protector-buffer-size",
1741                            llvm::utostr(CodeGenOpts.SSPBufferSize));
1742     FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1743                            llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1744     FuncAttrs.addAttribute(
1745         "correctly-rounded-divide-sqrt-fp-math",
1746         llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1747 
1748     if (getLangOpts().OpenCL)
1749       FuncAttrs.addAttribute("denorms-are-zero",
1750                              llvm::toStringRef(CodeGenOpts.FlushDenorm));
1751 
1752     // TODO: Reciprocal estimate codegen options should apply to instructions?
1753     const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1754     if (!Recips.empty())
1755       FuncAttrs.addAttribute("reciprocal-estimates",
1756                              llvm::join(Recips, ","));
1757 
1758     if (!CodeGenOpts.PreferVectorWidth.empty() &&
1759         CodeGenOpts.PreferVectorWidth != "none")
1760       FuncAttrs.addAttribute("prefer-vector-width",
1761                              CodeGenOpts.PreferVectorWidth);
1762 
1763     if (CodeGenOpts.StackRealignment)
1764       FuncAttrs.addAttribute("stackrealign");
1765     if (CodeGenOpts.Backchain)
1766       FuncAttrs.addAttribute("backchain");
1767   }
1768 
1769   if (getLangOpts().assumeFunctionsAreConvergent()) {
1770     // Conservatively, mark all functions and calls in CUDA and OpenCL as
1771     // convergent (meaning, they may call an intrinsically convergent op, such
1772     // as __syncthreads() / barrier(), and so can't have certain optimizations
1773     // applied around them).  LLVM will remove this attribute where it safely
1774     // can.
1775     FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1776   }
1777 
1778   if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1779     // Exceptions aren't supported in CUDA device code.
1780     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1781 
1782     // Respect -fcuda-flush-denormals-to-zero.
1783     if (getLangOpts().CUDADeviceFlushDenormalsToZero)
1784       FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1785   }
1786 }
1787 
1788 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1789   llvm::AttrBuilder FuncAttrs;
1790   ConstructDefaultFnAttrList(F.getName(),
1791                              F.hasFnAttribute(llvm::Attribute::OptimizeNone),
1792                              /* AttrOnCallsite = */ false, FuncAttrs);
1793   F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1794 }
1795 
1796 void CodeGenModule::ConstructAttributeList(
1797     StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1798     llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1799   llvm::AttrBuilder FuncAttrs;
1800   llvm::AttrBuilder RetAttrs;
1801 
1802   CallingConv = FI.getEffectiveCallingConvention();
1803   if (FI.isNoReturn())
1804     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1805 
1806   // If we have information about the function prototype, we can learn
1807   // attributes form there.
1808   AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1809                                      CalleeInfo.getCalleeFunctionProtoType());
1810 
1811   const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
1812 
1813   bool HasOptnone = false;
1814   // FIXME: handle sseregparm someday...
1815   if (TargetDecl) {
1816     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1817       FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1818     if (TargetDecl->hasAttr<NoThrowAttr>())
1819       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1820     if (TargetDecl->hasAttr<NoReturnAttr>())
1821       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1822     if (TargetDecl->hasAttr<ColdAttr>())
1823       FuncAttrs.addAttribute(llvm::Attribute::Cold);
1824     if (TargetDecl->hasAttr<NoDuplicateAttr>())
1825       FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1826     if (TargetDecl->hasAttr<ConvergentAttr>())
1827       FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1828 
1829     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1830       AddAttributesFromFunctionProtoType(
1831           getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1832       // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1833       // These attributes are not inherited by overloads.
1834       const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1835       if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1836         FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1837     }
1838 
1839     // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1840     if (TargetDecl->hasAttr<ConstAttr>()) {
1841       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1842       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1843     } else if (TargetDecl->hasAttr<PureAttr>()) {
1844       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1845       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1846     } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1847       FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1848       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1849     }
1850     if (TargetDecl->hasAttr<RestrictAttr>())
1851       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1852     if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
1853       RetAttrs.addAttribute(llvm::Attribute::NonNull);
1854     if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1855       FuncAttrs.addAttribute("no_caller_saved_registers");
1856     if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1857       FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1858 
1859     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1860     if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1861       Optional<unsigned> NumElemsParam;
1862       if (AllocSize->getNumElemsParam().isValid())
1863         NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1864       FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1865                                  NumElemsParam);
1866     }
1867   }
1868 
1869   ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1870 
1871   if (CodeGenOpts.EnableSegmentedStacks &&
1872       !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1873     FuncAttrs.addAttribute("split-stack");
1874 
1875   // Add NonLazyBind attribute to function declarations when -fno-plt
1876   // is used.
1877   if (TargetDecl && CodeGenOpts.NoPLT) {
1878     if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1879       if (!Fn->isDefined() && !AttrOnCallSite) {
1880         FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1881       }
1882     }
1883   }
1884 
1885   if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1886     if (getLangOpts().OpenCLVersion <= 120) {
1887       // OpenCL v1.2 Work groups are always uniform
1888       FuncAttrs.addAttribute("uniform-work-group-size", "true");
1889     } else {
1890       // OpenCL v2.0 Work groups may be whether uniform or not.
1891       // '-cl-uniform-work-group-size' compile option gets a hint
1892       // to the compiler that the global work-size be a multiple of
1893       // the work-group size specified to clEnqueueNDRangeKernel
1894       // (i.e. work groups are uniform).
1895       FuncAttrs.addAttribute("uniform-work-group-size",
1896                              llvm::toStringRef(CodeGenOpts.UniformWGSize));
1897     }
1898   }
1899 
1900   if (!AttrOnCallSite) {
1901     bool DisableTailCalls = false;
1902 
1903     if (CodeGenOpts.DisableTailCalls)
1904       DisableTailCalls = true;
1905     else if (TargetDecl) {
1906       if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1907           TargetDecl->hasAttr<AnyX86InterruptAttr>())
1908         DisableTailCalls = true;
1909       else if (CodeGenOpts.NoEscapingBlockTailCalls) {
1910         if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
1911           if (!BD->doesNotEscape())
1912             DisableTailCalls = true;
1913       }
1914     }
1915 
1916     FuncAttrs.addAttribute("disable-tail-calls",
1917                            llvm::toStringRef(DisableTailCalls));
1918     GetCPUAndFeaturesAttributes(TargetDecl, FuncAttrs);
1919   }
1920 
1921   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1922 
1923   QualType RetTy = FI.getReturnType();
1924   const ABIArgInfo &RetAI = FI.getReturnInfo();
1925   switch (RetAI.getKind()) {
1926   case ABIArgInfo::Extend:
1927     if (RetAI.isSignExt())
1928       RetAttrs.addAttribute(llvm::Attribute::SExt);
1929     else
1930       RetAttrs.addAttribute(llvm::Attribute::ZExt);
1931     LLVM_FALLTHROUGH;
1932   case ABIArgInfo::Direct:
1933     if (RetAI.getInReg())
1934       RetAttrs.addAttribute(llvm::Attribute::InReg);
1935     break;
1936   case ABIArgInfo::Ignore:
1937     break;
1938 
1939   case ABIArgInfo::InAlloca:
1940   case ABIArgInfo::Indirect: {
1941     // inalloca and sret disable readnone and readonly
1942     FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1943       .removeAttribute(llvm::Attribute::ReadNone);
1944     break;
1945   }
1946 
1947   case ABIArgInfo::CoerceAndExpand:
1948     break;
1949 
1950   case ABIArgInfo::Expand:
1951     llvm_unreachable("Invalid ABI kind for return argument");
1952   }
1953 
1954   if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1955     QualType PTy = RefTy->getPointeeType();
1956     if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1957       RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1958                                         .getQuantity());
1959     else if (getContext().getTargetAddressSpace(PTy) == 0)
1960       RetAttrs.addAttribute(llvm::Attribute::NonNull);
1961   }
1962 
1963   bool hasUsedSRet = false;
1964   SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
1965 
1966   // Attach attributes to sret.
1967   if (IRFunctionArgs.hasSRetArg()) {
1968     llvm::AttrBuilder SRETAttrs;
1969     SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1970     hasUsedSRet = true;
1971     if (RetAI.getInReg())
1972       SRETAttrs.addAttribute(llvm::Attribute::InReg);
1973     ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
1974         llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
1975   }
1976 
1977   // Attach attributes to inalloca argument.
1978   if (IRFunctionArgs.hasInallocaArg()) {
1979     llvm::AttrBuilder Attrs;
1980     Attrs.addAttribute(llvm::Attribute::InAlloca);
1981     ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
1982         llvm::AttributeSet::get(getLLVMContext(), Attrs);
1983   }
1984 
1985   unsigned ArgNo = 0;
1986   for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
1987                                           E = FI.arg_end();
1988        I != E; ++I, ++ArgNo) {
1989     QualType ParamType = I->type;
1990     const ABIArgInfo &AI = I->info;
1991     llvm::AttrBuilder Attrs;
1992 
1993     // Add attribute for padding argument, if necessary.
1994     if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
1995       if (AI.getPaddingInReg()) {
1996         ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1997             llvm::AttributeSet::get(
1998                 getLLVMContext(),
1999                 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2000       }
2001     }
2002 
2003     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2004     // have the corresponding parameter variable.  It doesn't make
2005     // sense to do it here because parameters are so messed up.
2006     switch (AI.getKind()) {
2007     case ABIArgInfo::Extend:
2008       if (AI.isSignExt())
2009         Attrs.addAttribute(llvm::Attribute::SExt);
2010       else
2011         Attrs.addAttribute(llvm::Attribute::ZExt);
2012       LLVM_FALLTHROUGH;
2013     case ABIArgInfo::Direct:
2014       if (ArgNo == 0 && FI.isChainCall())
2015         Attrs.addAttribute(llvm::Attribute::Nest);
2016       else if (AI.getInReg())
2017         Attrs.addAttribute(llvm::Attribute::InReg);
2018       break;
2019 
2020     case ABIArgInfo::Indirect: {
2021       if (AI.getInReg())
2022         Attrs.addAttribute(llvm::Attribute::InReg);
2023 
2024       if (AI.getIndirectByVal())
2025         Attrs.addAttribute(llvm::Attribute::ByVal);
2026 
2027       CharUnits Align = AI.getIndirectAlign();
2028 
2029       // In a byval argument, it is important that the required
2030       // alignment of the type is honored, as LLVM might be creating a
2031       // *new* stack object, and needs to know what alignment to give
2032       // it. (Sometimes it can deduce a sensible alignment on its own,
2033       // but not if clang decides it must emit a packed struct, or the
2034       // user specifies increased alignment requirements.)
2035       //
2036       // This is different from indirect *not* byval, where the object
2037       // exists already, and the align attribute is purely
2038       // informative.
2039       assert(!Align.isZero());
2040 
2041       // For now, only add this when we have a byval argument.
2042       // TODO: be less lazy about updating test cases.
2043       if (AI.getIndirectByVal())
2044         Attrs.addAlignmentAttr(Align.getQuantity());
2045 
2046       // byval disables readnone and readonly.
2047       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2048         .removeAttribute(llvm::Attribute::ReadNone);
2049       break;
2050     }
2051     case ABIArgInfo::Ignore:
2052     case ABIArgInfo::Expand:
2053     case ABIArgInfo::CoerceAndExpand:
2054       break;
2055 
2056     case ABIArgInfo::InAlloca:
2057       // inalloca disables readnone and readonly.
2058       FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2059           .removeAttribute(llvm::Attribute::ReadNone);
2060       continue;
2061     }
2062 
2063     if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2064       QualType PTy = RefTy->getPointeeType();
2065       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2066         Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2067                                        .getQuantity());
2068       else if (getContext().getTargetAddressSpace(PTy) == 0)
2069         Attrs.addAttribute(llvm::Attribute::NonNull);
2070     }
2071 
2072     switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2073     case ParameterABI::Ordinary:
2074       break;
2075 
2076     case ParameterABI::SwiftIndirectResult: {
2077       // Add 'sret' if we haven't already used it for something, but
2078       // only if the result is void.
2079       if (!hasUsedSRet && RetTy->isVoidType()) {
2080         Attrs.addAttribute(llvm::Attribute::StructRet);
2081         hasUsedSRet = true;
2082       }
2083 
2084       // Add 'noalias' in either case.
2085       Attrs.addAttribute(llvm::Attribute::NoAlias);
2086 
2087       // Add 'dereferenceable' and 'alignment'.
2088       auto PTy = ParamType->getPointeeType();
2089       if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2090         auto info = getContext().getTypeInfoInChars(PTy);
2091         Attrs.addDereferenceableAttr(info.first.getQuantity());
2092         Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2093                                                  info.second.getQuantity()));
2094       }
2095       break;
2096     }
2097 
2098     case ParameterABI::SwiftErrorResult:
2099       Attrs.addAttribute(llvm::Attribute::SwiftError);
2100       break;
2101 
2102     case ParameterABI::SwiftContext:
2103       Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2104       break;
2105     }
2106 
2107     if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2108       Attrs.addAttribute(llvm::Attribute::NoCapture);
2109 
2110     if (Attrs.hasAttributes()) {
2111       unsigned FirstIRArg, NumIRArgs;
2112       std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2113       for (unsigned i = 0; i < NumIRArgs; i++)
2114         ArgAttrs[FirstIRArg + i] =
2115             llvm::AttributeSet::get(getLLVMContext(), Attrs);
2116     }
2117   }
2118   assert(ArgNo == FI.arg_size());
2119 
2120   AttrList = llvm::AttributeList::get(
2121       getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2122       llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2123 }
2124 
2125 /// An argument came in as a promoted argument; demote it back to its
2126 /// declared type.
2127 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2128                                          const VarDecl *var,
2129                                          llvm::Value *value) {
2130   llvm::Type *varType = CGF.ConvertType(var->getType());
2131 
2132   // This can happen with promotions that actually don't change the
2133   // underlying type, like the enum promotions.
2134   if (value->getType() == varType) return value;
2135 
2136   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2137          && "unexpected promotion type");
2138 
2139   if (isa<llvm::IntegerType>(varType))
2140     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2141 
2142   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2143 }
2144 
2145 /// Returns the attribute (either parameter attribute, or function
2146 /// attribute), which declares argument ArgNo to be non-null.
2147 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2148                                          QualType ArgType, unsigned ArgNo) {
2149   // FIXME: __attribute__((nonnull)) can also be applied to:
2150   //   - references to pointers, where the pointee is known to be
2151   //     nonnull (apparently a Clang extension)
2152   //   - transparent unions containing pointers
2153   // In the former case, LLVM IR cannot represent the constraint. In
2154   // the latter case, we have no guarantee that the transparent union
2155   // is in fact passed as a pointer.
2156   if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2157     return nullptr;
2158   // First, check attribute on parameter itself.
2159   if (PVD) {
2160     if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2161       return ParmNNAttr;
2162   }
2163   // Check function attributes.
2164   if (!FD)
2165     return nullptr;
2166   for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2167     if (NNAttr->isNonNull(ArgNo))
2168       return NNAttr;
2169   }
2170   return nullptr;
2171 }
2172 
2173 namespace {
2174   struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2175     Address Temp;
2176     Address Arg;
2177     CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2178     void Emit(CodeGenFunction &CGF, Flags flags) override {
2179       llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2180       CGF.Builder.CreateStore(errorValue, Arg);
2181     }
2182   };
2183 }
2184 
2185 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2186                                          llvm::Function *Fn,
2187                                          const FunctionArgList &Args) {
2188   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2189     // Naked functions don't have prologues.
2190     return;
2191 
2192   // If this is an implicit-return-zero function, go ahead and
2193   // initialize the return value.  TODO: it might be nice to have
2194   // a more general mechanism for this that didn't require synthesized
2195   // return statements.
2196   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2197     if (FD->hasImplicitReturnZero()) {
2198       QualType RetTy = FD->getReturnType().getUnqualifiedType();
2199       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2200       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2201       Builder.CreateStore(Zero, ReturnValue);
2202     }
2203   }
2204 
2205   // FIXME: We no longer need the types from FunctionArgList; lift up and
2206   // simplify.
2207 
2208   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2209   // Flattened function arguments.
2210   SmallVector<llvm::Value *, 16> FnArgs;
2211   FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2212   for (auto &Arg : Fn->args()) {
2213     FnArgs.push_back(&Arg);
2214   }
2215   assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2216 
2217   // If we're using inalloca, all the memory arguments are GEPs off of the last
2218   // parameter, which is a pointer to the complete memory area.
2219   Address ArgStruct = Address::invalid();
2220   const llvm::StructLayout *ArgStructLayout = nullptr;
2221   if (IRFunctionArgs.hasInallocaArg()) {
2222     ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2223     ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2224                         FI.getArgStructAlignment());
2225 
2226     assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2227   }
2228 
2229   // Name the struct return parameter.
2230   if (IRFunctionArgs.hasSRetArg()) {
2231     auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2232     AI->setName("agg.result");
2233     AI->addAttr(llvm::Attribute::NoAlias);
2234   }
2235 
2236   // Track if we received the parameter as a pointer (indirect, byval, or
2237   // inalloca).  If already have a pointer, EmitParmDecl doesn't need to copy it
2238   // into a local alloca for us.
2239   SmallVector<ParamValue, 16> ArgVals;
2240   ArgVals.reserve(Args.size());
2241 
2242   // Create a pointer value for every parameter declaration.  This usually
2243   // entails copying one or more LLVM IR arguments into an alloca.  Don't push
2244   // any cleanups or do anything that might unwind.  We do that separately, so
2245   // we can push the cleanups in the correct order for the ABI.
2246   assert(FI.arg_size() == Args.size() &&
2247          "Mismatch between function signature & arguments.");
2248   unsigned ArgNo = 0;
2249   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2250   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2251        i != e; ++i, ++info_it, ++ArgNo) {
2252     const VarDecl *Arg = *i;
2253     const ABIArgInfo &ArgI = info_it->info;
2254 
2255     bool isPromoted =
2256       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2257     // We are converting from ABIArgInfo type to VarDecl type directly, unless
2258     // the parameter is promoted. In this case we convert to
2259     // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2260     QualType Ty = isPromoted ? info_it->type : Arg->getType();
2261     assert(hasScalarEvaluationKind(Ty) ==
2262            hasScalarEvaluationKind(Arg->getType()));
2263 
2264     unsigned FirstIRArg, NumIRArgs;
2265     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2266 
2267     switch (ArgI.getKind()) {
2268     case ABIArgInfo::InAlloca: {
2269       assert(NumIRArgs == 0);
2270       auto FieldIndex = ArgI.getInAllocaFieldIndex();
2271       CharUnits FieldOffset =
2272         CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
2273       Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2274                                           Arg->getName());
2275       ArgVals.push_back(ParamValue::forIndirect(V));
2276       break;
2277     }
2278 
2279     case ABIArgInfo::Indirect: {
2280       assert(NumIRArgs == 1);
2281       Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2282 
2283       if (!hasScalarEvaluationKind(Ty)) {
2284         // Aggregates and complex variables are accessed by reference.  All we
2285         // need to do is realign the value, if requested.
2286         Address V = ParamAddr;
2287         if (ArgI.getIndirectRealign()) {
2288           Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2289 
2290           // Copy from the incoming argument pointer to the temporary with the
2291           // appropriate alignment.
2292           //
2293           // FIXME: We should have a common utility for generating an aggregate
2294           // copy.
2295           CharUnits Size = getContext().getTypeSizeInChars(Ty);
2296           auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2297           Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2298           Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2299           Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2300           V = AlignedTemp;
2301         }
2302         ArgVals.push_back(ParamValue::forIndirect(V));
2303       } else {
2304         // Load scalar value from indirect argument.
2305         llvm::Value *V =
2306           EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
2307 
2308         if (isPromoted)
2309           V = emitArgumentDemotion(*this, Arg, V);
2310         ArgVals.push_back(ParamValue::forDirect(V));
2311       }
2312       break;
2313     }
2314 
2315     case ABIArgInfo::Extend:
2316     case ABIArgInfo::Direct: {
2317 
2318       // If we have the trivial case, handle it with no muss and fuss.
2319       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2320           ArgI.getCoerceToType() == ConvertType(Ty) &&
2321           ArgI.getDirectOffset() == 0) {
2322         assert(NumIRArgs == 1);
2323         llvm::Value *V = FnArgs[FirstIRArg];
2324         auto AI = cast<llvm::Argument>(V);
2325 
2326         if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2327           if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2328                              PVD->getFunctionScopeIndex()))
2329             AI->addAttr(llvm::Attribute::NonNull);
2330 
2331           QualType OTy = PVD->getOriginalType();
2332           if (const auto *ArrTy =
2333               getContext().getAsConstantArrayType(OTy)) {
2334             // A C99 array parameter declaration with the static keyword also
2335             // indicates dereferenceability, and if the size is constant we can
2336             // use the dereferenceable attribute (which requires the size in
2337             // bytes).
2338             if (ArrTy->getSizeModifier() == ArrayType::Static) {
2339               QualType ETy = ArrTy->getElementType();
2340               uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2341               if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2342                   ArrSize) {
2343                 llvm::AttrBuilder Attrs;
2344                 Attrs.addDereferenceableAttr(
2345                   getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2346                 AI->addAttrs(Attrs);
2347               } else if (getContext().getTargetAddressSpace(ETy) == 0) {
2348                 AI->addAttr(llvm::Attribute::NonNull);
2349               }
2350             }
2351           } else if (const auto *ArrTy =
2352                      getContext().getAsVariableArrayType(OTy)) {
2353             // For C99 VLAs with the static keyword, we don't know the size so
2354             // we can't use the dereferenceable attribute, but in addrspace(0)
2355             // we know that it must be nonnull.
2356             if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2357                 !getContext().getTargetAddressSpace(ArrTy->getElementType()))
2358               AI->addAttr(llvm::Attribute::NonNull);
2359           }
2360 
2361           const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2362           if (!AVAttr)
2363             if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2364               AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2365           if (AVAttr) {
2366             llvm::Value *AlignmentValue =
2367               EmitScalarExpr(AVAttr->getAlignment());
2368             llvm::ConstantInt *AlignmentCI =
2369               cast<llvm::ConstantInt>(AlignmentValue);
2370             unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2371                                           +llvm::Value::MaximumAlignment);
2372             AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2373           }
2374         }
2375 
2376         if (Arg->getType().isRestrictQualified())
2377           AI->addAttr(llvm::Attribute::NoAlias);
2378 
2379         // LLVM expects swifterror parameters to be used in very restricted
2380         // ways.  Copy the value into a less-restricted temporary.
2381         if (FI.getExtParameterInfo(ArgNo).getABI()
2382               == ParameterABI::SwiftErrorResult) {
2383           QualType pointeeTy = Ty->getPointeeType();
2384           assert(pointeeTy->isPointerType());
2385           Address temp =
2386             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2387           Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2388           llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2389           Builder.CreateStore(incomingErrorValue, temp);
2390           V = temp.getPointer();
2391 
2392           // Push a cleanup to copy the value back at the end of the function.
2393           // The convention does not guarantee that the value will be written
2394           // back if the function exits with an unwind exception.
2395           EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2396         }
2397 
2398         // Ensure the argument is the correct type.
2399         if (V->getType() != ArgI.getCoerceToType())
2400           V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2401 
2402         if (isPromoted)
2403           V = emitArgumentDemotion(*this, Arg, V);
2404 
2405         // Because of merging of function types from multiple decls it is
2406         // possible for the type of an argument to not match the corresponding
2407         // type in the function type. Since we are codegening the callee
2408         // in here, add a cast to the argument type.
2409         llvm::Type *LTy = ConvertType(Arg->getType());
2410         if (V->getType() != LTy)
2411           V = Builder.CreateBitCast(V, LTy);
2412 
2413         ArgVals.push_back(ParamValue::forDirect(V));
2414         break;
2415       }
2416 
2417       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2418                                      Arg->getName());
2419 
2420       // Pointer to store into.
2421       Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2422 
2423       // Fast-isel and the optimizer generally like scalar values better than
2424       // FCAs, so we flatten them if this is safe to do for this argument.
2425       llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2426       if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2427           STy->getNumElements() > 1) {
2428         auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2429         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2430         llvm::Type *DstTy = Ptr.getElementType();
2431         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2432 
2433         Address AddrToStoreInto = Address::invalid();
2434         if (SrcSize <= DstSize) {
2435           AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2436         } else {
2437           AddrToStoreInto =
2438             CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2439         }
2440 
2441         assert(STy->getNumElements() == NumIRArgs);
2442         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2443           auto AI = FnArgs[FirstIRArg + i];
2444           AI->setName(Arg->getName() + ".coerce" + Twine(i));
2445           auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2446           Address EltPtr =
2447             Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2448           Builder.CreateStore(AI, EltPtr);
2449         }
2450 
2451         if (SrcSize > DstSize) {
2452           Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2453         }
2454 
2455       } else {
2456         // Simple case, just do a coerced store of the argument into the alloca.
2457         assert(NumIRArgs == 1);
2458         auto AI = FnArgs[FirstIRArg];
2459         AI->setName(Arg->getName() + ".coerce");
2460         CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2461       }
2462 
2463       // Match to what EmitParmDecl is expecting for this type.
2464       if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2465         llvm::Value *V =
2466           EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
2467         if (isPromoted)
2468           V = emitArgumentDemotion(*this, Arg, V);
2469         ArgVals.push_back(ParamValue::forDirect(V));
2470       } else {
2471         ArgVals.push_back(ParamValue::forIndirect(Alloca));
2472       }
2473       break;
2474     }
2475 
2476     case ABIArgInfo::CoerceAndExpand: {
2477       // Reconstruct into a temporary.
2478       Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2479       ArgVals.push_back(ParamValue::forIndirect(alloca));
2480 
2481       auto coercionType = ArgI.getCoerceAndExpandType();
2482       alloca = Builder.CreateElementBitCast(alloca, coercionType);
2483       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2484 
2485       unsigned argIndex = FirstIRArg;
2486       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2487         llvm::Type *eltType = coercionType->getElementType(i);
2488         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2489           continue;
2490 
2491         auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2492         auto elt = FnArgs[argIndex++];
2493         Builder.CreateStore(elt, eltAddr);
2494       }
2495       assert(argIndex == FirstIRArg + NumIRArgs);
2496       break;
2497     }
2498 
2499     case ABIArgInfo::Expand: {
2500       // If this structure was expanded into multiple arguments then
2501       // we need to create a temporary and reconstruct it from the
2502       // arguments.
2503       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2504       LValue LV = MakeAddrLValue(Alloca, Ty);
2505       ArgVals.push_back(ParamValue::forIndirect(Alloca));
2506 
2507       auto FnArgIter = FnArgs.begin() + FirstIRArg;
2508       ExpandTypeFromArgs(Ty, LV, FnArgIter);
2509       assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2510       for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2511         auto AI = FnArgs[FirstIRArg + i];
2512         AI->setName(Arg->getName() + "." + Twine(i));
2513       }
2514       break;
2515     }
2516 
2517     case ABIArgInfo::Ignore:
2518       assert(NumIRArgs == 0);
2519       // Initialize the local variable appropriately.
2520       if (!hasScalarEvaluationKind(Ty)) {
2521         ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2522       } else {
2523         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2524         ArgVals.push_back(ParamValue::forDirect(U));
2525       }
2526       break;
2527     }
2528   }
2529 
2530   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2531     for (int I = Args.size() - 1; I >= 0; --I)
2532       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2533   } else {
2534     for (unsigned I = 0, E = Args.size(); I != E; ++I)
2535       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2536   }
2537 }
2538 
2539 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2540   while (insn->use_empty()) {
2541     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2542     if (!bitcast) return;
2543 
2544     // This is "safe" because we would have used a ConstantExpr otherwise.
2545     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2546     bitcast->eraseFromParent();
2547   }
2548 }
2549 
2550 /// Try to emit a fused autorelease of a return result.
2551 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2552                                                     llvm::Value *result) {
2553   // We must be immediately followed the cast.
2554   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2555   if (BB->empty()) return nullptr;
2556   if (&BB->back() != result) return nullptr;
2557 
2558   llvm::Type *resultType = result->getType();
2559 
2560   // result is in a BasicBlock and is therefore an Instruction.
2561   llvm::Instruction *generator = cast<llvm::Instruction>(result);
2562 
2563   SmallVector<llvm::Instruction *, 4> InstsToKill;
2564 
2565   // Look for:
2566   //  %generator = bitcast %type1* %generator2 to %type2*
2567   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2568     // We would have emitted this as a constant if the operand weren't
2569     // an Instruction.
2570     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2571 
2572     // Require the generator to be immediately followed by the cast.
2573     if (generator->getNextNode() != bitcast)
2574       return nullptr;
2575 
2576     InstsToKill.push_back(bitcast);
2577   }
2578 
2579   // Look for:
2580   //   %generator = call i8* @objc_retain(i8* %originalResult)
2581   // or
2582   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2583   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2584   if (!call) return nullptr;
2585 
2586   bool doRetainAutorelease;
2587 
2588   if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2589     doRetainAutorelease = true;
2590   } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2591                                           .objc_retainAutoreleasedReturnValue) {
2592     doRetainAutorelease = false;
2593 
2594     // If we emitted an assembly marker for this call (and the
2595     // ARCEntrypoints field should have been set if so), go looking
2596     // for that call.  If we can't find it, we can't do this
2597     // optimization.  But it should always be the immediately previous
2598     // instruction, unless we needed bitcasts around the call.
2599     if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2600       llvm::Instruction *prev = call->getPrevNode();
2601       assert(prev);
2602       if (isa<llvm::BitCastInst>(prev)) {
2603         prev = prev->getPrevNode();
2604         assert(prev);
2605       }
2606       assert(isa<llvm::CallInst>(prev));
2607       assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2608                CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2609       InstsToKill.push_back(prev);
2610     }
2611   } else {
2612     return nullptr;
2613   }
2614 
2615   result = call->getArgOperand(0);
2616   InstsToKill.push_back(call);
2617 
2618   // Keep killing bitcasts, for sanity.  Note that we no longer care
2619   // about precise ordering as long as there's exactly one use.
2620   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2621     if (!bitcast->hasOneUse()) break;
2622     InstsToKill.push_back(bitcast);
2623     result = bitcast->getOperand(0);
2624   }
2625 
2626   // Delete all the unnecessary instructions, from latest to earliest.
2627   for (auto *I : InstsToKill)
2628     I->eraseFromParent();
2629 
2630   // Do the fused retain/autorelease if we were asked to.
2631   if (doRetainAutorelease)
2632     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2633 
2634   // Cast back to the result type.
2635   return CGF.Builder.CreateBitCast(result, resultType);
2636 }
2637 
2638 /// If this is a +1 of the value of an immutable 'self', remove it.
2639 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2640                                           llvm::Value *result) {
2641   // This is only applicable to a method with an immutable 'self'.
2642   const ObjCMethodDecl *method =
2643     dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2644   if (!method) return nullptr;
2645   const VarDecl *self = method->getSelfDecl();
2646   if (!self->getType().isConstQualified()) return nullptr;
2647 
2648   // Look for a retain call.
2649   llvm::CallInst *retainCall =
2650     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2651   if (!retainCall ||
2652       retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2653     return nullptr;
2654 
2655   // Look for an ordinary load of 'self'.
2656   llvm::Value *retainedValue = retainCall->getArgOperand(0);
2657   llvm::LoadInst *load =
2658     dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2659   if (!load || load->isAtomic() || load->isVolatile() ||
2660       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2661     return nullptr;
2662 
2663   // Okay!  Burn it all down.  This relies for correctness on the
2664   // assumption that the retain is emitted as part of the return and
2665   // that thereafter everything is used "linearly".
2666   llvm::Type *resultType = result->getType();
2667   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2668   assert(retainCall->use_empty());
2669   retainCall->eraseFromParent();
2670   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2671 
2672   return CGF.Builder.CreateBitCast(load, resultType);
2673 }
2674 
2675 /// Emit an ARC autorelease of the result of a function.
2676 ///
2677 /// \return the value to actually return from the function
2678 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2679                                             llvm::Value *result) {
2680   // If we're returning 'self', kill the initial retain.  This is a
2681   // heuristic attempt to "encourage correctness" in the really unfortunate
2682   // case where we have a return of self during a dealloc and we desperately
2683   // need to avoid the possible autorelease.
2684   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2685     return self;
2686 
2687   // At -O0, try to emit a fused retain/autorelease.
2688   if (CGF.shouldUseFusedARCCalls())
2689     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2690       return fused;
2691 
2692   return CGF.EmitARCAutoreleaseReturnValue(result);
2693 }
2694 
2695 /// Heuristically search for a dominating store to the return-value slot.
2696 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2697   // Check if a User is a store which pointerOperand is the ReturnValue.
2698   // We are looking for stores to the ReturnValue, not for stores of the
2699   // ReturnValue to some other location.
2700   auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2701     auto *SI = dyn_cast<llvm::StoreInst>(U);
2702     if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2703       return nullptr;
2704     // These aren't actually possible for non-coerced returns, and we
2705     // only care about non-coerced returns on this code path.
2706     assert(!SI->isAtomic() && !SI->isVolatile());
2707     return SI;
2708   };
2709   // If there are multiple uses of the return-value slot, just check
2710   // for something immediately preceding the IP.  Sometimes this can
2711   // happen with how we generate implicit-returns; it can also happen
2712   // with noreturn cleanups.
2713   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2714     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2715     if (IP->empty()) return nullptr;
2716     llvm::Instruction *I = &IP->back();
2717 
2718     // Skip lifetime markers
2719     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2720                                             IE = IP->rend();
2721          II != IE; ++II) {
2722       if (llvm::IntrinsicInst *Intrinsic =
2723               dyn_cast<llvm::IntrinsicInst>(&*II)) {
2724         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2725           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2726           ++II;
2727           if (II == IE)
2728             break;
2729           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2730             continue;
2731         }
2732       }
2733       I = &*II;
2734       break;
2735     }
2736 
2737     return GetStoreIfValid(I);
2738   }
2739 
2740   llvm::StoreInst *store =
2741       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2742   if (!store) return nullptr;
2743 
2744   // Now do a first-and-dirty dominance check: just walk up the
2745   // single-predecessors chain from the current insertion point.
2746   llvm::BasicBlock *StoreBB = store->getParent();
2747   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2748   while (IP != StoreBB) {
2749     if (!(IP = IP->getSinglePredecessor()))
2750       return nullptr;
2751   }
2752 
2753   // Okay, the store's basic block dominates the insertion point; we
2754   // can do our thing.
2755   return store;
2756 }
2757 
2758 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2759                                          bool EmitRetDbgLoc,
2760                                          SourceLocation EndLoc) {
2761   if (FI.isNoReturn()) {
2762     // Noreturn functions don't return.
2763     EmitUnreachable(EndLoc);
2764     return;
2765   }
2766 
2767   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2768     // Naked functions don't have epilogues.
2769     Builder.CreateUnreachable();
2770     return;
2771   }
2772 
2773   // Functions with no result always return void.
2774   if (!ReturnValue.isValid()) {
2775     Builder.CreateRetVoid();
2776     return;
2777   }
2778 
2779   llvm::DebugLoc RetDbgLoc;
2780   llvm::Value *RV = nullptr;
2781   QualType RetTy = FI.getReturnType();
2782   const ABIArgInfo &RetAI = FI.getReturnInfo();
2783 
2784   switch (RetAI.getKind()) {
2785   case ABIArgInfo::InAlloca:
2786     // Aggregrates get evaluated directly into the destination.  Sometimes we
2787     // need to return the sret value in a register, though.
2788     assert(hasAggregateEvaluationKind(RetTy));
2789     if (RetAI.getInAllocaSRet()) {
2790       llvm::Function::arg_iterator EI = CurFn->arg_end();
2791       --EI;
2792       llvm::Value *ArgStruct = &*EI;
2793       llvm::Value *SRet = Builder.CreateStructGEP(
2794           nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2795       RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2796     }
2797     break;
2798 
2799   case ABIArgInfo::Indirect: {
2800     auto AI = CurFn->arg_begin();
2801     if (RetAI.isSRetAfterThis())
2802       ++AI;
2803     switch (getEvaluationKind(RetTy)) {
2804     case TEK_Complex: {
2805       ComplexPairTy RT =
2806         EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2807       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2808                          /*isInit*/ true);
2809       break;
2810     }
2811     case TEK_Aggregate:
2812       // Do nothing; aggregrates get evaluated directly into the destination.
2813       break;
2814     case TEK_Scalar:
2815       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2816                         MakeNaturalAlignAddrLValue(&*AI, RetTy),
2817                         /*isInit*/ true);
2818       break;
2819     }
2820     break;
2821   }
2822 
2823   case ABIArgInfo::Extend:
2824   case ABIArgInfo::Direct:
2825     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2826         RetAI.getDirectOffset() == 0) {
2827       // The internal return value temp always will have pointer-to-return-type
2828       // type, just do a load.
2829 
2830       // If there is a dominating store to ReturnValue, we can elide
2831       // the load, zap the store, and usually zap the alloca.
2832       if (llvm::StoreInst *SI =
2833               findDominatingStoreToReturnValue(*this)) {
2834         // Reuse the debug location from the store unless there is
2835         // cleanup code to be emitted between the store and return
2836         // instruction.
2837         if (EmitRetDbgLoc && !AutoreleaseResult)
2838           RetDbgLoc = SI->getDebugLoc();
2839         // Get the stored value and nuke the now-dead store.
2840         RV = SI->getValueOperand();
2841         SI->eraseFromParent();
2842 
2843         // If that was the only use of the return value, nuke it as well now.
2844         auto returnValueInst = ReturnValue.getPointer();
2845         if (returnValueInst->use_empty()) {
2846           if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2847             alloca->eraseFromParent();
2848             ReturnValue = Address::invalid();
2849           }
2850         }
2851 
2852       // Otherwise, we have to do a simple load.
2853       } else {
2854         RV = Builder.CreateLoad(ReturnValue);
2855       }
2856     } else {
2857       // If the value is offset in memory, apply the offset now.
2858       Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2859 
2860       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2861     }
2862 
2863     // In ARC, end functions that return a retainable type with a call
2864     // to objc_autoreleaseReturnValue.
2865     if (AutoreleaseResult) {
2866 #ifndef NDEBUG
2867       // Type::isObjCRetainabletype has to be called on a QualType that hasn't
2868       // been stripped of the typedefs, so we cannot use RetTy here. Get the
2869       // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2870       // CurCodeDecl or BlockInfo.
2871       QualType RT;
2872 
2873       if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2874         RT = FD->getReturnType();
2875       else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2876         RT = MD->getReturnType();
2877       else if (isa<BlockDecl>(CurCodeDecl))
2878         RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2879       else
2880         llvm_unreachable("Unexpected function/method type");
2881 
2882       assert(getLangOpts().ObjCAutoRefCount &&
2883              !FI.isReturnsRetained() &&
2884              RT->isObjCRetainableType());
2885 #endif
2886       RV = emitAutoreleaseOfResult(*this, RV);
2887     }
2888 
2889     break;
2890 
2891   case ABIArgInfo::Ignore:
2892     break;
2893 
2894   case ABIArgInfo::CoerceAndExpand: {
2895     auto coercionType = RetAI.getCoerceAndExpandType();
2896     auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2897 
2898     // Load all of the coerced elements out into results.
2899     llvm::SmallVector<llvm::Value*, 4> results;
2900     Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2901     for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2902       auto coercedEltType = coercionType->getElementType(i);
2903       if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2904         continue;
2905 
2906       auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2907       auto elt = Builder.CreateLoad(eltAddr);
2908       results.push_back(elt);
2909     }
2910 
2911     // If we have one result, it's the single direct result type.
2912     if (results.size() == 1) {
2913       RV = results[0];
2914 
2915     // Otherwise, we need to make a first-class aggregate.
2916     } else {
2917       // Construct a return type that lacks padding elements.
2918       llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2919 
2920       RV = llvm::UndefValue::get(returnType);
2921       for (unsigned i = 0, e = results.size(); i != e; ++i) {
2922         RV = Builder.CreateInsertValue(RV, results[i], i);
2923       }
2924     }
2925     break;
2926   }
2927 
2928   case ABIArgInfo::Expand:
2929     llvm_unreachable("Invalid ABI kind for return argument");
2930   }
2931 
2932   llvm::Instruction *Ret;
2933   if (RV) {
2934     EmitReturnValueCheck(RV);
2935     Ret = Builder.CreateRet(RV);
2936   } else {
2937     Ret = Builder.CreateRetVoid();
2938   }
2939 
2940   if (RetDbgLoc)
2941     Ret->setDebugLoc(std::move(RetDbgLoc));
2942 }
2943 
2944 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
2945   // A current decl may not be available when emitting vtable thunks.
2946   if (!CurCodeDecl)
2947     return;
2948 
2949   ReturnsNonNullAttr *RetNNAttr = nullptr;
2950   if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
2951     RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
2952 
2953   if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
2954     return;
2955 
2956   // Prefer the returns_nonnull attribute if it's present.
2957   SourceLocation AttrLoc;
2958   SanitizerMask CheckKind;
2959   SanitizerHandler Handler;
2960   if (RetNNAttr) {
2961     assert(!requiresReturnValueNullabilityCheck() &&
2962            "Cannot check nullability and the nonnull attribute");
2963     AttrLoc = RetNNAttr->getLocation();
2964     CheckKind = SanitizerKind::ReturnsNonnullAttribute;
2965     Handler = SanitizerHandler::NonnullReturn;
2966   } else {
2967     if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
2968       if (auto *TSI = DD->getTypeSourceInfo())
2969         if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
2970           AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
2971     CheckKind = SanitizerKind::NullabilityReturn;
2972     Handler = SanitizerHandler::NullabilityReturn;
2973   }
2974 
2975   SanitizerScope SanScope(this);
2976 
2977   // Make sure the "return" source location is valid. If we're checking a
2978   // nullability annotation, make sure the preconditions for the check are met.
2979   llvm::BasicBlock *Check = createBasicBlock("nullcheck");
2980   llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
2981   llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
2982   llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
2983   if (requiresReturnValueNullabilityCheck())
2984     CanNullCheck =
2985         Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
2986   Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
2987   EmitBlock(Check);
2988 
2989   // Now do the null check.
2990   llvm::Value *Cond = Builder.CreateIsNotNull(RV);
2991   llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
2992   llvm::Value *DynamicData[] = {SLocPtr};
2993   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
2994 
2995   EmitBlock(NoCheck);
2996 
2997 #ifndef NDEBUG
2998   // The return location should not be used after the check has been emitted.
2999   ReturnLocation = Address::invalid();
3000 #endif
3001 }
3002 
3003 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3004   const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3005   return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3006 }
3007 
3008 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3009                                           QualType Ty) {
3010   // FIXME: Generate IR in one pass, rather than going back and fixing up these
3011   // placeholders.
3012   llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3013   llvm::Type *IRPtrTy = IRTy->getPointerTo();
3014   llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3015 
3016   // FIXME: When we generate this IR in one pass, we shouldn't need
3017   // this win32-specific alignment hack.
3018   CharUnits Align = CharUnits::fromQuantity(4);
3019   Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3020 
3021   return AggValueSlot::forAddr(Address(Placeholder, Align),
3022                                Ty.getQualifiers(),
3023                                AggValueSlot::IsNotDestructed,
3024                                AggValueSlot::DoesNotNeedGCBarriers,
3025                                AggValueSlot::IsNotAliased,
3026                                AggValueSlot::DoesNotOverlap);
3027 }
3028 
3029 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3030                                           const VarDecl *param,
3031                                           SourceLocation loc) {
3032   // StartFunction converted the ABI-lowered parameter(s) into a
3033   // local alloca.  We need to turn that into an r-value suitable
3034   // for EmitCall.
3035   Address local = GetAddrOfLocalVar(param);
3036 
3037   QualType type = param->getType();
3038 
3039   assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
3040          "cannot emit delegate call arguments for inalloca arguments!");
3041 
3042   // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3043   // but the argument needs to be the original pointer.
3044   if (type->isReferenceType()) {
3045     args.add(RValue::get(Builder.CreateLoad(local)), type);
3046 
3047   // In ARC, move out of consumed arguments so that the release cleanup
3048   // entered by StartFunction doesn't cause an over-release.  This isn't
3049   // optimal -O0 code generation, but it should get cleaned up when
3050   // optimization is enabled.  This also assumes that delegate calls are
3051   // performed exactly once for a set of arguments, but that should be safe.
3052   } else if (getLangOpts().ObjCAutoRefCount &&
3053              param->hasAttr<NSConsumedAttr>() &&
3054              type->isObjCRetainableType()) {
3055     llvm::Value *ptr = Builder.CreateLoad(local);
3056     auto null =
3057       llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3058     Builder.CreateStore(null, local);
3059     args.add(RValue::get(ptr), type);
3060 
3061   // For the most part, we just need to load the alloca, except that
3062   // aggregate r-values are actually pointers to temporaries.
3063   } else {
3064     args.add(convertTempToRValue(local, type, loc), type);
3065   }
3066 }
3067 
3068 static bool isProvablyNull(llvm::Value *addr) {
3069   return isa<llvm::ConstantPointerNull>(addr);
3070 }
3071 
3072 /// Emit the actual writing-back of a writeback.
3073 static void emitWriteback(CodeGenFunction &CGF,
3074                           const CallArgList::Writeback &writeback) {
3075   const LValue &srcLV = writeback.Source;
3076   Address srcAddr = srcLV.getAddress();
3077   assert(!isProvablyNull(srcAddr.getPointer()) &&
3078          "shouldn't have writeback for provably null argument");
3079 
3080   llvm::BasicBlock *contBB = nullptr;
3081 
3082   // If the argument wasn't provably non-null, we need to null check
3083   // before doing the store.
3084   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3085                                               CGF.CGM.getDataLayout());
3086   if (!provablyNonNull) {
3087     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3088     contBB = CGF.createBasicBlock("icr.done");
3089 
3090     llvm::Value *isNull =
3091       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3092     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3093     CGF.EmitBlock(writebackBB);
3094   }
3095 
3096   // Load the value to writeback.
3097   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3098 
3099   // Cast it back, in case we're writing an id to a Foo* or something.
3100   value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3101                                     "icr.writeback-cast");
3102 
3103   // Perform the writeback.
3104 
3105   // If we have a "to use" value, it's something we need to emit a use
3106   // of.  This has to be carefully threaded in: if it's done after the
3107   // release it's potentially undefined behavior (and the optimizer
3108   // will ignore it), and if it happens before the retain then the
3109   // optimizer could move the release there.
3110   if (writeback.ToUse) {
3111     assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3112 
3113     // Retain the new value.  No need to block-copy here:  the block's
3114     // being passed up the stack.
3115     value = CGF.EmitARCRetainNonBlock(value);
3116 
3117     // Emit the intrinsic use here.
3118     CGF.EmitARCIntrinsicUse(writeback.ToUse);
3119 
3120     // Load the old value (primitively).
3121     llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3122 
3123     // Put the new value in place (primitively).
3124     CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3125 
3126     // Release the old value.
3127     CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3128 
3129   // Otherwise, we can just do a normal lvalue store.
3130   } else {
3131     CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3132   }
3133 
3134   // Jump to the continuation block.
3135   if (!provablyNonNull)
3136     CGF.EmitBlock(contBB);
3137 }
3138 
3139 static void emitWritebacks(CodeGenFunction &CGF,
3140                            const CallArgList &args) {
3141   for (const auto &I : args.writebacks())
3142     emitWriteback(CGF, I);
3143 }
3144 
3145 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3146                                             const CallArgList &CallArgs) {
3147   ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3148     CallArgs.getCleanupsToDeactivate();
3149   // Iterate in reverse to increase the likelihood of popping the cleanup.
3150   for (const auto &I : llvm::reverse(Cleanups)) {
3151     CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3152     I.IsActiveIP->eraseFromParent();
3153   }
3154 }
3155 
3156 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3157   if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3158     if (uop->getOpcode() == UO_AddrOf)
3159       return uop->getSubExpr();
3160   return nullptr;
3161 }
3162 
3163 /// Emit an argument that's being passed call-by-writeback.  That is,
3164 /// we are passing the address of an __autoreleased temporary; it
3165 /// might be copy-initialized with the current value of the given
3166 /// address, but it will definitely be copied out of after the call.
3167 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3168                              const ObjCIndirectCopyRestoreExpr *CRE) {
3169   LValue srcLV;
3170 
3171   // Make an optimistic effort to emit the address as an l-value.
3172   // This can fail if the argument expression is more complicated.
3173   if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3174     srcLV = CGF.EmitLValue(lvExpr);
3175 
3176   // Otherwise, just emit it as a scalar.
3177   } else {
3178     Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3179 
3180     QualType srcAddrType =
3181       CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3182     srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3183   }
3184   Address srcAddr = srcLV.getAddress();
3185 
3186   // The dest and src types don't necessarily match in LLVM terms
3187   // because of the crazy ObjC compatibility rules.
3188 
3189   llvm::PointerType *destType =
3190     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3191 
3192   // If the address is a constant null, just pass the appropriate null.
3193   if (isProvablyNull(srcAddr.getPointer())) {
3194     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3195              CRE->getType());
3196     return;
3197   }
3198 
3199   // Create the temporary.
3200   Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3201                                       CGF.getPointerAlign(),
3202                                       "icr.temp");
3203   // Loading an l-value can introduce a cleanup if the l-value is __weak,
3204   // and that cleanup will be conditional if we can't prove that the l-value
3205   // isn't null, so we need to register a dominating point so that the cleanups
3206   // system will make valid IR.
3207   CodeGenFunction::ConditionalEvaluation condEval(CGF);
3208 
3209   // Zero-initialize it if we're not doing a copy-initialization.
3210   bool shouldCopy = CRE->shouldCopy();
3211   if (!shouldCopy) {
3212     llvm::Value *null =
3213       llvm::ConstantPointerNull::get(
3214         cast<llvm::PointerType>(destType->getElementType()));
3215     CGF.Builder.CreateStore(null, temp);
3216   }
3217 
3218   llvm::BasicBlock *contBB = nullptr;
3219   llvm::BasicBlock *originBB = nullptr;
3220 
3221   // If the address is *not* known to be non-null, we need to switch.
3222   llvm::Value *finalArgument;
3223 
3224   bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3225                                               CGF.CGM.getDataLayout());
3226   if (provablyNonNull) {
3227     finalArgument = temp.getPointer();
3228   } else {
3229     llvm::Value *isNull =
3230       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3231 
3232     finalArgument = CGF.Builder.CreateSelect(isNull,
3233                                    llvm::ConstantPointerNull::get(destType),
3234                                              temp.getPointer(), "icr.argument");
3235 
3236     // If we need to copy, then the load has to be conditional, which
3237     // means we need control flow.
3238     if (shouldCopy) {
3239       originBB = CGF.Builder.GetInsertBlock();
3240       contBB = CGF.createBasicBlock("icr.cont");
3241       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3242       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3243       CGF.EmitBlock(copyBB);
3244       condEval.begin(CGF);
3245     }
3246   }
3247 
3248   llvm::Value *valueToUse = nullptr;
3249 
3250   // Perform a copy if necessary.
3251   if (shouldCopy) {
3252     RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3253     assert(srcRV.isScalar());
3254 
3255     llvm::Value *src = srcRV.getScalarVal();
3256     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3257                                     "icr.cast");
3258 
3259     // Use an ordinary store, not a store-to-lvalue.
3260     CGF.Builder.CreateStore(src, temp);
3261 
3262     // If optimization is enabled, and the value was held in a
3263     // __strong variable, we need to tell the optimizer that this
3264     // value has to stay alive until we're doing the store back.
3265     // This is because the temporary is effectively unretained,
3266     // and so otherwise we can violate the high-level semantics.
3267     if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3268         srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3269       valueToUse = src;
3270     }
3271   }
3272 
3273   // Finish the control flow if we needed it.
3274   if (shouldCopy && !provablyNonNull) {
3275     llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3276     CGF.EmitBlock(contBB);
3277 
3278     // Make a phi for the value to intrinsically use.
3279     if (valueToUse) {
3280       llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3281                                                       "icr.to-use");
3282       phiToUse->addIncoming(valueToUse, copyBB);
3283       phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3284                             originBB);
3285       valueToUse = phiToUse;
3286     }
3287 
3288     condEval.end(CGF);
3289   }
3290 
3291   args.addWriteback(srcLV, temp, valueToUse);
3292   args.add(RValue::get(finalArgument), CRE->getType());
3293 }
3294 
3295 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3296   assert(!StackBase);
3297 
3298   // Save the stack.
3299   llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3300   StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3301 }
3302 
3303 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3304   if (StackBase) {
3305     // Restore the stack after the call.
3306     llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3307     CGF.Builder.CreateCall(F, StackBase);
3308   }
3309 }
3310 
3311 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3312                                           SourceLocation ArgLoc,
3313                                           AbstractCallee AC,
3314                                           unsigned ParmNum) {
3315   if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3316                          SanOpts.has(SanitizerKind::NullabilityArg)))
3317     return;
3318 
3319   // The param decl may be missing in a variadic function.
3320   auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3321   unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3322 
3323   // Prefer the nonnull attribute if it's present.
3324   const NonNullAttr *NNAttr = nullptr;
3325   if (SanOpts.has(SanitizerKind::NonnullAttribute))
3326     NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3327 
3328   bool CanCheckNullability = false;
3329   if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3330     auto Nullability = PVD->getType()->getNullability(getContext());
3331     CanCheckNullability = Nullability &&
3332                           *Nullability == NullabilityKind::NonNull &&
3333                           PVD->getTypeSourceInfo();
3334   }
3335 
3336   if (!NNAttr && !CanCheckNullability)
3337     return;
3338 
3339   SourceLocation AttrLoc;
3340   SanitizerMask CheckKind;
3341   SanitizerHandler Handler;
3342   if (NNAttr) {
3343     AttrLoc = NNAttr->getLocation();
3344     CheckKind = SanitizerKind::NonnullAttribute;
3345     Handler = SanitizerHandler::NonnullArg;
3346   } else {
3347     AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3348     CheckKind = SanitizerKind::NullabilityArg;
3349     Handler = SanitizerHandler::NullabilityArg;
3350   }
3351 
3352   SanitizerScope SanScope(this);
3353   assert(RV.isScalar());
3354   llvm::Value *V = RV.getScalarVal();
3355   llvm::Value *Cond =
3356       Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3357   llvm::Constant *StaticData[] = {
3358       EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3359       llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3360   };
3361   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3362 }
3363 
3364 void CodeGenFunction::EmitCallArgs(
3365     CallArgList &Args, ArrayRef<QualType> ArgTypes,
3366     llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3367     AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3368   assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3369 
3370   // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3371   // because arguments are destroyed left to right in the callee. As a special
3372   // case, there are certain language constructs that require left-to-right
3373   // evaluation, and in those cases we consider the evaluation order requirement
3374   // to trump the "destruction order is reverse construction order" guarantee.
3375   bool LeftToRight =
3376       CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3377           ? Order == EvaluationOrder::ForceLeftToRight
3378           : Order != EvaluationOrder::ForceRightToLeft;
3379 
3380   auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3381                                          RValue EmittedArg) {
3382     if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3383       return;
3384     auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3385     if (PS == nullptr)
3386       return;
3387 
3388     const auto &Context = getContext();
3389     auto SizeTy = Context.getSizeType();
3390     auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3391     assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3392     llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3393                                                      EmittedArg.getScalarVal());
3394     Args.add(RValue::get(V), SizeTy);
3395     // If we're emitting args in reverse, be sure to do so with
3396     // pass_object_size, as well.
3397     if (!LeftToRight)
3398       std::swap(Args.back(), *(&Args.back() - 1));
3399   };
3400 
3401   // Insert a stack save if we're going to need any inalloca args.
3402   bool HasInAllocaArgs = false;
3403   if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3404     for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3405          I != E && !HasInAllocaArgs; ++I)
3406       HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3407     if (HasInAllocaArgs) {
3408       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3409       Args.allocateArgumentMemory(*this);
3410     }
3411   }
3412 
3413   // Evaluate each argument in the appropriate order.
3414   size_t CallArgsStart = Args.size();
3415   for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3416     unsigned Idx = LeftToRight ? I : E - I - 1;
3417     CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3418     unsigned InitialArgSize = Args.size();
3419     // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3420     // the argument and parameter match or the objc method is parameterized.
3421     assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3422             getContext().hasSameUnqualifiedType((*Arg)->getType(),
3423                                                 ArgTypes[Idx]) ||
3424             (isa<ObjCMethodDecl>(AC.getDecl()) &&
3425              isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3426            "Argument and parameter types don't match");
3427     EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3428     // In particular, we depend on it being the last arg in Args, and the
3429     // objectsize bits depend on there only being one arg if !LeftToRight.
3430     assert(InitialArgSize + 1 == Args.size() &&
3431            "The code below depends on only adding one arg per EmitCallArg");
3432     (void)InitialArgSize;
3433     // Since pointer argument are never emitted as LValue, it is safe to emit
3434     // non-null argument check for r-value only.
3435     if (!Args.back().hasLValue()) {
3436       RValue RVArg = Args.back().getKnownRValue();
3437       EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3438                           ParamsToSkip + Idx);
3439       // @llvm.objectsize should never have side-effects and shouldn't need
3440       // destruction/cleanups, so we can safely "emit" it after its arg,
3441       // regardless of right-to-leftness
3442       MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3443     }
3444   }
3445 
3446   if (!LeftToRight) {
3447     // Un-reverse the arguments we just evaluated so they match up with the LLVM
3448     // IR function.
3449     std::reverse(Args.begin() + CallArgsStart, Args.end());
3450   }
3451 }
3452 
3453 namespace {
3454 
3455 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3456   DestroyUnpassedArg(Address Addr, QualType Ty)
3457       : Addr(Addr), Ty(Ty) {}
3458 
3459   Address Addr;
3460   QualType Ty;
3461 
3462   void Emit(CodeGenFunction &CGF, Flags flags) override {
3463     QualType::DestructionKind DtorKind = Ty.isDestructedType();
3464     if (DtorKind == QualType::DK_cxx_destructor) {
3465       const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3466       assert(!Dtor->isTrivial());
3467       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3468                                 /*Delegating=*/false, Addr);
3469     } else {
3470       CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3471     }
3472   }
3473 };
3474 
3475 struct DisableDebugLocationUpdates {
3476   CodeGenFunction &CGF;
3477   bool disabledDebugInfo;
3478   DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3479     if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3480       CGF.disableDebugInfo();
3481   }
3482   ~DisableDebugLocationUpdates() {
3483     if (disabledDebugInfo)
3484       CGF.enableDebugInfo();
3485   }
3486 };
3487 
3488 } // end anonymous namespace
3489 
3490 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3491   if (!HasLV)
3492     return RV;
3493   LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3494   CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3495                         LV.isVolatile());
3496   IsUsed = true;
3497   return RValue::getAggregate(Copy.getAddress());
3498 }
3499 
3500 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3501   LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3502   if (!HasLV && RV.isScalar())
3503     CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*init=*/true);
3504   else if (!HasLV && RV.isComplex())
3505     CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3506   else {
3507     auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
3508     LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3509     // We assume that call args are never copied into subobjects.
3510     CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3511                           HasLV ? LV.isVolatileQualified()
3512                                 : RV.isVolatileQualified());
3513   }
3514   IsUsed = true;
3515 }
3516 
3517 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3518                                   QualType type) {
3519   DisableDebugLocationUpdates Dis(*this, E);
3520   if (const ObjCIndirectCopyRestoreExpr *CRE
3521         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3522     assert(getLangOpts().ObjCAutoRefCount);
3523     return emitWritebackArg(*this, args, CRE);
3524   }
3525 
3526   assert(type->isReferenceType() == E->isGLValue() &&
3527          "reference binding to unmaterialized r-value!");
3528 
3529   if (E->isGLValue()) {
3530     assert(E->getObjectKind() == OK_Ordinary);
3531     return args.add(EmitReferenceBindingToExpr(E), type);
3532   }
3533 
3534   bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3535 
3536   // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3537   // However, we still have to push an EH-only cleanup in case we unwind before
3538   // we make it to the call.
3539   if (HasAggregateEvalKind && getContext().isParamDestroyedInCallee(type)) {
3540     // If we're using inalloca, use the argument memory.  Otherwise, use a
3541     // temporary.
3542     AggValueSlot Slot;
3543     if (args.isUsingInAlloca())
3544       Slot = createPlaceholderSlot(*this, type);
3545     else
3546       Slot = CreateAggTemp(type, "agg.tmp");
3547 
3548     bool DestroyedInCallee = true, NeedsEHCleanup = true;
3549     if (const auto *RD = type->getAsCXXRecordDecl())
3550       DestroyedInCallee = RD->hasNonTrivialDestructor();
3551     else
3552       NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3553 
3554     if (DestroyedInCallee)
3555       Slot.setExternallyDestructed();
3556 
3557     EmitAggExpr(E, Slot);
3558     RValue RV = Slot.asRValue();
3559     args.add(RV, type);
3560 
3561     if (DestroyedInCallee && NeedsEHCleanup) {
3562       // Create a no-op GEP between the placeholder and the cleanup so we can
3563       // RAUW it successfully.  It also serves as a marker of the first
3564       // instruction where the cleanup is active.
3565       pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3566                                               type);
3567       // This unreachable is a temporary marker which will be removed later.
3568       llvm::Instruction *IsActive = Builder.CreateUnreachable();
3569       args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3570     }
3571     return;
3572   }
3573 
3574   if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3575       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3576     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3577     assert(L.isSimple());
3578     args.addUncopiedAggregate(L, type);
3579     return;
3580   }
3581 
3582   args.add(EmitAnyExprToTemp(E), type);
3583 }
3584 
3585 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3586   // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3587   // implicitly widens null pointer constants that are arguments to varargs
3588   // functions to pointer-sized ints.
3589   if (!getTarget().getTriple().isOSWindows())
3590     return Arg->getType();
3591 
3592   if (Arg->getType()->isIntegerType() &&
3593       getContext().getTypeSize(Arg->getType()) <
3594           getContext().getTargetInfo().getPointerWidth(0) &&
3595       Arg->isNullPointerConstant(getContext(),
3596                                  Expr::NPC_ValueDependentIsNotNull)) {
3597     return getContext().getIntPtrType();
3598   }
3599 
3600   return Arg->getType();
3601 }
3602 
3603 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3604 // optimizer it can aggressively ignore unwind edges.
3605 void
3606 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3607   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3608       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3609     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3610                       CGM.getNoObjCARCExceptionsMetadata());
3611 }
3612 
3613 /// Emits a call to the given no-arguments nounwind runtime function.
3614 llvm::CallInst *
3615 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3616                                          const llvm::Twine &name) {
3617   return EmitNounwindRuntimeCall(callee, None, name);
3618 }
3619 
3620 /// Emits a call to the given nounwind runtime function.
3621 llvm::CallInst *
3622 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3623                                          ArrayRef<llvm::Value*> args,
3624                                          const llvm::Twine &name) {
3625   llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3626   call->setDoesNotThrow();
3627   return call;
3628 }
3629 
3630 /// Emits a simple call (never an invoke) to the given no-arguments
3631 /// runtime function.
3632 llvm::CallInst *
3633 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3634                                  const llvm::Twine &name) {
3635   return EmitRuntimeCall(callee, None, name);
3636 }
3637 
3638 // Calls which may throw must have operand bundles indicating which funclet
3639 // they are nested within.
3640 SmallVector<llvm::OperandBundleDef, 1>
3641 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3642   SmallVector<llvm::OperandBundleDef, 1> BundleList;
3643   // There is no need for a funclet operand bundle if we aren't inside a
3644   // funclet.
3645   if (!CurrentFuncletPad)
3646     return BundleList;
3647 
3648   // Skip intrinsics which cannot throw.
3649   auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3650   if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3651     return BundleList;
3652 
3653   BundleList.emplace_back("funclet", CurrentFuncletPad);
3654   return BundleList;
3655 }
3656 
3657 /// Emits a simple call (never an invoke) to the given runtime function.
3658 llvm::CallInst *
3659 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3660                                  ArrayRef<llvm::Value*> args,
3661                                  const llvm::Twine &name) {
3662   llvm::CallInst *call =
3663       Builder.CreateCall(callee, args, getBundlesForFunclet(callee), name);
3664   call->setCallingConv(getRuntimeCC());
3665   return call;
3666 }
3667 
3668 /// Emits a call or invoke to the given noreturn runtime function.
3669 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3670                                                ArrayRef<llvm::Value*> args) {
3671   SmallVector<llvm::OperandBundleDef, 1> BundleList =
3672       getBundlesForFunclet(callee);
3673 
3674   if (getInvokeDest()) {
3675     llvm::InvokeInst *invoke =
3676       Builder.CreateInvoke(callee,
3677                            getUnreachableBlock(),
3678                            getInvokeDest(),
3679                            args,
3680                            BundleList);
3681     invoke->setDoesNotReturn();
3682     invoke->setCallingConv(getRuntimeCC());
3683   } else {
3684     llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3685     call->setDoesNotReturn();
3686     call->setCallingConv(getRuntimeCC());
3687     Builder.CreateUnreachable();
3688   }
3689 }
3690 
3691 /// Emits a call or invoke instruction to the given nullary runtime function.
3692 llvm::CallSite
3693 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3694                                          const Twine &name) {
3695   return EmitRuntimeCallOrInvoke(callee, None, name);
3696 }
3697 
3698 /// Emits a call or invoke instruction to the given runtime function.
3699 llvm::CallSite
3700 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3701                                          ArrayRef<llvm::Value*> args,
3702                                          const Twine &name) {
3703   llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3704   callSite.setCallingConv(getRuntimeCC());
3705   return callSite;
3706 }
3707 
3708 /// Emits a call or invoke instruction to the given function, depending
3709 /// on the current state of the EH stack.
3710 llvm::CallSite
3711 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3712                                   ArrayRef<llvm::Value *> Args,
3713                                   const Twine &Name) {
3714   llvm::BasicBlock *InvokeDest = getInvokeDest();
3715   SmallVector<llvm::OperandBundleDef, 1> BundleList =
3716       getBundlesForFunclet(Callee);
3717 
3718   llvm::Instruction *Inst;
3719   if (!InvokeDest)
3720     Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3721   else {
3722     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3723     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3724                                 Name);
3725     EmitBlock(ContBB);
3726   }
3727 
3728   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3729   // optimizer it can aggressively ignore unwind edges.
3730   if (CGM.getLangOpts().ObjCAutoRefCount)
3731     AddObjCARCExceptionMetadata(Inst);
3732 
3733   return llvm::CallSite(Inst);
3734 }
3735 
3736 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3737                                                   llvm::Value *New) {
3738   DeferredReplacements.push_back(std::make_pair(Old, New));
3739 }
3740 
3741 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3742                                  const CGCallee &Callee,
3743                                  ReturnValueSlot ReturnValue,
3744                                  const CallArgList &CallArgs,
3745                                  llvm::Instruction **callOrInvoke,
3746                                  SourceLocation Loc) {
3747   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3748 
3749   assert(Callee.isOrdinary() || Callee.isVirtual());
3750 
3751   // Handle struct-return functions by passing a pointer to the
3752   // location that we would like to return into.
3753   QualType RetTy = CallInfo.getReturnType();
3754   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3755 
3756   llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
3757 
3758   // 1. Set up the arguments.
3759 
3760   // If we're using inalloca, insert the allocation after the stack save.
3761   // FIXME: Do this earlier rather than hacking it in here!
3762   Address ArgMemory = Address::invalid();
3763   const llvm::StructLayout *ArgMemoryLayout = nullptr;
3764   if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3765     const llvm::DataLayout &DL = CGM.getDataLayout();
3766     ArgMemoryLayout = DL.getStructLayout(ArgStruct);
3767     llvm::Instruction *IP = CallArgs.getStackBase();
3768     llvm::AllocaInst *AI;
3769     if (IP) {
3770       IP = IP->getNextNode();
3771       AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3772                                 "argmem", IP);
3773     } else {
3774       AI = CreateTempAlloca(ArgStruct, "argmem");
3775     }
3776     auto Align = CallInfo.getArgStructAlignment();
3777     AI->setAlignment(Align.getQuantity());
3778     AI->setUsedWithInAlloca(true);
3779     assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3780     ArgMemory = Address(AI, Align);
3781   }
3782 
3783   // Helper function to drill into the inalloca allocation.
3784   auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3785     auto FieldOffset =
3786       CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3787     return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3788   };
3789 
3790   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3791   SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3792 
3793   // If the call returns a temporary with struct return, create a temporary
3794   // alloca to hold the result, unless one is given to us.
3795   Address SRetPtr = Address::invalid();
3796   llvm::Value *UnusedReturnSizePtr = nullptr;
3797   if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3798     if (!ReturnValue.isNull()) {
3799       SRetPtr = ReturnValue.getValue();
3800     } else {
3801       SRetPtr = CreateMemTemp(RetTy);
3802       if (HaveInsertPoint() && ReturnValue.isUnused()) {
3803         uint64_t size =
3804             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3805         UnusedReturnSizePtr = EmitLifetimeStart(size, SRetPtr.getPointer());
3806       }
3807     }
3808     if (IRFunctionArgs.hasSRetArg()) {
3809       IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3810     } else if (RetAI.isInAlloca()) {
3811       Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3812       Builder.CreateStore(SRetPtr.getPointer(), Addr);
3813     }
3814   }
3815 
3816   Address swiftErrorTemp = Address::invalid();
3817   Address swiftErrorArg = Address::invalid();
3818 
3819   // Translate all of the arguments as necessary to match the IR lowering.
3820   assert(CallInfo.arg_size() == CallArgs.size() &&
3821          "Mismatch between function signature & arguments.");
3822   unsigned ArgNo = 0;
3823   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3824   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3825        I != E; ++I, ++info_it, ++ArgNo) {
3826     const ABIArgInfo &ArgInfo = info_it->info;
3827 
3828     // Insert a padding argument to ensure proper alignment.
3829     if (IRFunctionArgs.hasPaddingArg(ArgNo))
3830       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3831           llvm::UndefValue::get(ArgInfo.getPaddingType());
3832 
3833     unsigned FirstIRArg, NumIRArgs;
3834     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3835 
3836     switch (ArgInfo.getKind()) {
3837     case ABIArgInfo::InAlloca: {
3838       assert(NumIRArgs == 0);
3839       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3840       if (I->isAggregate()) {
3841         // Replace the placeholder with the appropriate argument slot GEP.
3842         Address Addr = I->hasLValue()
3843                            ? I->getKnownLValue().getAddress()
3844                            : I->getKnownRValue().getAggregateAddress();
3845         llvm::Instruction *Placeholder =
3846             cast<llvm::Instruction>(Addr.getPointer());
3847         CGBuilderTy::InsertPoint IP = Builder.saveIP();
3848         Builder.SetInsertPoint(Placeholder);
3849         Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3850         Builder.restoreIP(IP);
3851         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3852       } else {
3853         // Store the RValue into the argument struct.
3854         Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3855         unsigned AS = Addr.getType()->getPointerAddressSpace();
3856         llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3857         // There are some cases where a trivial bitcast is not avoidable.  The
3858         // definition of a type later in a translation unit may change it's type
3859         // from {}* to (%struct.foo*)*.
3860         if (Addr.getType() != MemType)
3861           Addr = Builder.CreateBitCast(Addr, MemType);
3862         I->copyInto(*this, Addr);
3863       }
3864       break;
3865     }
3866 
3867     case ABIArgInfo::Indirect: {
3868       assert(NumIRArgs == 1);
3869       if (!I->isAggregate()) {
3870         // Make a temporary alloca to pass the argument.
3871         Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(),
3872                                      "indirect-arg-temp", false);
3873         IRCallArgs[FirstIRArg] = Addr.getPointer();
3874 
3875         I->copyInto(*this, Addr);
3876       } else {
3877         // We want to avoid creating an unnecessary temporary+copy here;
3878         // however, we need one in three cases:
3879         // 1. If the argument is not byval, and we are required to copy the
3880         //    source.  (This case doesn't occur on any common architecture.)
3881         // 2. If the argument is byval, RV is not sufficiently aligned, and
3882         //    we cannot force it to be sufficiently aligned.
3883         // 3. If the argument is byval, but RV is not located in default
3884         //    or alloca address space.
3885         Address Addr = I->hasLValue()
3886                            ? I->getKnownLValue().getAddress()
3887                            : I->getKnownRValue().getAggregateAddress();
3888         llvm::Value *V = Addr.getPointer();
3889         CharUnits Align = ArgInfo.getIndirectAlign();
3890         const llvm::DataLayout *TD = &CGM.getDataLayout();
3891 
3892         assert((FirstIRArg >= IRFuncTy->getNumParams() ||
3893                 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
3894                     TD->getAllocaAddrSpace()) &&
3895                "indirect argument must be in alloca address space");
3896 
3897         bool NeedCopy = false;
3898 
3899         if (Addr.getAlignment() < Align &&
3900             llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
3901                 Align.getQuantity()) {
3902           NeedCopy = true;
3903         } else if (I->hasLValue()) {
3904           auto LV = I->getKnownLValue();
3905           auto AS = LV.getAddressSpace();
3906           if ((!ArgInfo.getIndirectByVal() &&
3907                (LV.getAlignment() >=
3908                 getContext().getTypeAlignInChars(I->Ty))) ||
3909               (ArgInfo.getIndirectByVal() &&
3910                ((AS != LangAS::Default && AS != LangAS::opencl_private &&
3911                  AS != CGM.getASTAllocaAddressSpace())))) {
3912             NeedCopy = true;
3913           }
3914         }
3915         if (NeedCopy) {
3916           // Create an aligned temporary, and copy to it.
3917           Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign(),
3918                                      "byval-temp", false);
3919           IRCallArgs[FirstIRArg] = AI.getPointer();
3920           I->copyInto(*this, AI);
3921         } else {
3922           // Skip the extra memcpy call.
3923           auto *T = V->getType()->getPointerElementType()->getPointerTo(
3924               CGM.getDataLayout().getAllocaAddrSpace());
3925           IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
3926               *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
3927               true);
3928         }
3929       }
3930       break;
3931     }
3932 
3933     case ABIArgInfo::Ignore:
3934       assert(NumIRArgs == 0);
3935       break;
3936 
3937     case ABIArgInfo::Extend:
3938     case ABIArgInfo::Direct: {
3939       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
3940           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
3941           ArgInfo.getDirectOffset() == 0) {
3942         assert(NumIRArgs == 1);
3943         llvm::Value *V;
3944         if (!I->isAggregate())
3945           V = I->getKnownRValue().getScalarVal();
3946         else
3947           V = Builder.CreateLoad(
3948               I->hasLValue() ? I->getKnownLValue().getAddress()
3949                              : I->getKnownRValue().getAggregateAddress());
3950 
3951         // Implement swifterror by copying into a new swifterror argument.
3952         // We'll write back in the normal path out of the call.
3953         if (CallInfo.getExtParameterInfo(ArgNo).getABI()
3954               == ParameterABI::SwiftErrorResult) {
3955           assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
3956 
3957           QualType pointeeTy = I->Ty->getPointeeType();
3958           swiftErrorArg =
3959             Address(V, getContext().getTypeAlignInChars(pointeeTy));
3960 
3961           swiftErrorTemp =
3962             CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
3963           V = swiftErrorTemp.getPointer();
3964           cast<llvm::AllocaInst>(V)->setSwiftError(true);
3965 
3966           llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
3967           Builder.CreateStore(errorValue, swiftErrorTemp);
3968         }
3969 
3970         // We might have to widen integers, but we should never truncate.
3971         if (ArgInfo.getCoerceToType() != V->getType() &&
3972             V->getType()->isIntegerTy())
3973           V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
3974 
3975         // If the argument doesn't match, perform a bitcast to coerce it.  This
3976         // can happen due to trivial type mismatches.
3977         if (FirstIRArg < IRFuncTy->getNumParams() &&
3978             V->getType() != IRFuncTy->getParamType(FirstIRArg))
3979           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
3980 
3981         IRCallArgs[FirstIRArg] = V;
3982         break;
3983       }
3984 
3985       // FIXME: Avoid the conversion through memory if possible.
3986       Address Src = Address::invalid();
3987       if (!I->isAggregate()) {
3988         Src = CreateMemTemp(I->Ty, "coerce");
3989         I->copyInto(*this, Src);
3990       } else {
3991         Src = I->hasLValue() ? I->getKnownLValue().getAddress()
3992                              : I->getKnownRValue().getAggregateAddress();
3993       }
3994 
3995       // If the value is offset in memory, apply the offset now.
3996       Src = emitAddressAtOffset(*this, Src, ArgInfo);
3997 
3998       // Fast-isel and the optimizer generally like scalar values better than
3999       // FCAs, so we flatten them if this is safe to do for this argument.
4000       llvm::StructType *STy =
4001             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4002       if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4003         llvm::Type *SrcTy = Src.getType()->getElementType();
4004         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4005         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4006 
4007         // If the source type is smaller than the destination type of the
4008         // coerce-to logic, copy the source value into a temp alloca the size
4009         // of the destination type to allow loading all of it. The bits past
4010         // the source value are left undef.
4011         if (SrcSize < DstSize) {
4012           Address TempAlloca
4013             = CreateTempAlloca(STy, Src.getAlignment(),
4014                                Src.getName() + ".coerce");
4015           Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4016           Src = TempAlloca;
4017         } else {
4018           Src = Builder.CreateBitCast(Src,
4019                                       STy->getPointerTo(Src.getAddressSpace()));
4020         }
4021 
4022         auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
4023         assert(NumIRArgs == STy->getNumElements());
4024         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4025           auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
4026           Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
4027           llvm::Value *LI = Builder.CreateLoad(EltPtr);
4028           IRCallArgs[FirstIRArg + i] = LI;
4029         }
4030       } else {
4031         // In the simple case, just pass the coerced loaded value.
4032         assert(NumIRArgs == 1);
4033         IRCallArgs[FirstIRArg] =
4034           CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4035       }
4036 
4037       break;
4038     }
4039 
4040     case ABIArgInfo::CoerceAndExpand: {
4041       auto coercionType = ArgInfo.getCoerceAndExpandType();
4042       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4043 
4044       llvm::Value *tempSize = nullptr;
4045       Address addr = Address::invalid();
4046       if (I->isAggregate()) {
4047         addr = I->hasLValue() ? I->getKnownLValue().getAddress()
4048                               : I->getKnownRValue().getAggregateAddress();
4049 
4050       } else {
4051         RValue RV = I->getKnownRValue();
4052         assert(RV.isScalar()); // complex should always just be direct
4053 
4054         llvm::Type *scalarType = RV.getScalarVal()->getType();
4055         auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4056         auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4057 
4058         // Materialize to a temporary.
4059         addr = CreateTempAlloca(RV.getScalarVal()->getType(),
4060                  CharUnits::fromQuantity(std::max(layout->getAlignment(),
4061                                                   scalarAlign)));
4062         tempSize = EmitLifetimeStart(scalarSize, addr.getPointer());
4063 
4064         Builder.CreateStore(RV.getScalarVal(), addr);
4065       }
4066 
4067       addr = Builder.CreateElementBitCast(addr, coercionType);
4068 
4069       unsigned IRArgPos = FirstIRArg;
4070       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4071         llvm::Type *eltType = coercionType->getElementType(i);
4072         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4073         Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4074         llvm::Value *elt = Builder.CreateLoad(eltAddr);
4075         IRCallArgs[IRArgPos++] = elt;
4076       }
4077       assert(IRArgPos == FirstIRArg + NumIRArgs);
4078 
4079       if (tempSize) {
4080         EmitLifetimeEnd(tempSize, addr.getPointer());
4081       }
4082 
4083       break;
4084     }
4085 
4086     case ABIArgInfo::Expand:
4087       unsigned IRArgPos = FirstIRArg;
4088       ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4089       assert(IRArgPos == FirstIRArg + NumIRArgs);
4090       break;
4091     }
4092   }
4093 
4094   const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4095   llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4096 
4097   // If we're using inalloca, set up that argument.
4098   if (ArgMemory.isValid()) {
4099     llvm::Value *Arg = ArgMemory.getPointer();
4100     if (CallInfo.isVariadic()) {
4101       // When passing non-POD arguments by value to variadic functions, we will
4102       // end up with a variadic prototype and an inalloca call site.  In such
4103       // cases, we can't do any parameter mismatch checks.  Give up and bitcast
4104       // the callee.
4105       unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4106       auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
4107       CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
4108     } else {
4109       llvm::Type *LastParamTy =
4110           IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4111       if (Arg->getType() != LastParamTy) {
4112 #ifndef NDEBUG
4113         // Assert that these structs have equivalent element types.
4114         llvm::StructType *FullTy = CallInfo.getArgStruct();
4115         llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4116             cast<llvm::PointerType>(LastParamTy)->getElementType());
4117         assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4118         for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4119                                                 DE = DeclaredTy->element_end(),
4120                                                 FI = FullTy->element_begin();
4121              DI != DE; ++DI, ++FI)
4122           assert(*DI == *FI);
4123 #endif
4124         Arg = Builder.CreateBitCast(Arg, LastParamTy);
4125       }
4126     }
4127     assert(IRFunctionArgs.hasInallocaArg());
4128     IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4129   }
4130 
4131   // 2. Prepare the function pointer.
4132 
4133   // If the callee is a bitcast of a non-variadic function to have a
4134   // variadic function pointer type, check to see if we can remove the
4135   // bitcast.  This comes up with unprototyped functions.
4136   //
4137   // This makes the IR nicer, but more importantly it ensures that we
4138   // can inline the function at -O0 if it is marked always_inline.
4139   auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
4140     llvm::FunctionType *CalleeFT =
4141       cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
4142     if (!CalleeFT->isVarArg())
4143       return Ptr;
4144 
4145     llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
4146     if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
4147       return Ptr;
4148 
4149     llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
4150     if (!OrigFn)
4151       return Ptr;
4152 
4153     llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4154 
4155     // If the original type is variadic, or if any of the component types
4156     // disagree, we cannot remove the cast.
4157     if (OrigFT->isVarArg() ||
4158         OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4159         OrigFT->getReturnType() != CalleeFT->getReturnType())
4160       return Ptr;
4161 
4162     for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4163       if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4164         return Ptr;
4165 
4166     return OrigFn;
4167   };
4168   CalleePtr = simplifyVariadicCallee(CalleePtr);
4169 
4170   // 3. Perform the actual call.
4171 
4172   // Deactivate any cleanups that we're supposed to do immediately before
4173   // the call.
4174   if (!CallArgs.getCleanupsToDeactivate().empty())
4175     deactivateArgCleanupsBeforeCall(*this, CallArgs);
4176 
4177   // Assert that the arguments we computed match up.  The IR verifier
4178   // will catch this, but this is a common enough source of problems
4179   // during IRGen changes that it's way better for debugging to catch
4180   // it ourselves here.
4181 #ifndef NDEBUG
4182   assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4183   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4184     // Inalloca argument can have different type.
4185     if (IRFunctionArgs.hasInallocaArg() &&
4186         i == IRFunctionArgs.getInallocaArgNo())
4187       continue;
4188     if (i < IRFuncTy->getNumParams())
4189       assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4190   }
4191 #endif
4192 
4193   // Compute the calling convention and attributes.
4194   unsigned CallingConv;
4195   llvm::AttributeList Attrs;
4196   CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4197                              Callee.getAbstractInfo(), Attrs, CallingConv,
4198                              /*AttrOnCallSite=*/true);
4199 
4200   // Apply some call-site-specific attributes.
4201   // TODO: work this into building the attribute set.
4202 
4203   // Apply always_inline to all calls within flatten functions.
4204   // FIXME: should this really take priority over __try, below?
4205   if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4206       !(Callee.getAbstractInfo().getCalleeDecl() &&
4207         Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) {
4208     Attrs =
4209         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4210                            llvm::Attribute::AlwaysInline);
4211   }
4212 
4213   // Disable inlining inside SEH __try blocks.
4214   if (isSEHTryScope()) {
4215     Attrs =
4216         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4217                            llvm::Attribute::NoInline);
4218   }
4219 
4220   // Decide whether to use a call or an invoke.
4221   bool CannotThrow;
4222   if (currentFunctionUsesSEHTry()) {
4223     // SEH cares about asynchronous exceptions, so everything can "throw."
4224     CannotThrow = false;
4225   } else if (isCleanupPadScope() &&
4226              EHPersonality::get(*this).isMSVCXXPersonality()) {
4227     // The MSVC++ personality will implicitly terminate the program if an
4228     // exception is thrown during a cleanup outside of a try/catch.
4229     // We don't need to model anything in IR to get this behavior.
4230     CannotThrow = true;
4231   } else {
4232     // Otherwise, nounwind call sites will never throw.
4233     CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4234                                      llvm::Attribute::NoUnwind);
4235   }
4236 
4237   // If we made a temporary, be sure to clean up after ourselves. Note that we
4238   // can't depend on being inside of an ExprWithCleanups, so we need to manually
4239   // pop this cleanup later on. Being eager about this is OK, since this
4240   // temporary is 'invisible' outside of the callee.
4241   if (UnusedReturnSizePtr)
4242     pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetPtr,
4243                                          UnusedReturnSizePtr);
4244 
4245   llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4246 
4247   SmallVector<llvm::OperandBundleDef, 1> BundleList =
4248       getBundlesForFunclet(CalleePtr);
4249 
4250   // Emit the actual call/invoke instruction.
4251   llvm::CallSite CS;
4252   if (!InvokeDest) {
4253     CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
4254   } else {
4255     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4256     CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
4257                               BundleList);
4258     EmitBlock(Cont);
4259   }
4260   llvm::Instruction *CI = CS.getInstruction();
4261   if (callOrInvoke)
4262     *callOrInvoke = CI;
4263 
4264   // Apply the attributes and calling convention.
4265   CS.setAttributes(Attrs);
4266   CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4267 
4268   // Apply various metadata.
4269 
4270   if (!CI->getType()->isVoidTy())
4271     CI->setName("call");
4272 
4273   // Insert instrumentation or attach profile metadata at indirect call sites.
4274   // For more details, see the comment before the definition of
4275   // IPVK_IndirectCallTarget in InstrProfData.inc.
4276   if (!CS.getCalledFunction())
4277     PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4278                      CI, CalleePtr);
4279 
4280   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4281   // optimizer it can aggressively ignore unwind edges.
4282   if (CGM.getLangOpts().ObjCAutoRefCount)
4283     AddObjCARCExceptionMetadata(CI);
4284 
4285   // Suppress tail calls if requested.
4286   if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4287     const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4288     if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4289       Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4290   }
4291 
4292   // 4. Finish the call.
4293 
4294   // If the call doesn't return, finish the basic block and clear the
4295   // insertion point; this allows the rest of IRGen to discard
4296   // unreachable code.
4297   if (CS.doesNotReturn()) {
4298     if (UnusedReturnSizePtr)
4299       PopCleanupBlock();
4300 
4301     // Strip away the noreturn attribute to better diagnose unreachable UB.
4302     if (SanOpts.has(SanitizerKind::Unreachable)) {
4303       if (auto *F = CS.getCalledFunction())
4304         F->removeFnAttr(llvm::Attribute::NoReturn);
4305       CS.removeAttribute(llvm::AttributeList::FunctionIndex,
4306                          llvm::Attribute::NoReturn);
4307     }
4308 
4309     EmitUnreachable(Loc);
4310     Builder.ClearInsertionPoint();
4311 
4312     // FIXME: For now, emit a dummy basic block because expr emitters in
4313     // generally are not ready to handle emitting expressions at unreachable
4314     // points.
4315     EnsureInsertPoint();
4316 
4317     // Return a reasonable RValue.
4318     return GetUndefRValue(RetTy);
4319   }
4320 
4321   // Perform the swifterror writeback.
4322   if (swiftErrorTemp.isValid()) {
4323     llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4324     Builder.CreateStore(errorResult, swiftErrorArg);
4325   }
4326 
4327   // Emit any call-associated writebacks immediately.  Arguably this
4328   // should happen after any return-value munging.
4329   if (CallArgs.hasWritebacks())
4330     emitWritebacks(*this, CallArgs);
4331 
4332   // The stack cleanup for inalloca arguments has to run out of the normal
4333   // lexical order, so deactivate it and run it manually here.
4334   CallArgs.freeArgumentMemory(*this);
4335 
4336   // Extract the return value.
4337   RValue Ret = [&] {
4338     switch (RetAI.getKind()) {
4339     case ABIArgInfo::CoerceAndExpand: {
4340       auto coercionType = RetAI.getCoerceAndExpandType();
4341       auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4342 
4343       Address addr = SRetPtr;
4344       addr = Builder.CreateElementBitCast(addr, coercionType);
4345 
4346       assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4347       bool requiresExtract = isa<llvm::StructType>(CI->getType());
4348 
4349       unsigned unpaddedIndex = 0;
4350       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4351         llvm::Type *eltType = coercionType->getElementType(i);
4352         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4353         Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4354         llvm::Value *elt = CI;
4355         if (requiresExtract)
4356           elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4357         else
4358           assert(unpaddedIndex == 0);
4359         Builder.CreateStore(elt, eltAddr);
4360       }
4361       // FALLTHROUGH
4362       LLVM_FALLTHROUGH;
4363     }
4364 
4365     case ABIArgInfo::InAlloca:
4366     case ABIArgInfo::Indirect: {
4367       RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4368       if (UnusedReturnSizePtr)
4369         PopCleanupBlock();
4370       return ret;
4371     }
4372 
4373     case ABIArgInfo::Ignore:
4374       // If we are ignoring an argument that had a result, make sure to
4375       // construct the appropriate return value for our caller.
4376       return GetUndefRValue(RetTy);
4377 
4378     case ABIArgInfo::Extend:
4379     case ABIArgInfo::Direct: {
4380       llvm::Type *RetIRTy = ConvertType(RetTy);
4381       if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4382         switch (getEvaluationKind(RetTy)) {
4383         case TEK_Complex: {
4384           llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4385           llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4386           return RValue::getComplex(std::make_pair(Real, Imag));
4387         }
4388         case TEK_Aggregate: {
4389           Address DestPtr = ReturnValue.getValue();
4390           bool DestIsVolatile = ReturnValue.isVolatile();
4391 
4392           if (!DestPtr.isValid()) {
4393             DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4394             DestIsVolatile = false;
4395           }
4396           BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4397           return RValue::getAggregate(DestPtr);
4398         }
4399         case TEK_Scalar: {
4400           // If the argument doesn't match, perform a bitcast to coerce it.  This
4401           // can happen due to trivial type mismatches.
4402           llvm::Value *V = CI;
4403           if (V->getType() != RetIRTy)
4404             V = Builder.CreateBitCast(V, RetIRTy);
4405           return RValue::get(V);
4406         }
4407         }
4408         llvm_unreachable("bad evaluation kind");
4409       }
4410 
4411       Address DestPtr = ReturnValue.getValue();
4412       bool DestIsVolatile = ReturnValue.isVolatile();
4413 
4414       if (!DestPtr.isValid()) {
4415         DestPtr = CreateMemTemp(RetTy, "coerce");
4416         DestIsVolatile = false;
4417       }
4418 
4419       // If the value is offset in memory, apply the offset now.
4420       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4421       CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4422 
4423       return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4424     }
4425 
4426     case ABIArgInfo::Expand:
4427       llvm_unreachable("Invalid ABI kind for return argument");
4428     }
4429 
4430     llvm_unreachable("Unhandled ABIArgInfo::Kind");
4431   } ();
4432 
4433   // Emit the assume_aligned check on the return value.
4434   const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4435   if (Ret.isScalar() && TargetDecl) {
4436     if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4437       llvm::Value *OffsetValue = nullptr;
4438       if (const auto *Offset = AA->getOffset())
4439         OffsetValue = EmitScalarExpr(Offset);
4440 
4441       llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4442       llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4443       EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
4444                               OffsetValue);
4445     } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4446       llvm::Value *ParamVal =
4447           CallArgs[AA->getParamIndex().getLLVMIndex()].getRValue(
4448               *this).getScalarVal();
4449       EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
4450     }
4451   }
4452 
4453   return Ret;
4454 }
4455 
4456 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4457   if (isVirtual()) {
4458     const CallExpr *CE = getVirtualCallExpr();
4459     return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4460         CGF, getVirtualMethodDecl(), getThisAddress(),
4461         getFunctionType(), CE ? CE->getLocStart() : SourceLocation());
4462   }
4463 
4464   return *this;
4465 }
4466 
4467 /* VarArg handling */
4468 
4469 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4470   VAListAddr = VE->isMicrosoftABI()
4471                  ? EmitMSVAListRef(VE->getSubExpr())
4472                  : EmitVAListRef(VE->getSubExpr());
4473   QualType Ty = VE->getType();
4474   if (VE->isMicrosoftABI())
4475     return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4476   return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4477 }
4478