//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), RequiredArgs(0));
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
                                 const CanQual<FunctionProtoType> &FPT,
                                 const FunctionDecl *FD) {
  // Fast path: with no declaration there can be no pass_object_size
  // attributes, so just append the prototype's parameter types.
  if (FD == nullptr) {
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  assert(FD->getNumParams() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (FD->getParamDecl(I)->hasAttr<PassObjectSizeAttr>())
      prefix.push_back(CGT.getContext().getSizeType());
  }
}
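
// For illustration only (a sketch, not a statement of the final ABI): given
//   void f(void *p __attribute__((pass_object_size(0))));
// the arranged parameter list becomes (void *, size_t), with the implicit
// size_t parameter following the pointer it describes.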

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, FTP, FD);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
///  so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, /*instanceMethod=*/true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  appendParameterTypes(*this, argTypes, FTP, MD);

  TheCXXABI.buildStructorSignature(MD, Type, argTypes);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 required);
}
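
// For example, under the Microsoft C++ ABI constructors have 'this'-return
// (HasThisReturn), so a constructor of class A is arranged to return A*
// (its own 'this' argument) rather than void.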

/// Arrange a call to a C++ constructor, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>(), FD);
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type.  The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->params()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, required);
}
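
// Roughly, a method such as
//   - (int)foo:(float)x;
// is arranged here as int(receiver-type, SEL, float): the receiver and the
// implicit _cmd selector precede the formal parameters.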

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs.  Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                .isNoProtoCallVariadic(args,
                                       cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments.  The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function call is essentially a free-function call with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, info, required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
    QualType resultType, const FunctionArgList &args,
    const FunctionType::ExtInfo &info, bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (auto Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg->getType()));

  RequiredArgs required =
    (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type.  This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     [](CanQualType T) { return T.isCanonicalAsParam(); }));

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required,
                          resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info.  We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(totalSizeToAlloc<ArgInfo>(argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
}  // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can occur here only in degenerate cases: all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero-length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero-length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}
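
// Illustrative sketch: for a type like
//   struct S { int a[2]; _Complex float c; };
// the expansion bottoms out at the leaf sequence (int, int, float, float):
// constant arrays are flattened element by element, records field by field,
// and complex types into their real and imaginary parts.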

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}
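
// Continuing the sketch above: for struct S { int a[2]; _Complex float c; },
// getExpansionSize returns 2 (array elements) + 2 (complex parts) = 4.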

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
    BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
      CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForField(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
                              [&](Address EltAddr) {
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = RV.getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of, try to gep into the struct to get
/// at its inner goodness.  Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
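
// Sketch of the dive (assumed IR): with %outer = type { { i64 } } and a
// DstSize of at most 8 bytes, two "coerce.dive" GEPs step down to the inner
// i64, so the coerced access reads or writes the i64 directly instead of
// going through the wrapper structs.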

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers.  This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
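
// Worked example for the endian split above: narrowing an i64 holding
// 0xAABBCCDD11223344 to i32 yields 0xAABBCCDD on a big-endian target
// (lshr 32, then trunc) but 0x11223344 on a little-endian one (plain trunc),
// exactly what storing the i64 and loading an i32 from the same address
// would produce on each target.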

/// CreateCoercedLoad - Create a load from \arg Src interpreted as a pointer
/// to an object of type \arg Ty, using the alignment of \arg Src.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
      false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory.  We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
      CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}
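
// Sketch of the resulting IR (names invented): storing a value %v of type
// { i32, i64 } becomes
//   %e0 = extractvalue { i32, i64 } %v, 0
//   store i32 %e0, i32* %dest.elt0
//   %e1 = extractvalue { i32, i64 } %v, 1
//   store i64 %e1, i64* %dest.elt1
// rather than a single first-class aggregate store.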

/// CreateCoercedStore - Create a store to \arg Dst from \arg Src,
/// where the source and destination may have different types, using
/// the alignment of \arg Dst.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateBitCast(Dst, llvm::PointerType::getUnqual(SrcTy));
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
    Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
        llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
        false);
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                             CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of the LLVM IR function corresponding to a single Clang
  /// argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns the index of the first IR argument corresponding to ArgNo, and
  /// the number of IR arguments it expands to.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::Expand: {
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second.  We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
}  // namespace
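
// Sketch of a resulting mapping (hypothetical target): for
//   struct Big f(struct Big x, int y);
// where Big is returned indirectly (sret) and x is Expand'ed to two i64s,
// the IR signature is void(%struct.Big* sret, i64, i64, i32), so
// getIRArgs(0) yields {1, 2} and getIRArgs(1) yields {3, 1}.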
1248 
1249 /***/
1250 
1251 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1252   return FI.getReturnInfo().isIndirect();
1253 }
1254 
1255 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1256   return ReturnTypeUsesSRet(FI) &&
1257          getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1258 }
1259 
1260 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1261   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1262     switch (BT->getKind()) {
1263     default:
1264       return false;
1265     case BuiltinType::Float:
1266       return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1267     case BuiltinType::Double:
1268       return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1269     case BuiltinType::LongDouble:
1270       return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1271     }
1272   }
1273 
1274   return false;
1275 }
1276 
1277 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1278   if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1279     if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1280       if (BT->getKind() == BuiltinType::LongDouble)
1281         return getTarget().useObjCFP2RetForComplexLongDouble();
1282     }
1283   }
1284 
1285   return false;
1286 }

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect:
  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo();
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}
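
// Illustrative example (target-dependent, not normative): on a target that
// coerces small aggregates to integers, the C declaration
//   struct S { int a, b; };
//   struct S f(struct S s);
// may be lowered here to the LLVM type 'i64 (i64)', while an aggregate too
// large to coerce would instead show up as an indirect '%struct.S*' argument
// (plus an sret pointer for the return, per the switch above).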

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info =
        &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
                                               llvm::AttrBuilder &FuncAttrs,
                                               const FunctionProtoType *FPT) {
  if (!FPT)
    return;

  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
      FPT->isNothrow(Ctx))
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}
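
// For example, a prototype with a resolved, non-throwing exception
// specification, such as 'void f() noexcept;' (or 'void f() throw();'),
// causes the 'nounwind' IR attribute to be added here.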

void CodeGenModule::ConstructAttributeList(
    StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
    AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;
  bool HasOptnone = false;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // If we have information about the function prototype, we can learn
  // attributes from there.
  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
                                     CalleeInfo.getCalleeFunctionProtoType());

  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      AddAttributesFromFunctionProtoType(
          getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overrides.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    }

    // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NonNull);

    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
  }

  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
  if (!HasOptnone) {
    if (CodeGenOpts.OptimizeSize)
      FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
    if (CodeGenOpts.OptimizeSize == 2)
      FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  }
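
  // Concretely (given that OptimizeSize is 1 for -Os and 2 for -Oz):
  // -Os yields 'optsize', while -Oz yields both 'optsize' and 'minsize'.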

  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
  if (CodeGenOpts.EnableSegmentedStacks &&
      !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
    FuncAttrs.addAttribute("split-stack");

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls ||
        CodeGenOpts.isNoBuiltinFunc(Name.data()))
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
    if (!CodeGenOpts.TrapFuncName.empty())
      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
  } else {
    // Attributes that should go on the function, but not the call site.
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    }

    bool DisableTailCalls =
        CodeGenOpts.DisableTailCalls ||
        (TargetDecl && TargetDecl->hasAttr<DisableTailCallsAttr>());
    FuncAttrs.addAttribute("disable-tail-calls",
                           llvm::toStringRef(DisableTailCalls));

    FuncAttrs.addAttribute("less-precise-fpmad",
                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
    FuncAttrs.addAttribute("no-infs-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
    FuncAttrs.addAttribute("no-nans-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
    FuncAttrs.addAttribute("unsafe-fp-math",
                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
    FuncAttrs.addAttribute("use-soft-float",
                           llvm::toStringRef(CodeGenOpts.SoftFloat));
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));

    if (CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("stackrealign");

    // Add target-cpu and target-features attributes to functions. If
    // we have a decl for the function and it has a target attribute then
    // parse that and add it to the feature set.
    StringRef TargetCPU = getTarget().getTargetOpts().CPU;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
    if (FD && FD->hasAttr<TargetAttr>()) {
      llvm::StringMap<bool> FeatureMap;
      getFunctionFeatureMap(FeatureMap, FD);

      // Produce the canonical string for this set of features.
      std::vector<std::string> Features;
      for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
                                                 ie = FeatureMap.end();
           it != ie; ++it)
        Features.push_back((it->second ? "+" : "-") + it->first().str());

      // Now add the target-cpu and target-features to the function.
      // While we populated the feature map above, we still need to
      // get and parse the target attribute so we can get the cpu for
      // the function.
      const auto *TD = FD->getAttr<TargetAttr>();
      TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
      if (ParsedAttr.second != "")
        TargetCPU = ParsedAttr.second;
      if (TargetCPU != "")
        FuncAttrs.addAttribute("target-cpu", TargetCPU);
      if (!Features.empty()) {
        std::sort(Features.begin(), Features.end());
        FuncAttrs.addAttribute(
            "target-features",
            llvm::join(Features.begin(), Features.end(), ","));
      }
    } else {
      // Otherwise just add the existing target cpu and target features to the
      // function.
      std::vector<std::string> &Features = getTarget().getTargetOpts().Features;
      if (TargetCPU != "")
        FuncAttrs.addAttribute("target-cpu", TargetCPU);
      if (!Features.empty()) {
        std::sort(Features.begin(), Features.end());
        FuncAttrs.addAttribute(
            "target-features",
            llvm::join(Features.begin(), Features.end(), ","));
      }
    }
  }
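
  // Illustrative result (exact feature lists vary by target): for
  //   __attribute__((target("avx2"))) void f();
  // compiled with -target-cpu corei7, the function would carry attributes
  // roughly like
  //   "target-cpu"="corei7" "target-features"="+avx,+avx2,..."
  // with the feature string sorted and comma-joined as built above.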

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    // FALL THROUGH
  case ABIArgInfo::Direct:
    if (RetAI.getInReg())
      RetAttrs.addAttribute(llvm::Attribute::InReg);
    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::InAlloca:
  case ABIArgInfo::Indirect: {
    // inalloca and sret disable readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
      .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
    QualType PTy = RefTy->getPointeeType();
    if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
      RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
                                        .getQuantity());
    else if (getContext().getTargetAddressSpace(PTy) == 0)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
  }

  // Attach return attributes.
  if (RetAttrs.hasAttributes()) {
    PAL.push_back(llvm::AttributeSet::get(
        getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
  }

  // Attach attributes to sret.
  if (IRFunctionArgs.hasSRetArg()) {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    PAL.push_back(llvm::AttributeSet::get(
        getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
  }

  // Attach attributes to inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    llvm::AttrBuilder Attrs;
    Attrs.addAttribute(llvm::Attribute::InAlloca);
    PAL.push_back(llvm::AttributeSet::get(
        getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
  }

  unsigned ArgNo = 0;
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
                                          E = FI.arg_end();
       I != E; ++I, ++ArgNo) {
    QualType ParamType = I->type;
    const ABIArgInfo &AI = I->info;
    llvm::AttrBuilder Attrs;

    // Add attribute for padding argument, if necessary.
    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
      if (AI.getPaddingInReg())
        PAL.push_back(llvm::AttributeSet::get(
            getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
            llvm::Attribute::InReg));
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
        if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
          Attrs.addAttribute(llvm::Attribute::SExt);
        else
          Attrs.addAttribute(llvm::Attribute::ZExt);
      }
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (ArgNo == 0 && FI.isChainCall())
        Attrs.addAttribute(llvm::Attribute::Nest);
      else if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);
      break;

    case ABIArgInfo::Indirect: {
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attribute::ByVal);

      CharUnits Align = AI.getIndirectAlign();

      // In a byval argument, it is important that the required
      // alignment of the type is honored, as LLVM might be creating a
      // *new* stack object, and needs to know what alignment to give
      // it. (Sometimes it can deduce a sensible alignment on its own,
      // but not if clang decides it must emit a packed struct, or the
      // user specifies increased alignment requirements.)
      //
      // This is different from indirect *not* byval, where the object
      // exists already, and the align attribute is purely
      // informative.
      assert(!Align.isZero());

      // For now, only add this when we have a byval argument.
      // TODO: be less lazy about updating test cases.
      if (AI.getIndirectByVal())
        Attrs.addAlignmentAttr(Align.getQuantity());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
      break;
    }
    case ABIArgInfo::Ignore:
    case ABIArgInfo::Expand:
      continue;

    case ABIArgInfo::InAlloca:
      // inalloca disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
          .removeAttribute(llvm::Attribute::ReadNone);
      continue;
    }

    if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
      QualType PTy = RefTy->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
        Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
                                       .getQuantity());
      else if (getContext().getTargetAddressSpace(PTy) == 0)
        Attrs.addAttribute(llvm::Attribute::NonNull);
    }

    if (Attrs.hasAttributes()) {
      unsigned FirstIRArg, NumIRArgs;
      std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      for (unsigned i = 0; i < NumIRArgs; i++)
        PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
                                              FirstIRArg + i + 1, Attrs));
    }
  }
  assert(ArgNo == FI.arg_size());

  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
                                          llvm::AttributeSet::FunctionIndex,
                                          FuncAttrs));
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
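
// For example, a K&R-style definition
//   void f(c) char c; { ... }
// receives 'c' promoted to 'int' per the default argument promotions; the
// trunc above recovers the declared 'char', and the FP cast similarly
// recovers 'float' from a promoted 'double'.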

/// Returns the attribute (either a parameter attribute or a function
/// attribute) that declares argument ArgNo to be non-null.
static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
                                         QualType ArgType, unsigned ArgNo) {
  // FIXME: __attribute__((nonnull)) can also be applied to:
  //   - references to pointers, where the pointee is known to be
  //     nonnull (apparently a Clang extension)
  //   - transparent unions containing pointers
  // In the former case, LLVM IR cannot represent the constraint. In
  // the latter case, we have no guarantee that the transparent union
  // is in fact passed as a pointer.
  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
    return nullptr;
  // First, check attribute on parameter itself.
  if (PVD) {
    if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
      return ParmNNAttr;
  }
  // Check function attributes.
  if (!FD)
    return nullptr;
  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
    if (NNAttr->isNonNull(ArgNo))
      return NNAttr;
  }
  return nullptr;
}
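
// Both spellings are covered here, e.g. (user code):
//   void g(int *p) __attribute__((nonnull(1)));   // function attribute
//   void h(int *p __attribute__((nonnull)));      // parameter attribute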

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
    // Naked functions don't have prologues.
    return;

  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getReturnType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
  // Flattened function arguments.
  SmallVector<llvm::Argument *, 16> FnArgs;
  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
  for (auto &Arg : Fn->args()) {
    FnArgs.push_back(&Arg);
  }
  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());

  // If we're using inalloca, all the memory arguments are GEPs off of the last
  // parameter, which is a pointer to the complete memory area.
  Address ArgStruct = Address::invalid();
  const llvm::StructLayout *ArgStructLayout = nullptr;
  if (IRFunctionArgs.hasInallocaArg()) {
    ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
    ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
                        FI.getArgStructAlignment());

    assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
  }

  // Name the struct return parameter.
  if (IRFunctionArgs.hasSRetArg()) {
    auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
    AI->setName("agg.result");
    AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
                                        llvm::Attribute::NoAlias));
  }
  // Track if we received the parameter as a pointer (indirect, byval, or
  // inalloca).  If we already have a pointer, EmitParmDecl doesn't need to
  // copy it into a local alloca for us.
  SmallVector<ParamValue, 16> ArgVals;
  ArgVals.reserve(Args.size());

  // Create a pointer value for every parameter declaration.  This usually
  // entails copying one or more LLVM IR arguments into an alloca.  Don't push
  // any cleanups or do anything that might unwind.  We do that separately, so
  // we can push the cleanups in the correct order for the ABI.
  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgI.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      auto FieldIndex = ArgI.getInAllocaFieldIndex();
      CharUnits FieldOffset =
        CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
      Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
                                          Arg->getName());
      ArgVals.push_back(ParamValue::forIndirect(V));
      break;
    }

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());

      if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested.
        Address V = ParamAddr;
        if (ArgI.getIndirectRealign()) {
          Address AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
          Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
          Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
          Builder.CreateMemCpy(Dst, Src, SizeVal, false);
          V = AlignedTemp;
        }
        ArgVals.push_back(ParamValue::forIndirect(V));
      } else {
        // Load scalar value from indirect argument.
        llvm::Value *V =
          EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ParamValue::forDirect(V));
      }
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {

      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        auto AI = FnArgs[FirstIRArg];
        llvm::Value *V = AI;

        if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
          if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
                             PVD->getFunctionScopeIndex()))
            AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                AI->getArgNo() + 1,
                                                llvm::Attribute::NonNull));

          QualType OTy = PVD->getOriginalType();
          if (const auto *ArrTy =
              getContext().getAsConstantArrayType(OTy)) {
            // A C99 array parameter declaration with the static keyword also
            // indicates dereferenceability, and if the size is constant we can
            // use the dereferenceable attribute (which requires the size in
            // bytes).
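            // For example, given
            //   void f(int arr[static 4]);
            // the caller must pass a pointer to at least four ints, so the
            // parameter can be marked dereferenceable(4 * sizeof(int)).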
            if (ArrTy->getSizeModifier() == ArrayType::Static) {
              QualType ETy = ArrTy->getElementType();
              uint64_t ArrSize = ArrTy->getSize().getZExtValue();
              if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
                  ArrSize) {
                llvm::AttrBuilder Attrs;
                Attrs.addDereferenceableAttr(
                  getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
                AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                    AI->getArgNo() + 1, Attrs));
              } else if (getContext().getTargetAddressSpace(ETy) == 0) {
                AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                    AI->getArgNo() + 1,
                                                    llvm::Attribute::NonNull));
              }
            }
          } else if (const auto *ArrTy =
                     getContext().getAsVariableArrayType(OTy)) {
            // For C99 VLAs with the static keyword, we don't know the size so
            // we can't use the dereferenceable attribute, but in addrspace(0)
            // we know that it must be nonnull.
            if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
                !getContext().getTargetAddressSpace(ArrTy->getElementType()))
              AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                  AI->getArgNo() + 1,
                                                  llvm::Attribute::NonNull));
          }

          const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
          if (!AVAttr)
            if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
              AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
          if (AVAttr) {
            llvm::Value *AlignmentValue =
              EmitScalarExpr(AVAttr->getAlignment());
            llvm::ConstantInt *AlignmentCI =
              cast<llvm::ConstantInt>(AlignmentValue);
            unsigned Alignment =
              std::min((unsigned) AlignmentCI->getZExtValue(),
                       +llvm::Value::MaximumAlignment);

            llvm::AttrBuilder Attrs;
            Attrs.addAlignmentAttr(Alignment);
            AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                AI->getArgNo() + 1, Attrs));
          }
        }

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                              AI->getArgNo() + 1,
                                              llvm::Attribute::NoAlias));

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        if (const CXXMethodDecl *MD =
            dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
          if (MD->isVirtual() && Arg == CXXABIThisDecl)
            V = CGM.getCXXABI().
                adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
        }

        // Because of merging of function types from multiple decls it is
        // possible for the type of an argument to not match the corresponding
        // type in the function type. Since we are codegening the callee
        // in here, add a cast to the argument type.
        llvm::Type *LTy = ConvertType(Arg->getType());
        if (V->getType() != LTy)
          V = Builder.CreateBitCast(V, LTy);

        ArgVals.push_back(ParamValue::forDirect(V));
        break;
      }

      Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
                                     Arg->getName());

      // Pointer to store into.
      Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
          STy->getNumElements() > 1) {
        auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::Type *DstTy = Ptr.getElementType();
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);

        Address AddrToStoreInto = Address::invalid();
        if (SrcSize <= DstSize) {
          AddrToStoreInto =
            Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
        } else {
          AddrToStoreInto =
            CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
        }

        assert(STy->getNumElements() == NumIRArgs);
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          auto AI = FnArgs[FirstIRArg + i];
          AI->setName(Arg->getName() + ".coerce" + Twine(i));
          auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
          Address EltPtr =
            Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
          Builder.CreateStore(AI, EltPtr);
        }

        if (SrcSize > DstSize) {
          Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
        }

      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(NumIRArgs == 1);
        auto AI = FnArgs[FirstIRArg];
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
        llvm::Value *V =
          EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ParamValue::forDirect(V));
      } else {
        ArgVals.push_back(ParamValue::forIndirect(Alloca));
      }
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
      LValue LV = MakeAddrLValue(Alloca, Ty);
      ArgVals.push_back(ParamValue::forIndirect(Alloca));

      auto FnArgIter = FnArgs.begin() + FirstIRArg;
      ExpandTypeFromArgs(Ty, LV, FnArgIter);
      assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
      for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
        auto AI = FnArgs[FirstIRArg + i];
        AI->setName(Arg->getName() + "." + Twine(i));
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      // Initialize the local variable appropriately.
      if (!hasScalarEvaluationKind(Ty)) {
        ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
      } else {
        llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
        ArgVals.push_back(ParamValue::forDirect(U));
      }
      break;
    }
  }

  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)
      EmitParmDecl(*Args[I], ArgVals[I], I + 1);
  } else {
    for (unsigned I = 0, E = Args.size(); I != E; ++I)
      EmitParmDecl(*Args[I], ArgVals[I], I + 1);
  }
}

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // The result must be the last instruction in its basic block.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return nullptr;
  if (&BB->back() != result) return nullptr;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*,4> insnsToKill;

  // Look for:
  //  %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return nullptr;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return nullptr;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call.  If we can't find it, we can't do this
    // optimization.  But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
    if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
      llvm::Instruction *prev = call->getPrevNode();
      assert(prev);
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
               CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
      insnsToKill.push_back(prev);
    }
  } else {
    return nullptr;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.  Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method =
    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return nullptr;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return nullptr;

  // Look for a retain call.
  llvm::CallInst *retainCall =
    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
    return nullptr;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
    return nullptr;

  // Okay!  Burn it all down.  This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain.  This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}

/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // Check whether a User is a store whose pointer operand is the ReturnValue.
  // We are looking for stores to the ReturnValue, not for stores of the
  // ReturnValue to some other location.
  auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
    auto *SI = dyn_cast<llvm::StoreInst>(U);
    if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
      return nullptr;
    // These aren't actually possible for non-coerced returns, and we
    // only care about non-coerced returns on this code path.
    assert(!SI->isAtomic() && !SI->isVolatile());
    return SI;
  };
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.  Sometimes this can
  // happen with how we generate implicit-returns; it can also happen
  // with noreturn cleanups.
  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return nullptr;
    llvm::Instruction *I = &IP->back();

    // Skip lifetime markers
    for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
                                            IE = IP->rend();
         II != IE; ++II) {
      if (llvm::IntrinsicInst *Intrinsic =
              dyn_cast<llvm::IntrinsicInst>(&*II)) {
        if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
          const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
          ++II;
          if (II == IE)
            break;
          if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
            continue;
        }
      }
      I = &*II;
      break;
    }

    return GetStoreIfValid(I);
  }

  llvm::StoreInst *store =
      GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
  if (!store) return nullptr;
  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessors chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return nullptr;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         bool EmitRetDbgLoc,
                                         SourceLocation EndLoc) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
    // Naked functions don't have epilogues.
    Builder.CreateUnreachable();
    return;
  }

  // Functions with no result always return void.
  if (!ReturnValue.isValid()) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::InAlloca:
    // Aggregates get evaluated directly into the destination.  Sometimes we
    // need to return the sret value in a register, though.
    assert(hasAggregateEvaluationKind(RetTy));
    if (RetAI.getInAllocaSRet()) {
      llvm::Function::arg_iterator EI = CurFn->arg_end();
      --EI;
      llvm::Value *ArgStruct = &*EI;
      llvm::Value *SRet = Builder.CreateStructGEP(
          nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
      RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
    }
    break;

  case ABIArgInfo::Indirect: {
    auto AI = CurFn->arg_begin();
    if (RetAI.isSRetAfterThis())
      ++AI;
    switch (getEvaluationKind(RetTy)) {
    case TEK_Complex: {
      ComplexPairTy RT =
        EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
      EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
                         /*isInit*/ true);
      break;
    }
    case TEK_Aggregate:
      // Do nothing; aggregates get evaluated directly into the destination.
      break;
    case TEK_Scalar:
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
                        MakeNaturalAlignAddrLValue(&*AI, RetTy),
                        /*isInit*/ true);
      break;
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp will always have
      // pointer-to-return-type type; just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI =
              findDominatingStoreToReturnValue(*this)) {
        // Reuse the debug location from the store unless there is
        // cleanup code to be emitted between the store and return
        // instruction.
        if (EmitRetDbgLoc && !AutoreleaseResult)
          RetDbgLoc = SI->getDebugLoc();
        // Get the stored value and nuke the now-dead store.
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        auto returnValueInst = ReturnValue.getPointer();
        if (returnValueInst->use_empty()) {
          if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
            alloca->eraseFromParent();
            ReturnValue = Address::invalid();
          }
        }

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      // If the value is offset in memory, apply the offset now.
      Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
#ifndef NDEBUG
      // Type::isObjCRetainableType has to be called on a QualType that hasn't
      // been stripped of its typedefs, so we cannot use RetTy here.  Get the
      // original return type of the FunctionDecl, ObjCMethodDecl, or
      // BlockDecl from CurCodeDecl or BlockInfo.
      QualType RT;

      if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
        RT = FD->getReturnType();
      else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
        RT = MD->getReturnType();
      else if (isa<BlockDecl>(CurCodeDecl))
        RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
      else
        llvm_unreachable("Unexpected function/method type");

      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RT->isObjCRetainableType());
#endif
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret;
  if (RV) {
    if (CurCodeDecl && SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
      if (auto RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>()) {
        SanitizerScope SanScope(this);
        llvm::Value *Cond = Builder.CreateICmpNE(
            RV, llvm::Constant::getNullValue(RV->getType()));
        llvm::Constant *StaticData[] = {
            EmitCheckSourceLocation(EndLoc),
            EmitCheckSourceLocation(RetNNAttr->getLocation()),
        };
        EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
                  "nonnull_return", StaticData, None);
      }
    }
    Ret = Builder.CreateRet(RV);
  } else {
    Ret = Builder.CreateRetVoid();
  }

  if (RetDbgLoc)
    Ret->setDebugLoc(std::move(RetDbgLoc));
}

static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}
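
// For example, under the 32-bit Microsoft C++ ABI a type with a non-trivial
// copy constructor, such as
//   struct S { S(const S &); int x; };
// must be constructed directly in the argument memory, so it is classified
// RAA_DirectInMemory and ends up passed via the inalloca block.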

static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
                                          QualType Ty) {
  // FIXME: Generate IR in one pass, rather than going back and fixing up these
  // placeholders.
  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Placeholder =
    llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
  Placeholder = CGF.Builder.CreateDefaultAlignedLoad(Placeholder);

  // FIXME: When we generate this IR in one pass, we shouldn't need
  // this win32-specific alignment hack.
  CharUnits Align = CharUnits::fromQuantity(4);

  return AggValueSlot::forAddr(Address(Placeholder, Align),
                               Ty.getQualifiers(),
                               AggValueSlot::IsNotDestructed,
                               AggValueSlot::DoesNotNeedGCBarriers,
                               AggValueSlot::IsNotAliased);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param,
                                          SourceLocation loc) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  Address local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to non-scalars are pointers directly to the aggregate.
  // I don't know why references to scalars are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (!hasScalarEvaluationKind(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
         "cannot emit delegate call arguments for inalloca arguments!");

  args.add(convertTempToRValue(local, type, loc), type);
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

static bool isProvablyNonNull(llvm::Value *addr) {
  return isa<llvm::AllocaInst>(addr);
}
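
// Note that isProvablyNonNull is a conservative approximation: an alloca is
// always a valid, non-null stack address, so writeback sources rooted in a
// local alloca can skip the null check emitted in emitWriteback below, while
// any other pointer pessimistically gets the check.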
2583 
2584 /// Emit the actual writing-back of a writeback.
2585 static void emitWriteback(CodeGenFunction &CGF,
2586                           const CallArgList::Writeback &writeback) {
2587   const LValue &srcLV = writeback.Source;
2588   Address srcAddr = srcLV.getAddress();
2589   assert(!isProvablyNull(srcAddr.getPointer()) &&
2590          "shouldn't have writeback for provably null argument");
2591 
2592   llvm::BasicBlock *contBB = nullptr;
2593 
2594   // If the argument wasn't provably non-null, we need to null check
2595   // before doing the store.
2596   bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
2597   if (!provablyNonNull) {
2598     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
2599     contBB = CGF.createBasicBlock("icr.done");
2600 
2601     llvm::Value *isNull =
2602       CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
2603     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
2604     CGF.EmitBlock(writebackBB);
2605   }
2606 
2607   // Load the value to writeback.
2608   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
2609 
2610   // Cast it back, in case we're writing an id to a Foo* or something.
2611   value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
2612                                     "icr.writeback-cast");
2613 
2614   // Perform the writeback.
2615 
2616   // If we have a "to use" value, it's something we need to emit a use
2617   // of.  This has to be carefully threaded in: if it's done after the
2618   // release it's potentially undefined behavior (and the optimizer
2619   // will ignore it), and if it happens before the retain then the
2620   // optimizer could move the release there.
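  // (Illustrative ordering, matching the code below: retain the new value,
  //  emit the intrinsic use, load the old value, store the new one, then
  //  release the old value.)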
  if (writeback.ToUse) {
    assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);

    // Retain the new value.  No need to block-copy here:  the block's
    // being passed up the stack.
    value = CGF.EmitARCRetainNonBlock(value);

    // Emit the intrinsic use here.
    CGF.EmitARCIntrinsicUse(writeback.ToUse);

    // Load the old value (primitively).
    llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());

    // Put the new value in place (primitively).
    CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);

    // Release the old value.
    CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());

  // Otherwise, we can just do a normal lvalue store.
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
  }

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (const auto &I : args.writebacks())
    emitWriteback(CGF, I);
}

static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
                                            const CallArgList &CallArgs) {
  assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
    CallArgs.getCleanupsToDeactivate();
  // Iterate in reverse to increase the likelihood of popping the cleanup.
  for (const auto &I : llvm::reverse(Cleanups)) {
    CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
    I.IsActiveIP->eraseFromParent();
  }
}

static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();
  return nullptr;
}

/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of an __autoreleased temporary; it
/// might be copy-initialized with the current value of the given
/// address, but it will definitely be copied out of after the call.
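///
/// A typical source pattern (illustrative) is an ARC out-parameter:
///   void fill(NSError **errp);  // parameter is NSError * __autoreleasing *
///   NSError *err = nil;         // a __strong local
///   fill(&err);                 // pass &tmp; copy tmp back into err after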
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  LValue srcLV;

  // Make an optimistic effort to emit the address as an l-value.
  // This can fail if the argument expression is more complicated.
  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
    srcLV = CGF.EmitLValue(lvExpr);

  // Otherwise, just emit it as a scalar.
  } else {
    Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());

    QualType srcAddrType =
      CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
    srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
  }
  Address srcAddr = srcLV.getAddress();

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.

  llvm::PointerType *destType =
    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr.getPointer())) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  // Create the temporary.
  Address temp = CGF.CreateTempAlloca(destType->getElementType(),
                                      CGF.getPointerAlign(),
                                      "icr.temp");
  // Loading an l-value can introduce a cleanup if the l-value is __weak,
  // and that cleanup will be conditional if we can't prove that the l-value
  // isn't null, so we need to register a dominating point so that the cleanups
  // system will make valid IR.
  CodeGenFunction::ConditionalEvaluation condEval(CGF);

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
      llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = nullptr;
  llvm::BasicBlock *originBB = nullptr;

  // If the address is *not* known to be non-null, we have to select the
  // final argument (and guard the copy) dynamically.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
  if (provablyNonNull) {
    finalArgument = temp.getPointer();
  } else {
    llvm::Value *isNull =
      CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(isNull,
                                   llvm::ConstantPointerNull::get(destType),
                                             temp.getPointer(), "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      originBB = CGF.Builder.GetInsertBlock();
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
      condEval.begin(CGF);
    }
  }

  llvm::Value *valueToUse = nullptr;

  // Perform a copy if necessary.
  if (shouldCopy) {
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);

    // If optimization is enabled, and the value was held in a
    // __strong variable, we need to tell the optimizer that this
    // value has to stay alive until we're doing the store back.
    // This is because the temporary is effectively unretained,
    // and so otherwise we can violate the high-level semantics.
    if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
        srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
      valueToUse = src;
    }
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull) {
    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
    CGF.EmitBlock(contBB);

    // Make a phi for the value to intrinsically use.
    if (valueToUse) {
      llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
                                                      "icr.to-use");
      phiToUse->addIncoming(valueToUse, copyBB);
      phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
                            originBB);
      valueToUse = phiToUse;
    }

    condEval.end(CGF);
  }

  args.addWriteback(srcLV, temp, valueToUse);
  args.add(RValue::get(finalArgument), CRE->getType());
}

void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
  assert(!StackBase && !StackCleanup.isValid());

  // Save the stack.
  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
}

void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
  if (StackBase) {
    // Restore the stack after the call.
    llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
    CGF.Builder.CreateCall(F, StackBase);
  }
}

void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
                                          SourceLocation ArgLoc,
                                          const FunctionDecl *FD,
                                          unsigned ParmNum) {
  if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
    return;
  auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
  auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
  if (!NNAttr)
    return;
  SanitizerScope SanScope(this);
  assert(RV.isScalar());
  llvm::Value *V = RV.getScalarVal();
  llvm::Value *Cond =
      Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(ArgLoc),
      EmitCheckSourceLocation(NNAttr->getLocation()),
      llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
  };
  EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
            "nonnull_arg", StaticData, None);
}

void CodeGenFunction::EmitCallArgs(
    CallArgList &Args, ArrayRef<QualType> ArgTypes,
    llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
    const FunctionDecl *CalleeDecl, unsigned ParamsToSkip) {
  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));

  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg) {
    if (CalleeDecl == nullptr || I >= CalleeDecl->getNumParams())
      return;
    auto *PS = CalleeDecl->getParamDecl(I)->getAttr<PassObjectSizeAttr>();
    if (PS == nullptr)
      return;

    const auto &Context = getContext();
    auto SizeTy = Context.getSizeType();
    auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
    llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T);
    Args.add(RValue::get(V), SizeTy);
  };

  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
  // because arguments are destroyed left to right in the callee.
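  // For example (illustrative): in 'f(A(), B())', B() is evaluated first
  // here, while the callee later destroys A's argument before B's.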
  if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    // Insert a stack save if we're going to need any inalloca args.
    bool HasInAllocaArgs = false;
    for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
         I != E && !HasInAllocaArgs; ++I)
      HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
    if (HasInAllocaArgs) {
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      Args.allocateArgumentMemory(*this);
    }

    // Evaluate each argument.
    size_t CallArgsStart = Args.size();
    for (int I = ArgTypes.size() - 1; I >= 0; --I) {
      CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
      EmitCallArg(Args, *Arg, ArgTypes[I]);
      EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
                          CalleeDecl, ParamsToSkip + I);
      MaybeEmitImplicitObjectSize(I, *Arg);
    }

    // Un-reverse the arguments we just evaluated so they match up with the LLVM
    // IR function.
    std::reverse(Args.begin() + CallArgsStart, Args.end());
    return;
  }

  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
    CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
    assert(Arg != ArgRange.end());
    EmitCallArg(Args, *Arg, ArgTypes[I]);
    EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
                        CalleeDecl, ParamsToSkip + I);
    MaybeEmitImplicitObjectSize(I, *Arg);
  }
}

namespace {

struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
  DestroyUnpassedArg(Address Addr, QualType Ty)
      : Addr(Addr), Ty(Ty) {}

  Address Addr;
  QualType Ty;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
    assert(!Dtor->isTrivial());
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
                              /*Delegating=*/false, Addr);
  }
};

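// RAII helper note (editorial): debug locations are suppressed while emitting
// a CXXDefaultArgExpr so that IR for a default argument, whose source lives
// at the callee's declaration, doesn't pick up misleading locations here.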
struct DisableDebugLocationUpdates {
  CodeGenFunction &CGF;
  bool disabledDebugInfo;
  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
    if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
      CGF.disableDebugInfo();
  }
  ~DisableDebugLocationUpdates() {
    if (disabledDebugInfo)
      CGF.enableDebugInfo();
  }
};

} // end anonymous namespace

void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  DisableDebugLocationUpdates Dis(*this, E);
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getLangOpts().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E), type);
  }

  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);

  // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
  // However, we still have to push an EH-only cleanup in case we unwind before
  // we make it to the call.
  if (HasAggregateEvalKind &&
      CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    // If we're using inalloca, use the argument memory.  Otherwise, use a
    // temporary.
    AggValueSlot Slot;
    if (args.isUsingInAlloca())
      Slot = createPlaceholderSlot(*this, type);
    else
      Slot = CreateAggTemp(type, "agg.tmp");

    const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
    bool DestroyedInCallee =
        RD && RD->hasNonTrivialDestructor() &&
        CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
    if (DestroyedInCallee)
      Slot.setExternallyDestructed();

    EmitAggExpr(E, Slot);
    RValue RV = Slot.asRValue();
    args.add(RV, type);

    if (DestroyedInCallee) {
      // Create a no-op GEP between the placeholder and the cleanup so we can
      // RAUW it successfully.  It also serves as a marker of the first
      // instruction where the cleanup is active.
      pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
                                              type);
      // This unreachable is a temporary marker which will be removed later.
      llvm::Instruction *IsActive = Builder.CreateUnreachable();
      args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
    }
    return;
  }

  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
      args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
    } else {
      // We can't represent a misaligned lvalue in the CallArgList, so copy
      // to an aligned temporary now.
      Address tmp = CreateMemTemp(type);
      EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
      args.add(RValue::getAggregate(tmp), type);
    }
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
  // implicitly widens null pointer constants that are arguments to varargs
  // functions to pointer-sized ints.
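  // For example (illustrative), on Win64 a call like 'func(NULL)' passes the
  // 32-bit int 0 as a pointer-sized (64-bit) integer.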
  if (!getTarget().getTriple().isOSWindows())
    return Arg->getType();

  if (Arg->getType()->isIntegerType() &&
      getContext().getTypeSize(Arg->getType()) <
          getContext().getTargetInfo().getPointerWidth(0) &&
      Arg->isNullPointerConstant(getContext(),
                                 Expr::NPC_ValueDependentIsNotNull)) {
    return getContext().getIntPtrType();
  }

  return Arg->getType();
}

// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}
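
// (Illustrative resulting IR: the instruction ends up tagged as
//    call void @f(), !clang.arc.no_objc_arc_exceptions !N
//  which lets the ARC optimizer treat its unwind edge as dead.)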

/// Emits a call to the given no-arguments nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         const llvm::Twine &name) {
  return EmitNounwindRuntimeCall(callee, None, name);
}

/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const llvm::Twine &name) {
  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
  call->setDoesNotThrow();
  return call;
}

/// Emits a simple call (never an invoke) to the given no-arguments
/// runtime function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 const llvm::Twine &name) {
  return EmitRuntimeCall(callee, None, name);
}

/// Emits a simple call (never an invoke) to the given runtime function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 ArrayRef<llvm::Value*> args,
                                 const llvm::Twine &name) {
  llvm::CallInst *call = Builder.CreateCall(callee, args, name);
  call->setCallingConv(getRuntimeCC());
  return call;
}

// Calls which may throw must have operand bundles indicating which funclet
// they are nested within.
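// For example (illustrative), a call emitted inside a cleanup funclet carries
// a bundle like:
//   %pad = cleanuppad within none []
//   call void @dtor(%T* %obj) [ "funclet"(token %pad) ]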
static void
getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad,
                     SmallVectorImpl<llvm::OperandBundleDef> &BundleList) {
  // There is no need for a funclet operand bundle if we aren't inside a
  // funclet.
  if (!CurrentFuncletPad)
    return;

  // Skip intrinsics which cannot throw.
  auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
  if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
    return;

  BundleList.emplace_back("funclet", CurrentFuncletPad);
}

/// Emits a call or invoke to the given noreturn runtime function.
void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
                                               ArrayRef<llvm::Value*> args) {
  SmallVector<llvm::OperandBundleDef, 1> BundleList;
  getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);

  if (getInvokeDest()) {
    llvm::InvokeInst *invoke =
      Builder.CreateInvoke(callee,
                           getUnreachableBlock(),
                           getInvokeDest(),
                           args,
                           BundleList);
    invoke->setDoesNotReturn();
    invoke->setCallingConv(getRuntimeCC());
  } else {
    llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
    call->setDoesNotReturn();
    call->setCallingConv(getRuntimeCC());
    Builder.CreateUnreachable();
  }
}

/// Emits a call or invoke instruction to the given nullary runtime function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         const Twine &name) {
  return EmitRuntimeCallOrInvoke(callee, None, name);
}

/// Emits a call or invoke instruction to the given runtime function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const Twine &name) {
  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
  callSite.setCallingConv(getRuntimeCC());
  return callSite;
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();
  SmallVector<llvm::OperandBundleDef, 1> BundleList;
  getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);

  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
                                Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return llvm::CallSite(Inst);
}

/// \brief Store a non-aggregate value to an address to initialize it.  For
/// initialization, a non-atomic store will be used.
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
                                        LValue Dst) {
  if (Src.isScalar())
    CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
  else
    CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
}

void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
                                                  llvm::Value *New) {
  DeferredReplacements.push_back(std::make_pair(Old, New));
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 CGCalleeInfo CalleeInfo,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  llvm::FunctionType *IRFuncTy =
    cast<llvm::FunctionType>(
                  cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If we're using inalloca, insert the allocation after the stack save.
  // FIXME: Do this earlier rather than hacking it in here!
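  // Illustrative shape of the IR on the inalloca path (assuming 32-bit MSVC):
  //   %inalloca.save = call i8* @llvm.stacksave()
  //   %argmem = alloca <{ ... }>, inalloca
  //   ...
  //   call void @f(<{ ... }>* inalloca %argmem)
  //   call void @llvm.stackrestore(i8* %inalloca.save)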
  Address ArgMemory = Address::invalid();
  const llvm::StructLayout *ArgMemoryLayout = nullptr;
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    ArgMemoryLayout = CGM.getDataLayout().getStructLayout(ArgStruct);
    llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    auto Align = CallInfo.getArgStructAlignment();
    AI->setAlignment(Align.getQuantity());
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = Address(AI, Align);
  }

  // Helper function to drill into the inalloca allocation.
  auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
    auto FieldOffset =
      CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
    return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
  };

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  Address SRetPtr = Address::invalid();
  size_t UnusedReturnSize = 0;
  if (RetAI.isIndirect() || RetAI.isInAlloca()) {
    if (!ReturnValue.isNull()) {
      SRetPtr = ReturnValue.getValue();
    } else {
      SRetPtr = CreateMemTemp(RetTy);
      if (HaveInsertPoint() && ReturnValue.isUnused()) {
        uint64_t size =
            CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
        if (EmitLifetimeStart(size, SRetPtr.getPointer()))
          UnusedReturnSize = size;
      }
    }
    if (IRFunctionArgs.hasSRetArg()) {
      IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
    } else {
      Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr.getPointer(), Addr);
    }
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (RV.isAggregate()) {
        // Replace the placeholder with the appropriate argument slot GEP.
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(RV.getAggregatePointer());
        CGBuilderTy::InsertPoint IP = Builder.saveIP();
        Builder.SetInsertPoint(Placeholder);
        Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
        Builder.restoreIP(IP);
        deferPlaceholderReplacement(Placeholder, Addr.getPointer());
      } else {
        // Store the RValue into the argument struct.
        Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr.getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is unavoidable.  The
        // definition of a type later in a translation unit may change its type
        // from {}* to (%struct.foo*)*.
        if (Addr.getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        LValue argLV = MakeAddrLValue(Addr, I->Ty);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      }
      break;
    }

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
        IRCallArgs[FirstIRArg] = Addr.getPointer();

        LValue argLV = MakeAddrLValue(Addr, I->Ty);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source.  (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is located in an address space
        //    different than that of the argument (0).
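        // (Illustrative: case 2 can arise when the source aggregate lives in
        //  storage whose alignment is smaller than the byval ABI requires.)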
        Address Addr = RV.getAggregateAddress();
        CharUnits Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
        const unsigned ArgAddrSpace =
            (FirstIRArg < IRFuncTy->getNumParams()
                 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
                 : 0);
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
             llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
                                              Align.getQuantity(), *TD)
               < Align.getQuantity()) ||
            (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
          // Create an aligned temporary, and copy to it.
          Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
          IRCallArgs[FirstIRArg] = AI.getPointer();
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
        } else {
          // Skip the extra memcpy call.
          IRCallArgs[FirstIRArg] = Addr.getPointer();
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddress());

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
        IRCallArgs[FirstIRArg] = V;
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      Address Src = Address::invalid();
      if (RV.isScalar() || RV.isComplex()) {
        Src = CreateMemTemp(I->Ty, "coerce");
        LValue SrcLV = MakeAddrLValue(Src, I->Ty);
        EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
      } else {
        Src = RV.getAggregateAddress();
      }

      // If the value is offset in memory, apply the offset now.
      Src = emitAddressAtOffset(*this, Src, ArgInfo);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs (first-class aggregates), so we flatten them if this is safe to
      // do for this argument.
      llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy = Src.getType()->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          Address TempAlloca
            = CreateTempAlloca(STy, Src.getAlignment(),
                               Src.getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
          Src = TempAlloca;
        } else {
          Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy));
        }

        auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
          Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
          llvm::Value *LI = Builder.CreateLoad(EltPtr);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        IRCallArgs[FirstIRArg] =
          CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
      }

      break;
    }

    case ABIArgInfo::Expand:
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
  }

  if (ArgMemory.isValid()) {
    llvm::Value *Arg = ArgMemory.getPointer();
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site.  In such
      // cases, we can't do any parameter mismatch checks.  Give up and bitcast
      // the callee.
      unsigned CalleeAS =
          cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
      Callee = Builder.CreateBitCast(
          Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
                                                DE = DeclaredTy->element_end(),
                                                FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // If the callee is a bitcast of a function to a varargs pointer-to-function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == IRCallArgs.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // The inalloca argument can have a different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(Callee->getName(), CallInfo, CalleeInfo,
                             AttributeList, CallingConv,
                             /*AttrOnCallSite=*/true);
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
                                                     AttributeList);

  bool CannotThrow;
  if (currentFunctionUsesSEHTry()) {
    // SEH cares about asynchronous exceptions, so everything can "throw."
    CannotThrow = false;
  } else if (isCleanupPadScope() &&
             EHPersonality::get(*this).isMSVCXXPersonality()) {
    // The MSVC++ personality will implicitly terminate the program if an
    // exception is thrown.  An unwind edge cannot be reached.
    CannotThrow = true;
  } else {
    // Otherwise, nounwind call sites will never throw.
    CannotThrow = Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
                                     llvm::Attribute::NoUnwind);
  }
  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();

  SmallVector<llvm::OperandBundleDef, 1> BundleList;
  getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, IRCallArgs, BundleList);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs,
                              BundleList);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !CS.hasFnAttr(llvm::Attribute::NoInline))
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::AlwaysInline);

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope())
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::NoInline);

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // Insert instrumentation or attach profile metadata at indirect call sites.
  if (!CS.getCalledFunction())
    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
                     CS.getInstruction(), Callee);

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    if (UnusedReturnSize)
      EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
                      SRetPtr.getPointer());

    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
    const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
    if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
      Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
  }

  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSize)
        EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
                        SRetPtr.getPointer());
      return ret;
    }

    case ABIArgInfo::Ignore:
      // Even if we are ignoring the call's result, make sure to construct
      // an appropriate return value for our caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          Address DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr.isValid()) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the return value doesn't match, perform a bitcast to coerce it.
          // This can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      Address DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr.isValid()) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  } ();

  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();

  if (Ret.isScalar() && TargetDecl) {
    if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
      llvm::Value *OffsetValue = nullptr;
      if (const auto *Offset = AA->getOffset())
        OffsetValue = EmitScalarExpr(Offset);

      llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
      llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
      EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
                              OffsetValue);
    }
  }

  return Ret;
}

/* VarArg handling */

Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
  VAListAddr = VE->isMicrosoftABI()
                 ? EmitMSVAListRef(VE->getSubExpr())
                 : EmitVAListRef(VE->getSubExpr());
  QualType Ty = VE->getType();
  if (VE->isMicrosoftABI())
    return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
}