1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliancy.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "TargetInfo.h"
16 #include "ABIInfo.h"
17 #include "CGCXXABI.h"
18 #include "CodeGenFunction.h"
19 #include "clang/AST/RecordLayout.h"
20 #include "clang/CodeGen/CGFunctionInfo.h"
21 #include "clang/Frontend/CodeGenOptions.h"
22 #include "llvm/ADT/Triple.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/Type.h"
25 #include "llvm/Support/raw_ostream.h"
26 using namespace clang;
27 using namespace CodeGen;
28 
29 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
30                                llvm::Value *Array,
31                                llvm::Value *Value,
32                                unsigned FirstIndex,
33                                unsigned LastIndex) {
34   // Alternatively, we could emit this as a loop in the source.
35   for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
36     llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
37     Builder.CreateStore(Value, Cell);
38   }
39 }
40 
41 static bool isAggregateTypeForABI(QualType T) {
42   return !CodeGenFunction::hasScalarEvaluationKind(T) ||
43          T->isMemberFunctionPointerType();
44 }
45 
// Out-of-line destructor definition for ABIInfo (declared in ABIInfo.h).
ABIInfo::~ABIInfo() {}
47 
48 static bool isRecordReturnIndirect(const RecordType *RT,
49                                    CGCXXABI &CXXABI) {
50   const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
51   if (!RD)
52     return false;
53   return CXXABI.isReturnTypeIndirect(RD);
54 }
55 
56 
57 static bool isRecordReturnIndirect(QualType T, CGCXXABI &CXXABI) {
58   const RecordType *RT = T->getAs<RecordType>();
59   if (!RT)
60     return false;
61   return isRecordReturnIndirect(RT, CXXABI);
62 }
63 
64 static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
65                                               CGCXXABI &CXXABI) {
66   const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
67   if (!RD)
68     return CGCXXABI::RAA_Default;
69   return CXXABI.getRecordArgABI(RD);
70 }
71 
72 static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
73                                               CGCXXABI &CXXABI) {
74   const RecordType *RT = T->getAs<RecordType>();
75   if (!RT)
76     return CGCXXABI::RAA_Default;
77   return getRecordArgABI(RT, CXXABI);
78 }
79 
/// Forwarding accessor: the C++ ABI object held by the CodeGenTypes.
CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}
83 
/// Forwarding accessor: the AST context held by the CodeGenTypes.
ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}
87 
/// Forwarding accessor: the LLVM context held by the CodeGenTypes.
llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}
91 
/// Forwarding accessor: the target data layout held by the CodeGenTypes.
const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}
95 
/// Forwarding accessor: the target description held by the CodeGenTypes.
const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}
99 
/// Print a human-readable description of this ABIArgInfo to stderr, e.g.
/// "(ABIArgInfo Kind=Direct Type=i32)".  Intended for debugging.
void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    // Direct arguments may carry a coerced IR type; print it when present.
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}
131 
// TargetCodeGenInfo owns the ABIInfo it was constructed with; free it here.
TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
133 
// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
/// Size assumed for the target's in-flight unwind-exception object
/// (presumably in bytes -- confirm against callers).
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  //   AArch64    Linux
  return 32;
}
145 
/// Decide whether a call through a function with no prototype should use the
/// variadic convention.  The base implementation always answers false.
bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}
154 
155 void
156 TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
157                                              llvm::SmallString<24> &Opt) const {
158   // This assumes the user is passing a library name like "rt" instead of a
159   // filename like "librt.a/so", and that they don't care whether it's static or
160   // dynamic.
161   Opt = "-l";
162   Opt += Lib;
163 }
164 
165 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
166 
/// isEmptyField - Return true iff the field is "empty", that is it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  // Unnamed bit-fields take no storage a caller could observe.
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  // Anything that is not a record at this point is a real, non-empty field.
  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  // A plain-C record field is empty iff the record itself is empty.
  return isEmptyRecord(Context, FT, AllowArrays);
}
198 
199 /// isEmptyRecord - Return true iff a structure contains only empty
200 /// fields. Note that a structure with a flexible array member is not
201 /// considered empty.
202 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
203   const RecordType *RT = T->getAs<RecordType>();
204   if (!RT)
205     return 0;
206   const RecordDecl *RD = RT->getDecl();
207   if (RD->hasFlexibleArrayMember())
208     return false;
209 
210   // If this is a C++ record, check the bases first.
211   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
212     for (const auto &I : CXXRD->bases())
213       if (!isEmptyRecord(Context, I.getType(), true))
214         return false;
215 
216   for (const auto *I : RD->fields())
217     if (!isEmptyField(Context, I, AllowArrays))
218       return false;
219   return true;
220 }
221 
222 /// isSingleElementStruct - Determine if a structure is a "single
223 /// element struct", i.e. it has exactly one non-empty field or
224 /// exactly one field which is itself a single element
225 /// struct. Structures with flexible array members are never
226 /// considered single element structs.
227 ///
228 /// \return The field declaration for the single non-empty field, if
229 /// it exists.
230 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
231   const RecordType *RT = T->getAsStructureType();
232   if (!RT)
233     return 0;
234 
235   const RecordDecl *RD = RT->getDecl();
236   if (RD->hasFlexibleArrayMember())
237     return 0;
238 
239   const Type *Found = 0;
240 
241   // If this is a C++ record, check the bases first.
242   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
243     for (const auto &I : CXXRD->bases()) {
244       // Ignore empty records.
245       if (isEmptyRecord(Context, I.getType(), true))
246         continue;
247 
248       // If we already found an element then this isn't a single-element struct.
249       if (Found)
250         return 0;
251 
252       // If this is non-empty and not a single element struct, the composite
253       // cannot be a single element struct.
254       Found = isSingleElementStruct(I.getType(), Context);
255       if (!Found)
256         return 0;
257     }
258   }
259 
260   // Check for single element.
261   for (const auto *FD : RD->fields()) {
262     QualType FT = FD->getType();
263 
264     // Ignore empty fields.
265     if (isEmptyField(Context, FD, true))
266       continue;
267 
268     // If we already found an element then this isn't a single-element
269     // struct.
270     if (Found)
271       return 0;
272 
273     // Treat single element arrays as the element.
274     while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
275       if (AT->getSize().getZExtValue() != 1)
276         break;
277       FT = AT->getElementType();
278     }
279 
280     if (!isAggregateTypeForABI(FT)) {
281       Found = FT.getTypePtr();
282     } else {
283       Found = isSingleElementStruct(FT, Context);
284       if (!Found)
285         return 0;
286     }
287   }
288 
289   // We don't consider a struct a single-element struct if it has
290   // padding beyond the element type.
291   if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
292     return 0;
293 
294   return Found;
295 }
296 
297 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
298   // Treat complex types as the element type.
299   if (const ComplexType *CTy = Ty->getAs<ComplexType>())
300     Ty = CTy->getElementType();
301 
302   // Check for a type which we know has a simple scalar argument-passing
303   // convention without any padding.  (We're specifically looking for 32
304   // and 64-bit integer and integer-equivalents, float, and double.)
305   if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
306       !Ty->isEnumeralType() && !Ty->isBlockPointerType())
307     return false;
308 
309   uint64_t Size = Context.getTypeSize(Ty);
310   return Size == 32 || Size == 64;
311 }
312 
313 /// canExpandIndirectArgument - Test whether an argument type which is to be
314 /// passed indirectly (on the stack) would have the equivalent layout if it was
315 /// expanded into separate arguments. If so, we prefer to do the latter to avoid
316 /// inhibiting optimizations.
317 ///
318 // FIXME: This predicate is missing many cases, currently it just follows
319 // llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
320 // should probably make this smarter, or better yet make the LLVM backend
321 // capable of handling it.
322 static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
323   // We can only expand structure types.
324   const RecordType *RT = Ty->getAs<RecordType>();
325   if (!RT)
326     return false;
327 
328   // We can only expand (C) structures.
329   //
330   // FIXME: This needs to be generalized to handle classes as well.
331   const RecordDecl *RD = RT->getDecl();
332   if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
333     return false;
334 
335   uint64_t Size = 0;
336 
337   for (const auto *FD : RD->fields()) {
338     if (!is32Or64BitBasicType(FD->getType(), Context))
339       return false;
340 
341     // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
342     // how to expand them yet, and the predicate for telling if a bitfield still
343     // counts as "basic" is more complicated than what we were doing previously.
344     if (FD->isBitField())
345       return false;
346 
347     Size += Context.getTypeSize(FD->getType());
348   }
349 
350   // Make sure there are not any holes in the struct.
351   if (Size != Context.getTypeSize(Ty))
352     return false;
353 
354   return true;
355 }
356 
357 namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  /// Classify how a value of type \p RetTy is returned.
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  /// Classify how an argument of the given type is passed.
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  // Classify the return type, then each argument in turn.
  void computeInfo(CGFunctionInfo &FI) const override {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
378 
/// TargetCodeGenInfo wrapper that installs the default ABI rules.
class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};
384 
385 llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
386                                        CodeGenFunction &CGF) const {
387   return 0;
388 }
389 
390 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
391   if (isAggregateTypeForABI(Ty)) {
392     // Records with non-trivial destructors/constructors should not be passed
393     // by value.
394     if (isRecordReturnIndirect(Ty, getCXXABI()))
395       return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
396 
397     return ABIArgInfo::getIndirect(0);
398   }
399 
400   // Treat an enum type as its underlying type.
401   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
402     Ty = EnumTy->getDecl()->getIntegerType();
403 
404   return (Ty->isPromotableIntegerType() ?
405           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
406 }
407 
408 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
409   if (RetTy->isVoidType())
410     return ABIArgInfo::getIgnore();
411 
412   if (isAggregateTypeForABI(RetTy))
413     return ABIArgInfo::getIndirect(0);
414 
415   // Treat an enum type as its underlying type.
416   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
417     RetTy = EnumTy->getDecl()->getIntegerType();
418 
419   return (RetTy->isPromotableIntegerType() ?
420           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
421 }
422 
423 //===----------------------------------------------------------------------===//
424 // le32/PNaCl bitcode ABI Implementation
425 //
426 // This is a simplified version of the x86_32 ABI.  Arguments and return values
427 // are always passed on the stack.
428 //===----------------------------------------------------------------------===//
429 
/// ABIInfo for PNaCl bitcode: arguments and return values always go on the
/// stack (see section banner above).
class PNaClABIInfo : public ABIInfo {
 public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  /// Classify how a value of type \p RetTy is returned.
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  /// Classify how an argument of the given type is passed.
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
441 
/// TargetCodeGenInfo wrapper that installs the PNaCl ABI rules.
class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
 public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};
447 
448 void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
449     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
450 
451     for (auto &I : FI.arguments())
452       I.info = classifyArgumentType(I.type);
453   }
454 
455 llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
456                                        CodeGenFunction &CGF) const {
457   return 0;
458 }
459 
460 /// \brief Classify argument of given type \p Ty.
461 ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
462   if (isAggregateTypeForABI(Ty)) {
463     if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
464       return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
465     return ABIArgInfo::getIndirect(0);
466   } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
467     // Treat an enum type as its underlying type.
468     Ty = EnumTy->getDecl()->getIntegerType();
469   } else if (Ty->isFloatingType()) {
470     // Floating-point types don't go inreg.
471     return ABIArgInfo::getDirect();
472   }
473 
474   return (Ty->isPromotableIntegerType() ?
475           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
476 }
477 
478 ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
479   if (RetTy->isVoidType())
480     return ABIArgInfo::getIgnore();
481 
482   // In the PNaCl ABI we always return records/structures on the stack.
483   if (isAggregateTypeForABI(RetTy))
484     return ABIArgInfo::getIndirect(0);
485 
486   // Treat an enum type as its underlying type.
487   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
488     RetTy = EnumTy->getDecl()->getIntegerType();
489 
490   return (RetTy->isPromotableIntegerType() ?
491           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
492 }
493 
/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  // The final scalar-size check deliberately excludes <1 x i64>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}
501 
502 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
503                                           StringRef Constraint,
504                                           llvm::Type* Ty) {
505   if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
506     if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
507       // Invalid MMX constraint
508       return 0;
509     }
510 
511     return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
512   }
513 
514   // No operation needed
515   return Ty;
516 }
517 
518 //===----------------------------------------------------------------------===//
519 // X86-32 ABI Implementation
520 //===----------------------------------------------------------------------===//
521 
/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
  // Initialize every member: StackOffset and UseInAlloca were previously left
  // indeterminate by the constructor, which is a latent bug if either is read
  // before being explicitly set.
  CCState(unsigned CC)
      : CC(CC), FreeRegs(0), StackOffset(0), UseInAlloca(false) {}

  unsigned CC;          // Calling convention in use.
  unsigned FreeRegs;    // Integer registers still available for arguments.
  unsigned StackOffset; // Stack bytes consumed so far (see addFieldToArgStruct).
  bool UseInAlloca;     // Whether the inalloca convention is required.
};
531 
/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  /// Register classes used when deciding how a scalar is passed/returned.
  enum Class {
    Integer,
    Float
  };

  // Minimum guaranteed stack alignment for x86-32, in bytes.
  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;     // Use Darwin's vector return rules.
  bool IsSmallStructInRegABI; // Return register-sized structs in registers.
  bool IsWin32StructABI;      // Use MSVC-compatible struct passing.
  unsigned DefaultNumRegisterParameters; // Default register-parameter count.

  /// True for sizes eligible for in-register return (8/16/32/64 bits).
  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
                                  bool IsInstanceMethod) const;

  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  /// Indirect return; consumes a register for the hidden pointer if free.
  ABIArgInfo getIndirectReturnResult(CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State,
                                bool IsInstanceMethod) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
  /// Decide whether \p Ty is passed in registers; may request padding.
  bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           unsigned &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
};
587 
/// Target hooks for x86-32; wraps an X86_32ABIInfo configured by the flags
/// passed to the constructor.
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
      bool d, bool p, bool w, unsigned r)
    :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}

  /// Whether this triple/options combination returns small structs in regs.
  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    // Encodes the bytes EB 06 'F' 'T': a short jmp over the marker followed
    // by two recognizable ASCII characters.
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x06 << 8) |  //           .+0x08
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

};
625 
626 }
627 
/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
///
/// \param IsInstanceMethod Whether the function being compiled is an instance
/// method; under the MSVC-compatible struct ABI, instance methods never
/// return structures in registers.
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
                                               bool IsInstanceMethod) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context,
                                      IsInstanceMethod);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // For thiscall conventions, structures will never be returned in
  // a register.  This is for compatibility with the MSVC ABI
  if (IsWin32StructABI && IsInstanceMethod && RT->isStructureType())
    return false;

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context, IsInstanceMethod))
      return false;
  }
  return true;
}
683 
684 ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const {
685   // If the return value is indirect, then the hidden argument is consuming one
686   // integer register.
687   if (State.FreeRegs) {
688     --State.FreeRegs;
689     return ABIArgInfo::getIndirectInReg(/*Align=*/0, /*ByVal=*/false);
690   }
691   return ABIArgInfo::getIndirect(/*Align=*/0, /*ByVal=*/false);
692 }
693 
/// Classify how a value of type \p RetTy is returned on x86-32, taking the
/// Darwin vector rules, the small-struct-in-register option, and the Win32
/// struct rules into account.  \p State tracks remaining free registers.
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, CCState &State,
                                             bool IsInstanceMethod) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // The C++ ABI may require an sret-style return for this record.
      if (isRecordReturnIndirect(RT, getCXXABI()))
        return getIndirectReturnResult(State);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(State);

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext(), IsInstanceMethod)) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return getIndirectReturnResult(State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
768 
769 static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
770   return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
771 }
772 
773 static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
774   const RecordType *RT = Ty->getAs<RecordType>();
775   if (!RT)
776     return 0;
777   const RecordDecl *RD = RT->getDecl();
778 
779   // If this is a C++ record, check the bases first.
780   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
781     for (const auto &I : CXXRD->bases())
782       if (!isRecordWithSSEVectorType(Context, I.getType()))
783         return false;
784 
785   for (const auto *i : RD->fields()) {
786     QualType FT = i->getType();
787 
788     if (isSSEVectorType(Context, FT))
789       return true;
790 
791     if (isRecordWithSSEVectorType(Context, FT))
792       return true;
793   }
794 
795   return false;
796 }
797 
798 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
799                                                  unsigned Align) const {
800   // Otherwise, if the alignment is less than or equal to the minimum ABI
801   // alignment, just use the default; the backend will handle this.
802   if (Align <= MinABIStackAlignInBytes)
803     return 0; // Use default alignment.
804 
805   // On non-Darwin, the stack type alignment is always 4.
806   if (!IsDarwinVectorABI) {
807     // Set explicit alignment, since we may need to realign the top.
808     return MinABIStackAlignInBytes;
809   }
810 
811   // Otherwise, if the type contains an SSE vector type, the alignment is 16.
812   if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
813                       isRecordWithSSEVectorType(getContext(), Ty)))
814     return 16;
815 
816   return MinABIStackAlignInBytes;
817 }
818 
819 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
820                                             CCState &State) const {
821   if (!ByVal) {
822     if (State.FreeRegs) {
823       --State.FreeRegs; // Non-byval indirects just use one pointer.
824       return ABIArgInfo::getIndirectInReg(0, false);
825     }
826     return ABIArgInfo::getIndirect(0, false);
827   }
828 
829   // Compute the byval alignment.
830   unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
831   unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
832   if (StackAlign == 0)
833     return ABIArgInfo::getIndirect(4, /*ByVal=*/true);
834 
835   // If the stack alignment is less than the type alignment, realign the
836   // argument.
837   bool Realign = TypeAlign > StackAlign;
838   return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, Realign);
839 }
840 
841 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
842   const Type *T = isSingleElementStruct(Ty, getContext());
843   if (!T)
844     T = Ty.getTypePtr();
845 
846   if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
847     BuiltinType::Kind K = BT->getKind();
848     if (K == BuiltinType::Float || K == BuiltinType::Double)
849       return Float;
850   }
851   return Integer;
852 }
853 
854 bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
855                                    bool &NeedsPadding) const {
856   NeedsPadding = false;
857   Class C = classify(Ty);
858   if (C == Float)
859     return false;
860 
861   unsigned Size = getContext().getTypeSize(Ty);
862   unsigned SizeInRegs = (Size + 31) / 32;
863 
864   if (SizeInRegs == 0)
865     return false;
866 
867   if (SizeInRegs > State.FreeRegs) {
868     State.FreeRegs = 0;
869     return false;
870   }
871 
872   State.FreeRegs -= SizeInRegs;
873 
874   if (State.CC == llvm::CallingConv::X86_FastCall) {
875     if (Size > 32)
876       return false;
877 
878     if (Ty->isIntegralOrEnumerationType())
879       return true;
880 
881     if (Ty->isPointerType())
882       return true;
883 
884     if (Ty->isReferenceType())
885       return true;
886 
887     if (State.FreeRegs)
888       NeedsPadding = true;
889 
890     return false;
891   }
892 
893   return true;
894 }
895 
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // Classify one x86-32 argument, consuming parameter registers from
  // State as a side effect.
  // FIXME: Set alignment on indirect arguments.
  if (isAggregateTypeForABI(Ty)) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      // Check with the C++ ABI first.
      CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
      if (RAA == CGCXXABI::RAA_Indirect) {
        return getIndirectResult(Ty, false, State);
      } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
        // The field index doesn't matter, we'll fix it up later.
        return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
      }

      // Structs are always byval on win32, regardless of what they contain.
      if (IsWin32StructABI)
        return getIndirectResult(Ty, true, State);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, State);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding;
    if (shouldUseInReg(Ty, State, NeedsPadding)) {
      // Pass the aggregate in registers as an anonymous struct of i32s
      // covering its size.
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }
    // Padding register requested by shouldUseInReg (fastcall case where the
    // registers were consumed but can't hold the value).
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : 0;

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall, PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    // MMX-sized vectors are passed as i64 to match the MMX calling
    // convention.
    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }


  // Scalars: classify enums via their underlying integer type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool NeedsPadding;
  bool InReg = shouldUseInReg(Ty, State, NeedsPadding);

  // Small integers are extended to 32 bits; everything else is passed
  // directly, with the inreg flag when a register was claimed above.
  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}
979 
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  // Seed the register state: fastcall gets two registers, otherwise honor
  // an explicit regparm attribute, otherwise use the target default.
  CCState State(FI.getCallingConvention());
  if (State.CC == llvm::CallingConv::X86_FastCall)
    State.FreeRegs = 2;
  else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else
    State.FreeRegs = DefaultNumRegisterParameters;

  // The return value is classified first; it may consume registers (e.g.
  // an sret pointer) before any arguments are considered.
  FI.getReturnInfo() =
      classifyReturnType(FI.getReturnType(), State, FI.isInstanceMethod());

  // On win32, use the x86_cdeclmethodcc convention for cdecl methods that use
  // sret.  This convention swaps the order of the first two parameters behind
  // the scenes to match MSVC.
  if (IsWin32StructABI && FI.isInstanceMethod() &&
      FI.getCallingConvention() == llvm::CallingConv::C &&
      FI.getReturnInfo().isIndirect())
    FI.setEffectiveCallingConvention(llvm::CallingConv::X86_CDeclMethod);

  bool UsedInAlloca = false;
  for (auto &I : FI.arguments()) {
    I.info = classifyArgumentType(I.type, State);
    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and rewrite
  // all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}
1011 
void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                                   unsigned &StackOffset,
                                   ABIArgInfo &Info, QualType Type) const {
  // Append Type as the next field of the inalloca argument struct and
  // rewrite Info to refer to that field.  StackOffset tracks the running
  // byte offset within the struct and is kept 4-byte aligned on exit.
  assert(StackOffset % 4U == 0 && "unaligned inalloca struct");
  Info = ABIArgInfo::getInAlloca(FrameFields.size());
  FrameFields.push_back(CGT.ConvertTypeForMem(Type));
  StackOffset += getContext().getTypeSizeInChars(Type).getQuantity();

  // Insert padding bytes to respect alignment.  For x86_32, each argument is 4
  // byte aligned.
  if (StackOffset % 4U) {
    unsigned OldOffset = StackOffset;
    StackOffset = llvm::RoundUpToAlignment(StackOffset, 4U);
    unsigned NumBytes = StackOffset - OldOffset;
    assert(NumBytes);
    // Pad with an [N x i8] field so the next argument starts aligned.
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes);
    FrameFields.push_back(Ty);
  }
}
1033 
void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  // Second classification pass: once any argument requires inalloca, every
  // memory argument is folded into a single packed argument struct.
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  // Running byte offset of the next field; addFieldToArgStruct keeps it
  // 4-byte aligned.
  unsigned StackOffset = 0;

  // Put the sret parameter into the inalloca struct if it's in memory.
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && !Ret.getInReg()) {
    CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
  if (FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {

    // Leave ignored and inreg arguments alone.
    switch (I->info.getKind()) {
    case ABIArgInfo::Indirect:
      // Only byval indirects should reach this point; non-byval ones were
      // either put in registers or belong to the sret handled above.
      assert(I->info.getIndirectByVal());
      break;
    case ABIArgInfo::Ignore:
      continue;
    case ABIArgInfo::Direct:
    case ABIArgInfo::Extend:
      if (I->info.getInReg())
        continue;
      break;
    default:
      break;
    }

    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true));
}
1081 
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // On x86-32 the va_list is a plain pointer into the stacked argument
  // area: load the current position, align it if needed, bump it past the
  // argument, and store the new position back.
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute if the address needs to be aligned
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  // va_arg slots are at least 4 bytes; only emit alignment code beyond that.
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  // Advance ap past this argument, rounding its size up to the slot
  // alignment computed above.
  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
1121 
1122 void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
1123                                                   llvm::GlobalValue *GV,
1124                                             CodeGen::CodeGenModule &CGM) const {
1125   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
1126     if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1127       // Get the LLVM function.
1128       llvm::Function *Fn = cast<llvm::Function>(GV);
1129 
1130       // Now add the 'alignstack' attribute with a value of 16.
1131       llvm::AttrBuilder B;
1132       B.addStackAlignmentAttr(16);
1133       Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
1134                       llvm::AttributeSet::get(CGM.getLLVMContext(),
1135                                               llvm::AttributeSet::FunctionIndex,
1136                                               B));
1137     }
1138   }
1139 }
1140 
bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
                                               CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // Populate the EH register-size table at Address with per-register byte
  // sizes for x86-32.  Returns false to indicate success.
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers;  the order is different
  //   on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4).  Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5).  Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}
1174 
1175 //===----------------------------------------------------------------------===//
1176 // X86-64 ABI Implementation
1177 //===----------------------------------------------------------------------===//
1178 
1179 
1180 namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  // Register classes from the AMD64 ABI classification algorithm
  // (AMD64-ABI 3.2.3).
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object.  Some parameters are classified different
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// \param isNamedArg - Whether the argument in question is a "named"
  /// argument, as used in AMD64-ABI 3.5.7.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg) const;

  /// GetByteVectorType - Return the IR vector type used to pass \arg Ty
  /// as a byte vector (SSE/SSEUp classified aggregates).
  llvm::Type *GetByteVectorType(QualType Ty) const;
  /// GetSSETypeAtOffset - Return the IR type to use for the eightbyte of
  /// \arg SourceTy at \arg SourceOffset that was classified SSE.
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  /// GetINTEGERTypeAtOffset - Return the IR type to use for the eightbyte
  /// of \arg SourceTy at \arg SourceOffset that was classified INTEGER.
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a
  /// suitable result such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  /// classifyArgumentType - Classify one argument, reporting through
  /// \arg neededInt / \arg neededSSE how many registers it would consume.
  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE,
                                  bool isNamedArg) const;

  /// IsIllegalVectorType - Whether \arg Ty is a vector type that must be
  /// passed in memory rather than registers.
  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers.  In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  /// Whether the target supports AVX; enables 256-bit vector arguments.
  bool HasAVX;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx),
      // Pointer width is taken from the data layout of address space 0.
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  /// isPassedUsingAVXType - Whether \arg type is passed directly as a
  /// vector wider than 128 bits (i.e. in a YMM register).
  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
                                           /*isNamedArg*/true);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
1315 
/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  /// classify - Compute the ABIArgInfo for one argument, or for the return
  /// value when \arg IsReturnType is true.
  ABIArgInfo classify(QualType Ty, bool IsReturnType) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
1329 
/// X86_64TargetCodeGenInfo - TargetCodeGenInfo for the System V x86-64 ABI.
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // 7 is the DWARF register number of %rsp on x86-64.
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    // All tabulated registers are 8 bytes wide.
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSA
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior.  However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    // A tiny blob of x86 code UBSan recognizes in front of instrumented
    // functions: a short jump over the 'FT' marker bytes.
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x0a << 8) |  //           .+0x0c
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

};
1394 
1395 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
1396   // If the argument does not end in .lib, automatically add the suffix. This
1397   // matches the behavior of MSVC.
1398   std::string ArgStr = Lib;
1399   if (!Lib.endswith_lower(".lib"))
1400     ArgStr += ".lib";
1401   return ArgStr;
1402 }
1403 
/// WinX86_32TargetCodeGenInfo - x86-32 codegen info for Windows targets;
/// differs from the base only in how linker directives are spelled.
class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
        bool d, bool p, bool w, unsigned RegParms)
    : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}

  // Emit MSVC-style "/DEFAULTLIB:foo.lib" for #pragma comment(lib, ...).
  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  // Emit MSVC-style "/FAILIFMISMATCH" for #pragma detect_mismatch.
  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
1422 
/// WinX86_64TargetCodeGenInfo - TargetCodeGenInfo for Win64, wiring the
/// Windows x86-64 ABI plus MSVC-style linker directives.
class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // 7 is the DWARF register number of %rsp on x86-64.
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    // All tabulated registers are 8 bytes wide.
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  // Emit MSVC-style "/DEFAULTLIB:foo.lib" for #pragma comment(lib, ...).
  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  // Emit MSVC-style "/FAILIFMISMATCH" for #pragma detect_mismatch.
  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
1454 
1455 }
1456 
void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
  //
  // (c) If the size of the aggregate exceeds two eightbytes and the first
  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //     argument is passed in memory. NOTE: This is necessary to keep the
  //     ABI working for processors that don't support the __m256 type.
  //
  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic.  Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Hi == Memory)
    Lo = Memory; // (a)
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory; // (b) -- skipped on targets that keep pre-0.98 behavior.
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory; // (c)
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE; // (d)
}
1489 
1490 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
1491   // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
1492   // classified recursively so that always two fields are
1493   // considered. The resulting class is calculated according to
1494   // the classes of the fields in the eightbyte:
1495   //
1496   // (a) If both classes are equal, this is the resulting class.
1497   //
1498   // (b) If one of the classes is NO_CLASS, the resulting class is
1499   // the other class.
1500   //
1501   // (c) If one of the classes is MEMORY, the result is the MEMORY
1502   // class.
1503   //
1504   // (d) If one of the classes is INTEGER, the result is the
1505   // INTEGER.
1506   //
1507   // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
1508   // MEMORY is used as class.
1509   //
1510   // (f) Otherwise class SSE is used.
1511 
1512   // Accum should never be memory (we should have returned) or
1513   // ComplexX87 (because this cannot be passed in a structure).
1514   assert((Accum != Memory && Accum != ComplexX87) &&
1515          "Invalid accumulated classification during merge.");
1516   if (Accum == Field || Field == NoClass)
1517     return Accum;
1518   if (Field == Memory)
1519     return Memory;
1520   if (Accum == NoClass)
1521     return Field;
1522   if (Accum == Integer || Field == Integer)
1523     return Integer;
1524   if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
1525       Accum == X87 || Accum == X87Up)
1526     return Memory;
1527   return SSE;
1528 }
1529 
1530 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
1531                              Class &Lo, Class &Hi, bool isNamedArg) const {
1532   // FIXME: This code can be simplified by introducing a simple value class for
1533   // Class pairs with appropriate constructor methods for the various
1534   // situations.
1535 
1536   // FIXME: Some of the split computations are wrong; unaligned vectors
1537   // shouldn't be passed in registers for example, so there is no chance they
1538   // can straddle an eightbyte. Verify & simplify.
1539 
1540   Lo = Hi = NoClass;
1541 
1542   Class &Current = OffsetBase < 64 ? Lo : Hi;
1543   Current = Memory;
1544 
1545   if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
1546     BuiltinType::Kind k = BT->getKind();
1547 
1548     if (k == BuiltinType::Void) {
1549       Current = NoClass;
1550     } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
1551       Lo = Integer;
1552       Hi = Integer;
1553     } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
1554       Current = Integer;
1555     } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
1556                (k == BuiltinType::LongDouble &&
1557                 getTarget().getTriple().isOSNaCl())) {
1558       Current = SSE;
1559     } else if (k == BuiltinType::LongDouble) {
1560       Lo = X87;
1561       Hi = X87Up;
1562     }
1563     // FIXME: _Decimal32 and _Decimal64 are SSE.
1564     // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
1565     return;
1566   }
1567 
1568   if (const EnumType *ET = Ty->getAs<EnumType>()) {
1569     // Classify the underlying integer type.
1570     classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
1571     return;
1572   }
1573 
1574   if (Ty->hasPointerRepresentation()) {
1575     Current = Integer;
1576     return;
1577   }
1578 
1579   if (Ty->isMemberPointerType()) {
1580     if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
1581       Lo = Hi = Integer;
1582     else
1583       Current = Integer;
1584     return;
1585   }
1586 
1587   if (const VectorType *VT = Ty->getAs<VectorType>()) {
1588     uint64_t Size = getContext().getTypeSize(VT);
1589     if (Size == 32) {
1590       // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
1591       // float> as integer.
1592       Current = Integer;
1593 
1594       // If this type crosses an eightbyte boundary, it should be
1595       // split.
1596       uint64_t EB_Real = (OffsetBase) / 64;
1597       uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
1598       if (EB_Real != EB_Imag)
1599         Hi = Lo;
1600     } else if (Size == 64) {
1601       // gcc passes <1 x double> in memory. :(
1602       if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
1603         return;
1604 
1605       // gcc passes <1 x long long> as INTEGER.
1606       if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
1607           VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
1608           VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
1609           VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
1610         Current = Integer;
1611       else
1612         Current = SSE;
1613 
1614       // If this type crosses an eightbyte boundary, it should be
1615       // split.
1616       if (OffsetBase && OffsetBase != 64)
1617         Hi = Lo;
1618     } else if (Size == 128 || (HasAVX && isNamedArg && Size == 256)) {
1619       // Arguments of 256-bits are split into four eightbyte chunks. The
1620       // least significant one belongs to class SSE and all the others to class
1621       // SSEUP. The original Lo and Hi design considers that types can't be
1622       // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
1623       // This design isn't correct for 256-bits, but since there're no cases
1624       // where the upper parts would need to be inspected, avoid adding
1625       // complexity and just consider Hi to match the 64-256 part.
1626       //
1627       // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
1628       // registers if they are "named", i.e. not part of the "..." of a
1629       // variadic function.
1630       Lo = SSE;
1631       Hi = SSEUp;
1632     }
1633     return;
1634   }
1635 
1636   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
1637     QualType ET = getContext().getCanonicalType(CT->getElementType());
1638 
1639     uint64_t Size = getContext().getTypeSize(Ty);
1640     if (ET->isIntegralOrEnumerationType()) {
1641       if (Size <= 64)
1642         Current = Integer;
1643       else if (Size <= 128)
1644         Lo = Hi = Integer;
1645     } else if (ET == getContext().FloatTy)
1646       Current = SSE;
1647     else if (ET == getContext().DoubleTy ||
1648              (ET == getContext().LongDoubleTy &&
1649               getTarget().getTriple().isOSNaCl()))
1650       Lo = Hi = SSE;
1651     else if (ET == getContext().LongDoubleTy)
1652       Current = ComplexX87;
1653 
1654     // If this complex type crosses an eightbyte boundary then it
1655     // should be split.
1656     uint64_t EB_Real = (OffsetBase) / 64;
1657     uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
1658     if (Hi == NoClass && EB_Real != EB_Imag)
1659       Hi = Lo;
1660 
1661     return;
1662   }
1663 
1664   if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
1665     // Arrays are treated like structures.
1666 
1667     uint64_t Size = getContext().getTypeSize(Ty);
1668 
1669     // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1670     // than four eightbytes, ..., it has class MEMORY.
1671     if (Size > 256)
1672       return;
1673 
1674     // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
1675     // fields, it has class MEMORY.
1676     //
1677     // Only need to check alignment of array base.
1678     if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
1679       return;
1680 
1681     // Otherwise implement simplified merge. We could be smarter about
1682     // this, but it isn't worth it and would be harder to verify.
1683     Current = NoClass;
1684     uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
1685     uint64_t ArraySize = AT->getSize().getZExtValue();
1686 
1687     // The only case a 256-bit wide vector could be used is when the array
1688     // contains a single 256-bit element. Since Lo and Hi logic isn't extended
1689     // to work for sizes wider than 128, early check and fallback to memory.
1690     if (Size > 128 && EltSize != 256)
1691       return;
1692 
1693     for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
1694       Class FieldLo, FieldHi;
1695       classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
1696       Lo = merge(Lo, FieldLo);
1697       Hi = merge(Hi, FieldHi);
1698       if (Lo == Memory || Hi == Memory)
1699         break;
1700     }
1701 
1702     postMerge(Size, Lo, Hi);
1703     assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
1704     return;
1705   }
1706 
1707   if (const RecordType *RT = Ty->getAs<RecordType>()) {
1708     uint64_t Size = getContext().getTypeSize(Ty);
1709 
1710     // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1711     // than four eightbytes, ..., it has class MEMORY.
1712     if (Size > 256)
1713       return;
1714 
1715     // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
1716     // copy constructor or a non-trivial destructor, it is passed by invisible
1717     // reference.
1718     if (getRecordArgABI(RT, getCXXABI()))
1719       return;
1720 
1721     const RecordDecl *RD = RT->getDecl();
1722 
1723     // Assume variable sized types are passed in memory.
1724     if (RD->hasFlexibleArrayMember())
1725       return;
1726 
1727     const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
1728 
1729     // Reset Lo class, this will be recomputed.
1730     Current = NoClass;
1731 
1732     // If this is a C++ record, classify the bases first.
1733     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1734       for (const auto &I : CXXRD->bases()) {
1735         assert(!I.isVirtual() && !I.getType()->isDependentType() &&
1736                "Unexpected base class!");
1737         const CXXRecordDecl *Base =
1738           cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
1739 
1740         // Classify this field.
1741         //
1742         // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
1743         // single eightbyte, each is classified separately. Each eightbyte gets
1744         // initialized to class NO_CLASS.
1745         Class FieldLo, FieldHi;
1746         uint64_t Offset =
1747           OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
1748         classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
1749         Lo = merge(Lo, FieldLo);
1750         Hi = merge(Hi, FieldHi);
1751         if (Lo == Memory || Hi == Memory)
1752           break;
1753       }
1754     }
1755 
1756     // Classify the fields one at a time, merging the results.
1757     unsigned idx = 0;
1758     for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1759            i != e; ++i, ++idx) {
1760       uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1761       bool BitField = i->isBitField();
1762 
1763       // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
1764       // four eightbytes, or it contains unaligned fields, it has class MEMORY.
1765       //
1766       // The only case a 256-bit wide vector could be used is when the struct
1767       // contains a single 256-bit element. Since Lo and Hi logic isn't extended
1768       // to work for sizes wider than 128, early check and fallback to memory.
1769       //
1770       if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
1771         Lo = Memory;
1772         return;
1773       }
1774       // Note, skip this test for bit-fields, see below.
1775       if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
1776         Lo = Memory;
1777         return;
1778       }
1779 
1780       // Classify this field.
1781       //
1782       // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
1783       // exceeds a single eightbyte, each is classified
1784       // separately. Each eightbyte gets initialized to class
1785       // NO_CLASS.
1786       Class FieldLo, FieldHi;
1787 
1788       // Bit-fields require special handling, they do not force the
1789       // structure to be passed in memory even if unaligned, and
1790       // therefore they can straddle an eightbyte.
1791       if (BitField) {
1792         // Ignore padding bit-fields.
1793         if (i->isUnnamedBitfield())
1794           continue;
1795 
1796         uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1797         uint64_t Size = i->getBitWidthValue(getContext());
1798 
1799         uint64_t EB_Lo = Offset / 64;
1800         uint64_t EB_Hi = (Offset + Size - 1) / 64;
1801 
1802         if (EB_Lo) {
1803           assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
1804           FieldLo = NoClass;
1805           FieldHi = Integer;
1806         } else {
1807           FieldLo = Integer;
1808           FieldHi = EB_Hi ? Integer : NoClass;
1809         }
1810       } else
1811         classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
1812       Lo = merge(Lo, FieldLo);
1813       Hi = merge(Hi, FieldHi);
1814       if (Lo == Memory || Hi == Memory)
1815         break;
1816     }
1817 
1818     postMerge(Size, Lo, Hi);
1819   }
1820 }
1821 
1822 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
1823   // If this is a scalar LLVM value then assume LLVM will pass it in the right
1824   // place naturally.
1825   if (!isAggregateTypeForABI(Ty)) {
1826     // Treat an enum type as its underlying type.
1827     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1828       Ty = EnumTy->getDecl()->getIntegerType();
1829 
1830     return (Ty->isPromotableIntegerType() ?
1831             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1832   }
1833 
1834   return ABIArgInfo::getIndirect(0);
1835 }
1836 
1837 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
1838   if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
1839     uint64_t Size = getContext().getTypeSize(VecTy);
1840     unsigned LargestVector = HasAVX ? 256 : 128;
1841     if (Size <= 64 || Size > LargestVector)
1842       return true;
1843   }
1844 
1845   return false;
1846 }
1847 
/// getIndirectResult - Classify an argument that must be passed in memory.
/// \p freeIntRegs is the number of integer registers still unused by earlier
/// arguments; it gates the byval-avoidance optimization below.
ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  //
  // This assumption is optimistic, as there could be free registers available
  // when we need to pass this argument in memory, and LLVM could try to pass
  // the argument in the free register. This does not seem to happen currently,
  // but this code would be much safer if we could mark the argument with
  // 'onstack'. See PR12193.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Records with a required C++ argument-passing convention (non-trivial copy
  // ctor/dtor) are passed indirectly; the C++ ABI decides the exact form.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

  // Compute the byval alignment. We specify the alignment of the byval in all
  // cases so that the mid-level optimizer knows the alignment of the byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  // Attempt to avoid passing indirect results using byval when possible. This
  // is important for good codegen.
  //
  // We do this by coercing the value into a scalar type which the backend can
  // handle naturally (i.e., without using byval).
  //
  // For simplicity, we currently only do this when we have exhausted all of the
  // free integer registers. Doing this when there are free integer registers
  // would require more care, as we would have to ensure that the coerced value
  // did not claim the unused register. That would require either reordering the
  // arguments to the function (so that any subsequent inreg values came first),
  // or only doing this optimization when there were no following arguments that
  // might be inreg.
  //
  // We currently expect it to be rare (particularly in well written code) for
  // arguments to be passed on the stack when there are still free integer
  // registers available (this would typically imply large structs being passed
  // by value), so this seems like a fair tradeoff for now.
  //
  // We can revisit this if the backend grows support for 'onstack' parameter
  // attributes. See PR12193.
  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }

  return ABIArgInfo::getIndirect(Align);
}
1907 
1908 /// GetByteVectorType - The ABI specifies that a value should be passed in an
1909 /// full vector XMM/YMM register.  Pick an LLVM IR type that will be passed as a
1910 /// vector register.
1911 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
1912   llvm::Type *IRType = CGT.ConvertType(Ty);
1913 
1914   // Wrapper structs that just contain vectors are passed just like vectors,
1915   // strip them off if present.
1916   llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
1917   while (STy && STy->getNumElements() == 1) {
1918     IRType = STy->getElementType(0);
1919     STy = dyn_cast<llvm::StructType>(IRType);
1920   }
1921 
1922   // If the preferred type is a 16-byte vector, prefer to pass it.
1923   if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
1924     llvm::Type *EltTy = VT->getElementType();
1925     unsigned BitWidth = VT->getBitWidth();
1926     if ((BitWidth >= 128 && BitWidth <= 256) &&
1927         (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
1928          EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
1929          EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
1930          EltTy->isIntegerTy(128)))
1931       return VT;
1932   }
1933 
1934   return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
1935 }
1936 
/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or being in
/// alignment padding.  The user type specified is known to be at most 128 bits
/// in size, and have passed through X86_64ABIInfo::classify with a successful
/// classification that put one of the two halves in the INTEGER class.
///
/// It is conservatively correct to return false.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here.  This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  // Constant arrays: recurse into each element that overlaps the range.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    // Check each element to see if the element overlaps with the queried range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done..
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;

      // Translate the query range into element-relative bit offsets before
      // recursing.
      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
        if (BaseOffset >= EndBit) continue;

        // Recurse with the range translated into base-relative bit offsets.
        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
        if (!BitsContainNoUserData(I.getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest.  Yes
    // this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // much.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit) break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  // Conservatively assume any other type's bits are all user data.
  return false;
}
2020 
2021 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
2022 /// float member at the specified offset.  For example, {int,{float}} has a
2023 /// float at offset 4.  It is conservatively correct for this routine to return
2024 /// false.
2025 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
2026                                   const llvm::DataLayout &TD) {
2027   // Base case if we find a float.
2028   if (IROffset == 0 && IRType->isFloatTy())
2029     return true;
2030 
2031   // If this is a struct, recurse into the field at the specified offset.
2032   if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2033     const llvm::StructLayout *SL = TD.getStructLayout(STy);
2034     unsigned Elt = SL->getElementContainingOffset(IROffset);
2035     IROffset -= SL->getElementOffset(Elt);
2036     return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
2037   }
2038 
2039   // If this is an array, recurse into the field at the specified offset.
2040   if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2041     llvm::Type *EltTy = ATy->getElementType();
2042     unsigned EltSize = TD.getTypeAllocSize(EltTy);
2043     IROffset -= IROffset/EltSize*EltSize;
2044     return ContainsFloatAtOffset(EltTy, IROffset, TD);
2045   }
2046 
2047   return false;
2048 }
2049 
2050 
2051 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
2052 /// low 8 bytes of an XMM register, corresponding to the SSE class.
2053 llvm::Type *X86_64ABIInfo::
2054 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2055                    QualType SourceTy, unsigned SourceOffset) const {
2056   // The only three choices we have are either double, <2 x float>, or float. We
2057   // pass as float if the last 4 bytes is just padding.  This happens for
2058   // structs that contain 3 floats.
2059   if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
2060                             SourceOffset*8+64, getContext()))
2061     return llvm::Type::getFloatTy(getVMContext());
2062 
2063   // We want to pass as <2 x float> if the LLVM IR type contains a float at
2064   // offset+0 and offset+4.  Walk the LLVM IR type to find out if this is the
2065   // case.
2066   if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
2067       ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
2068     return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
2069 
2070   return llvm::Type::getDoubleTy(getVMContext());
2071 }
2072 
2073 
/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
/// an 8-byte GPR.  This means that we either have a scalar or we are talking
/// about the high or low part of an up-to-16-byte struct.  This routine picks
/// the best LLVM IR type to represent this, which may be i64 or may be anything
/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
/// etc).
///
/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
/// the source type.  IROffset is an offset in bytes into the LLVM IR type that
/// the 8-byte value references.  PrefType may be null.
///
/// SourceTy is the source level type for the entire argument.  SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
///
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it.  See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;

    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
    // goodness in the source type is just tail padding.  This is allowed to
    // kick in for struct {double,int} on the int, but not on
    // struct{double,int,int} because we wouldn't return the second int.  We
    // have to do this analysis on the source type because we can't depend on
    // unions being lowered a specific way etc.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      // A 32-bit pointer (!Has64BitPointers) is treated as a 32-bit unit.
      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
          cast<llvm::IntegerType>(IRType)->getBitWidth();

      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                SourceOffset*8+64, getContext()))
        return IRType;
    }
  }

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    // Only recurse while the offset lands inside the struct; otherwise fall
    // through to the generic integer fallback below.
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    // For arrays, recurse into the element containing the offset; EltOffset
    // rounds IROffset down to the start of that element.
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                  SourceOffset);
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in an
  // integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
    (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
}
2149 
2150 
/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
/// be used as elements of a two register pair to pass or return, return a
/// first class aggregate to represent them.  For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                           const llvm::DataLayout &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
  // at offset 8.  If the high and low parts we inferred are both 4-byte types
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
  // the second element at offset 8.  Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // To handle this, we have to increase the size of the low part so that the
  // second element will start at an 8 byte offset.  We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
    // There are only two sorts of types the ABI generation code can produce for
    // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
    // Promote these to a larger type.
    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  // NULL terminates the (variadic) element list of this StructType::get
  // overload.
  llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);


  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}
2192 
/// classifyReturnType - Map a source-level return type onto the x86-64 return
/// convention: classify into Lo/Hi eightbyte classes, then pick IR types for
/// the register(s) each class implies.
ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
        RetTy = EnumTy->getDecl()->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          RetTy->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }
    break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()),
                                    NULL);
    break;
  }

  llvm::Type *HighPart = 0;
  switch (Hi) {
    // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the next available eightbyte chunk of the last used
    // vector register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }

  // If a high part was specified, merge it together with the low part.  It is
  // known to pass in the high eightbyte of the result.  We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
2321 
/// Classify a single argument for the x86-64 SysV calling convention and
/// return the ABIArgInfo describing how to lower it.
///
/// \param Ty          the argument's type.
/// \param freeIntRegs number of integer registers still free; only consulted
///                    when the argument has to be passed in memory.
/// \param neededInt   [out] number of integer-register eightbytes consumed.
/// \param neededSSE   [out] number of SSE-register eightbytes consumed.
/// \param isNamedArg  true when the argument corresponds to a named
///                    parameter (forwarded to classify()).
ABIArgInfo X86_64ABIInfo::classifyArgumentType(
  QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
  bool isNamedArg)
  const
{
  // Classify the low and high eightbytes of the argument.
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:

    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    // A C++ record passed indirectly still consumes one GPR for the pointer.
    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    ++neededInt;

    // Pick an 8-byte type based on the preferred type.
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = Ty->getAs<EnumType>())
        Ty = EnumTy->getDecl()->getIntegerType();

      if (Ty->isIntegralOrEnumerationType() &&
          Ty->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }

    break;

    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
    // available SSE register is used, the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE: {
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }

  // Now lower the high eightbyte, if the argument has one.
  llvm::Type *HighPart = 0;
  switch (Hi) {
    // Memory was handled previously, ComplexX87 and X87 should
    // never occur as hi classes, and X87Up must be preceded by X87,
    // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass: break;

  case Integer:
    ++neededInt;
    // Pick an 8-byte type based on the preferred type.
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // X87Up generally doesn't occur here (long double is passed in
    // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);

    ++neededSSE;
    break;

    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
    // eightbyte is passed in the upper half of the last used SSE
    // register.  This only happens when 128-bit vectors are passed.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }

  // If a high part was specified, merge it together with the low part.  It is
  // known to pass in the high eightbyte of the result.  We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
2448 
2449 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2450 
2451   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2452 
2453   // Keep track of the number of assigned registers.
2454   unsigned freeIntRegs = 6, freeSSERegs = 8;
2455 
2456   // If the return value is indirect, then the hidden argument is consuming one
2457   // integer register.
2458   if (FI.getReturnInfo().isIndirect())
2459     --freeIntRegs;
2460 
2461   bool isVariadic = FI.isVariadic();
2462   unsigned numRequiredArgs = 0;
2463   if (isVariadic)
2464     numRequiredArgs = FI.getRequiredArgs().getNumRequiredArgs();
2465 
2466   // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
2467   // get assigned (in left-to-right order) for passing as follows...
2468   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2469        it != ie; ++it) {
2470     bool isNamedArg = true;
2471     if (isVariadic)
2472       isNamedArg = (it - FI.arg_begin()) <
2473                     static_cast<signed>(numRequiredArgs);
2474 
2475     unsigned neededInt, neededSSE;
2476     it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
2477                                     neededSSE, isNamedArg);
2478 
2479     // AMD64-ABI 3.2.3p3: If there are no registers available for any
2480     // eightbyte of an argument, the whole argument is passed on the
2481     // stack. If registers have already been assigned for some
2482     // eightbytes of such an argument, the assignments get reverted.
2483     if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
2484       freeIntRegs -= neededInt;
2485       freeSSERegs -= neededSSE;
2486     } else {
2487       it->info = getIndirectResult(it->type, freeIntRegs);
2488     }
2489   }
2490 }
2491 
/// Emit the memory ("overflow area") path of x86-64 va_arg.
///
/// Loads the current overflow_arg_area pointer out of the va_list, aligns it
/// upward when \p Ty needs more than 8-byte alignment, advances the pointer
/// past the argument, and returns the argument's address cast to a pointer
/// to the memory type of \p Ty.
static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
  // Field 2 of the va_list struct is the overflow (stack) area pointer.
  llvm::Value *overflow_arg_area_p =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  // It isn't stated explicitly in the standard, but in practice we use
  // alignment greater than 16 where necessary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                                    CGF.Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                 overflow_arg_area->getType(),
                                 "overflow_arg_area.align");
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  // SizeInBytes is the type size rounded up to whole bytes; the GEP offset
  // below additionally rounds it up to the 8-byte slot size.
  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7)  & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}
2540 
/// Emit the x86-64 SysV va_arg sequence.
///
/// Emits a run-time check deciding whether the argument still fits in the
/// register save area or must come from the overflow (stack) area, loads it
/// from the appropriate place, and returns a pointer to the argument value.
llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;

  Ty = CGF.getContext().getCanonicalType(Ty);
  // Arguments fetched through va_arg are never named.
  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
                                       /*isNamedArg*/false);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
  // register save space).

  llvm::Value *InRegs = 0;
  llvm::Value *gp_offset_p = 0, *gp_offset = 0;
  llvm::Value *fp_offset_p = 0, *fp_offset = 0;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
      llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    // If both register classes are needed, both checks must pass.
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
  if (neededInt && neededSSE) {
    // The value is split between a GPR and an XMM register; reassemble the
    // two eightbytes into a temporary {lo, hi} struct.
    // FIXME: Cleanup.
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
    Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    llvm::Type *TyLo = ST->getElementType(0);
    llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    // Whichever element is floating-point comes from the FP save area.
    llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));

    // Copy to a temporary if necessary to ensure the appropriate alignment.
    std::pair<CharUnits, CharUnits> SizeAlign =
        CGF.getContext().getTypeInfoInChars(Ty);
    uint64_t TySize = SizeAlign.first.getQuantity();
    unsigned TyAlign = SizeAlign.second.getQuantity();
    if (TyAlign > 8) {
      llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
      CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false);
      RegAddr = Tmp;
    }
  } else if (neededSSE == 1) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
    // SSE registers are spaced 16 bytes apart in the register save
    // area, we need to collect the two eightbytes together.
    llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
    llvm::Type *DoubleTy = CGF.DoubleTy;
    llvm::Type *DblPtrTy =
      llvm::PointerType::getUnqual(DoubleTy);
    llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, NULL);
    llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty);
    Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  // Merge the register-path and memory-path addresses with a phi.
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
                                                 "vaarg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);
  return ResAddr;
}
2709 
2710 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, bool IsReturnType) const {
2711 
2712   if (Ty->isVoidType())
2713     return ABIArgInfo::getIgnore();
2714 
2715   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2716     Ty = EnumTy->getDecl()->getIntegerType();
2717 
2718   uint64_t Size = getContext().getTypeSize(Ty);
2719 
2720   if (const RecordType *RT = Ty->getAs<RecordType>()) {
2721     if (IsReturnType) {
2722       if (isRecordReturnIndirect(RT, getCXXABI()))
2723         return ABIArgInfo::getIndirect(0, false);
2724     } else {
2725       if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
2726         return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
2727     }
2728 
2729     if (RT->getDecl()->hasFlexibleArrayMember())
2730       return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2731 
2732     // FIXME: mingw-w64-gcc emits 128-bit struct as i128
2733     if (Size == 128 && getTarget().getTriple().isWindowsGNUEnvironment())
2734       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2735                                                           Size));
2736 
2737     // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
2738     // not 1, 2, 4, or 8 bytes, must be passed by reference."
2739     if (Size <= 64 &&
2740         (Size & (Size - 1)) == 0)
2741       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2742                                                           Size));
2743 
2744     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2745   }
2746 
2747   if (Ty->isPromotableIntegerType())
2748     return ABIArgInfo::getExtend();
2749 
2750   return ABIArgInfo::getDirect();
2751 }
2752 
2753 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2754 
2755   QualType RetTy = FI.getReturnType();
2756   FI.getReturnInfo() = classify(RetTy, true);
2757 
2758   for (auto &I : FI.arguments())
2759     I.info = classify(I.type, false);
2760 }
2761 
2762 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2763                                       CodeGenFunction &CGF) const {
2764   llvm::Type *BPP = CGF.Int8PtrPtrTy;
2765 
2766   CGBuilderTy &Builder = CGF.Builder;
2767   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
2768                                                        "ap");
2769   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2770   llvm::Type *PTy =
2771     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2772   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
2773 
2774   uint64_t Offset =
2775     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
2776   llvm::Value *NextAddr =
2777     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
2778                       "ap.next");
2779   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2780 
2781   return AddrTyped;
2782 }
2783 
namespace {

/// NaClX86_64ABIInfo - ABI information for Native Client on x86-64.
/// Delegates each query to one of two nested ABIInfo objects, chosen by the
/// calling convention of the function being lowered.
class NaClX86_64ABIInfo : public ABIInfo {
 public:
  NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
 private:
  PNaClABIInfo PInfo;  // Used for generating calls with pnaclcall callingconv.
  X86_64ABIInfo NInfo; // Used for everything else.
};

/// NaClX86_64TargetCodeGenInfo - TargetCodeGenInfo that installs the NaCl
/// x86-64 ABIInfo above.
class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo  {
 public:
  NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {}
};

}
2805 
2806 void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2807   if (FI.getASTCallingConvention() == CC_PnaclCall)
2808     PInfo.computeInfo(FI);
2809   else
2810     NInfo.computeInfo(FI);
2811 }
2812 
/// Emit va_arg for NaCl x86-64 by delegating to the native implementation.
llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                          CodeGenFunction &CGF) const {
  // Always use the native convention; calling pnacl-style varargs functions
  // is unsupported.
  return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
}
2819 
2820 
2821 // PowerPC-32
2822 
namespace {
/// PPC32TargetCodeGenInfo - Code-generation hooks for 32-bit PowerPC.
class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};

}
2838 
2839 bool
2840 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2841                                                 llvm::Value *Address) const {
2842   // This is calculated from the LLVM and GCC tables and verified
2843   // against gcc output.  AFAIK all ABIs use the same encoding.
2844 
2845   CodeGen::CGBuilderTy &Builder = CGF.Builder;
2846 
2847   llvm::IntegerType *i8 = CGF.Int8Ty;
2848   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2849   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
2850   llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
2851 
2852   // 0-31: r0-31, the 4-byte general-purpose registers
2853   AssignToArrayRange(Builder, Address, Four8, 0, 31);
2854 
2855   // 32-63: fp0-31, the 8-byte floating-point registers
2856   AssignToArrayRange(Builder, Address, Eight8, 32, 63);
2857 
2858   // 64-76 are various 4-byte special-purpose registers:
2859   // 64: mq
2860   // 65: lr
2861   // 66: ctr
2862   // 67: ap
2863   // 68-75 cr0-7
2864   // 76: xer
2865   AssignToArrayRange(Builder, Address, Four8, 64, 76);
2866 
2867   // 77-108: v0-31, the 16-byte vector registers
2868   AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
2869 
2870   // 109: vrsave
2871   // 110: vscr
2872   // 111: spe_acc
2873   // 112: spefscr
2874   // 113: sfp
2875   AssignToArrayRange(Builder, Address, Four8, 109, 113);
2876 
2877   return false;
2878 }
2879 
2880 // PowerPC-64
2881 
namespace {
/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public DefaultABIInfo {

public:
  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

  /// Returns true if the ABI requires Ty to be sign- or zero-extended
  /// to 64 bits when passed or returned.
  bool isPromotableTypeForABI(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // TODO: We can add more logic to computeInfo to improve performance.
  // Example: For aggregate arguments that fit in a register, we could
  // use getDirectInReg (as is done below for structs containing a single
  // floating-point value) to avoid pushing them to memory on function
  // entry.  This would require changing the logic in PPCISelLowering
  // when lowering the parameters in the caller and args in the callee.
  void computeInfo(CGFunctionInfo &FI) const override {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments()) {
      // We rely on the default argument classification for the most part.
      // One exception:  An aggregate containing a single floating-point
      // or vector item must be passed in a register if one is available.
      const Type *T = isSingleElementStruct(I.type, getContext());
      if (T) {
        const BuiltinType *BT = T->getAs<BuiltinType>();
        if (T->isVectorType() || (BT && BT->isFloatingPoint())) {
          QualType QT(T, 0);
          I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
          continue;
        }
      }
      I.info = classifyArgumentType(I.type);
    }
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

/// PPC64_SVR4_TargetCodeGenInfo - Code-generation hooks for the 64-bit
/// PowerPC ELF (SVR4) ABI.
class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};

/// PPC64TargetCodeGenInfo - Code-generation hooks for other 64-bit PowerPC
/// targets, using the default ABI classification.
class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};

}
2951 
2952 // Return true if the ABI requires Ty to be passed sign- or zero-
2953 // extended to 64 bits.
2954 bool
2955 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
2956   // Treat an enum type as its underlying type.
2957   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2958     Ty = EnumTy->getDecl()->getIntegerType();
2959 
2960   // Promotable integer types are required to be promoted by the ABI.
2961   if (Ty->isPromotableIntegerType())
2962     return true;
2963 
2964   // In addition to the usual promotable integer types, we also need to
2965   // extend all 32-bit types, since the ABI requires promotion to 64 bits.
2966   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
2967     switch (BT->getKind()) {
2968     case BuiltinType::Int:
2969     case BuiltinType::UInt:
2970       return true;
2971     default:
2972       break;
2973     }
2974 
2975   return false;
2976 }
2977 
2978 ABIArgInfo
2979 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
2980   if (Ty->isAnyComplexType())
2981     return ABIArgInfo::getDirect();
2982 
2983   if (isAggregateTypeForABI(Ty)) {
2984     if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
2985       return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
2986 
2987     return ABIArgInfo::getIndirect(0);
2988   }
2989 
2990   return (isPromotableTypeForABI(Ty) ?
2991           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2992 }
2993 
2994 ABIArgInfo
2995 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
2996   if (RetTy->isVoidType())
2997     return ABIArgInfo::getIgnore();
2998 
2999   if (RetTy->isAnyComplexType())
3000     return ABIArgInfo::getDirect();
3001 
3002   if (isAggregateTypeForABI(RetTy))
3003     return ABIArgInfo::getIndirect(0);
3004 
3005   return (isPromotableTypeForABI(RetTy) ?
3006           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3007 }
3008 
// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
/// Emit va_arg for the 64-bit SVR4 ABI.  The va_list is a plain pointer
/// bumped by the doubleword-rounded size of each argument; small scalars
/// and the parts of small complex values are right-adjusted within their
/// doubleword slots and must be repositioned before use.
llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
                                           QualType Ty,
                                           CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Update the va_list pointer.  The pointer should be bumped by the
  // size of the object.  We can trust getTypeSize() except for a complex
  // type whose base type is smaller than a doubleword.  For these, the
  // size of the object is 16 bytes; see below for further explanation.
  unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
  QualType BaseTy;
  unsigned CplxBaseSize = 0;

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    BaseTy = CTy->getElementType();
    CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8;
    // Each part of a small complex value occupies a full doubleword slot.
    if (CplxBaseSize < 8)
      SizeInBytes = 16;
  }

  // Advance the va_list pointer past this argument's slot(s).
  unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  // If we have a complex type and the base type is smaller than 8 bytes,
  // the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate doublewords.  However, Clang expects us to produce a
  // pointer to a structure with the two parts packed tightly.  So generate
  // loads of the real and imaginary parts relative to the va_list pointer,
  // and store them to a temporary structure.
  if (CplxBaseSize && CplxBaseSize < 8) {
    llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    llvm::Value *ImagAddr = RealAddr;
    RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
    ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
    llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy));
    RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy);
    ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
    llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal");
    llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag");
    llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty),
                                            "vacplx");
    llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real");
    llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag");
    Builder.CreateStore(Real, RealPtr, false);
    Builder.CreateStore(Imag, ImagPtr, false);
    return Ptr;
  }

  // If the argument is smaller than 8 bytes, it is right-adjusted in
  // its doubleword slot.  Adjust the pointer to pick it up from the
  // correct offset.
  if (SizeInBytes < 8) {
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
  }

  // Return the argument's address, retyped to point at Ty.
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  return Builder.CreateBitCast(Addr, PTy);
}
3078 
3079 static bool
3080 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3081                               llvm::Value *Address) {
3082   // This is calculated from the LLVM and GCC tables and verified
3083   // against gcc output.  AFAIK all ABIs use the same encoding.
3084 
3085   CodeGen::CGBuilderTy &Builder = CGF.Builder;
3086 
3087   llvm::IntegerType *i8 = CGF.Int8Ty;
3088   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
3089   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
3090   llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
3091 
3092   // 0-31: r0-31, the 8-byte general-purpose registers
3093   AssignToArrayRange(Builder, Address, Eight8, 0, 31);
3094 
3095   // 32-63: fp0-31, the 8-byte floating-point registers
3096   AssignToArrayRange(Builder, Address, Eight8, 32, 63);
3097 
3098   // 64-76 are various 4-byte special-purpose registers:
3099   // 64: mq
3100   // 65: lr
3101   // 66: ctr
3102   // 67: ap
3103   // 68-75 cr0-7
3104   // 76: xer
3105   AssignToArrayRange(Builder, Address, Four8, 64, 76);
3106 
3107   // 77-108: v0-31, the 16-byte vector registers
3108   AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
3109 
3110   // 109: vrsave
3111   // 110: vscr
3112   // 111: spe_acc
3113   // 112: spefscr
3114   // 113: sfp
3115   AssignToArrayRange(Builder, Address, Four8, 109, 113);
3116 
3117   return false;
3118 }
3119 
3120 bool
3121 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
3122   CodeGen::CodeGenFunction &CGF,
3123   llvm::Value *Address) const {
3124 
3125   return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3126 }
3127 
3128 bool
3129 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3130                                                 llvm::Value *Address) const {
3131 
3132   return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3133 }
3134 
3135 //===----------------------------------------------------------------------===//
3136 // ARM64 ABI Implementation
3137 //===----------------------------------------------------------------------===//
3138 
3139 namespace {
3140 
3141 class ARM64ABIInfo : public ABIInfo {
3142 public:
3143   enum ABIKind {
3144     AAPCS = 0,
3145     DarwinPCS
3146   };
3147 
3148 private:
3149   ABIKind Kind;
3150 
3151 public:
3152   ARM64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}
3153 
3154 private:
3155   ABIKind getABIKind() const { return Kind; }
3156   bool isDarwinPCS() const { return Kind == DarwinPCS; }
3157 
3158   ABIArgInfo classifyReturnType(QualType RetTy) const;
3159   ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &AllocatedVFP,
3160                                   bool &IsHA, unsigned &AllocatedGPR,
3161                                   bool &IsSmallAggr, bool IsNamedArg) const;
3162   bool isIllegalVectorType(QualType Ty) const;
3163 
3164   virtual void computeInfo(CGFunctionInfo &FI) const {
3165     // To correctly handle Homogeneous Aggregate, we need to keep track of the
3166     // number of SIMD and Floating-point registers allocated so far.
3167     // If the argument is an HFA or an HVA and there are sufficient unallocated
3168     // SIMD and Floating-point registers, then the argument is allocated to SIMD
3169     // and Floating-point Registers (with one register per member of the HFA or
3170     // HVA). Otherwise, the NSRN is set to 8.
3171     unsigned AllocatedVFP = 0;
3172 
3173     // To correctly handle small aggregates, we need to keep track of the number
3174     // of GPRs allocated so far. If the small aggregate can't all fit into
3175     // registers, it will be on stack. We don't allow the aggregate to be
3176     // partially in registers.
3177     unsigned AllocatedGPR = 0;
3178 
3179     // Find the number of named arguments. Variadic arguments get special
3180     // treatment with the Darwin ABI.
3181     unsigned NumRequiredArgs = (FI.isVariadic() ?
3182                                 FI.getRequiredArgs().getNumRequiredArgs() :
3183                                 FI.arg_size());
3184 
3185     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3186     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3187          it != ie; ++it) {
3188       unsigned PreAllocation = AllocatedVFP, PreGPR = AllocatedGPR;
3189       bool IsHA = false, IsSmallAggr = false;
3190       const unsigned NumVFPs = 8;
3191       const unsigned NumGPRs = 8;
3192       bool IsNamedArg = ((it - FI.arg_begin()) <
3193                          static_cast<signed>(NumRequiredArgs));
3194       it->info = classifyArgumentType(it->type, AllocatedVFP, IsHA,
3195                                       AllocatedGPR, IsSmallAggr, IsNamedArg);
3196 
3197       // Under AAPCS the 64-bit stack slot alignment means we can't pass HAs
3198       // as sequences of floats since they'll get "holes" inserted as
3199       // padding by the back end.
3200       if (IsHA && AllocatedVFP > NumVFPs && !isDarwinPCS() &&
3201           getContext().getTypeAlign(it->type) < 64) {
3202         uint32_t NumStackSlots = getContext().getTypeSize(it->type);
3203         NumStackSlots = llvm::RoundUpToAlignment(NumStackSlots, 64) / 64;
3204 
3205         llvm::Type *CoerceTy = llvm::ArrayType::get(
3206             llvm::Type::getDoubleTy(getVMContext()), NumStackSlots);
3207         it->info = ABIArgInfo::getDirect(CoerceTy);
3208       }
3209 
3210       // If we do not have enough VFP registers for the HA, any VFP registers
3211       // that are unallocated are marked as unavailable. To achieve this, we add
3212       // padding of (NumVFPs - PreAllocation) floats.
3213       if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
3214         llvm::Type *PaddingTy = llvm::ArrayType::get(
3215             llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
3216         it->info.setPaddingType(PaddingTy);
3217       }
3218 
3219       // If we do not have enough GPRs for the small aggregate, any GPR regs
3220       // that are unallocated are marked as unavailable.
3221       if (IsSmallAggr && AllocatedGPR > NumGPRs && PreGPR < NumGPRs) {
3222         llvm::Type *PaddingTy = llvm::ArrayType::get(
3223             llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreGPR);
3224         it->info =
3225             ABIArgInfo::getDirect(it->info.getCoerceToType(), 0, PaddingTy);
3226       }
3227     }
3228   }
3229 
3230   llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
3231                                CodeGenFunction &CGF) const;
3232 
3233   llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
3234                               CodeGenFunction &CGF) const;
3235 
3236   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3237                                  CodeGenFunction &CGF) const {
3238     return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
3239                          : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
3240   }
3241 };
3242 
3243 class ARM64TargetCodeGenInfo : public TargetCodeGenInfo {
3244 public:
3245   ARM64TargetCodeGenInfo(CodeGenTypes &CGT, ARM64ABIInfo::ABIKind Kind)
3246       : TargetCodeGenInfo(new ARM64ABIInfo(CGT, Kind)) {}
3247 
3248   StringRef getARCRetainAutoreleasedReturnValueMarker() const {
3249     return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
3250   }
3251 
3252   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { return 31; }
3253 
3254   virtual bool doesReturnSlotInterfereWithArgs() const { return false; }
3255 };
3256 }
3257 
3258 static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
3259                                    ASTContext &Context,
3260                                    uint64_t *HAMembers = 0);
3261 
/// classifyArgumentType - Decide how one ARM64 argument of type \p Ty is
/// passed, recording the registers it notionally consumes.
///
/// \param AllocatedVFP [in,out] SIMD/floating-point registers used so far;
///        incremented by however many this argument needs.
/// \param IsHA [out] set when \p Ty is a Homogeneous Floating-point/Vector
///        Aggregate.
/// \param AllocatedGPR [in,out] general-purpose registers used so far.
/// \param IsSmallAggr [out] set when \p Ty is an aggregate of at most 16
///        bytes that is passed directly.
/// \param IsNamedArg true for a named (non-variadic) argument; variadic
///        arguments get special treatment under the Darwin ABI.
ABIArgInfo ARM64ABIInfo::classifyArgumentType(QualType Ty,
                                              unsigned &AllocatedVFP,
                                              bool &IsHA,
                                              unsigned &AllocatedGPR,
                                              bool &IsSmallAggr,
                                              bool IsNamedArg) const {
  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    // Vectors of 32 bits or less are coerced to a single i32 in a GPR.
    if (Size <= 32) {
      llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
      AllocatedGPR++;
      return ABIArgInfo::getDirect(ResType);
    }
    // 64-bit illegal vectors become <2 x i32> in a VFP register.
    if (Size == 64) {
      llvm::Type *ResType =
          llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
      AllocatedVFP++;
      return ABIArgInfo::getDirect(ResType);
    }
    // 128-bit illegal vectors become <4 x i32> in a VFP register.
    if (Size == 128) {
      llvm::Type *ResType =
          llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
      AllocatedVFP++;
      return ABIArgInfo::getDirect(ResType);
    }
    // Anything else is passed indirectly: a pointer in one GPR.
    AllocatedGPR++;
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }
  if (Ty->isVectorType())
    // Size of a legal vector should be either 64 or 128.
    AllocatedVFP++;
  // Scalar floating-point types each occupy one VFP register.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Half ||
        BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      AllocatedVFP++;
  }

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // Account for the GPRs an integer-class scalar consumes: under AAPCS an
    // over-aligned scalar (e.g. __int128) is first aligned to an even
    // register pair, then takes one GPR per 64 bits.
    if (!Ty->isFloatingType() && !Ty->isVectorType()) {
      unsigned Alignment = getContext().getTypeAlign(Ty);
      if (!isDarwinPCS() && Alignment > 64)
        AllocatedGPR = llvm::RoundUpToAlignment(AllocatedGPR, Alignment / 64);

      int RegsNeeded = getContext().getTypeSize(Ty) > 64 ? 2 : 1;
      AllocatedGPR += RegsNeeded;
    }
    // Darwin promotes small integer arguments; generic AAPCS does not.
    return (Ty->isPromotableIntegerType() && isDarwinPCS()
                ? ABIArgInfo::getExtend()
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (isRecordReturnIndirect(Ty, getCXXABI())) {
    AllocatedGPR++;
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }

  // Empty records are always ignored on Darwin, but actually passed in C++ mode
  // elsewhere for GNU compatibility.
  if (isEmptyRecord(getContext(), Ty, true)) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();

    ++AllocatedGPR;
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = 0;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
    IsHA = true;
    if (!IsNamedArg && isDarwinPCS()) {
      // With the Darwin ABI, variadic arguments are always passed on the stack
      // and should not be expanded. Treat variadic HFAs as arrays of doubles.
      uint64_t Size = getContext().getTypeSize(Ty);
      llvm::Type *BaseTy = llvm::Type::getDoubleTy(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    // One VFP register per HFA member.
    AllocatedVFP += Members;
    return ABIArgInfo::getExpand();
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size <= 128) {
    // As for scalars above, AAPCS aligns over-aligned aggregates to an even
    // register pair before allocating GPRs.
    unsigned Alignment = getContext().getTypeAlign(Ty);
    if (!isDarwinPCS() && Alignment > 64)
      AllocatedGPR = llvm::RoundUpToAlignment(AllocatedGPR, Alignment / 64);

    Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
    AllocatedGPR += Size / 64;
    IsSmallAggr = true;
    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  // Large aggregates are passed indirectly: a pointer in one GPR.
  AllocatedGPR++;
  return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
3375 
3376 ABIArgInfo ARM64ABIInfo::classifyReturnType(QualType RetTy) const {
3377   if (RetTy->isVoidType())
3378     return ABIArgInfo::getIgnore();
3379 
3380   // Large vector types should be returned via memory.
3381   if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
3382     return ABIArgInfo::getIndirect(0);
3383 
3384   if (!isAggregateTypeForABI(RetTy)) {
3385     // Treat an enum type as its underlying type.
3386     if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3387       RetTy = EnumTy->getDecl()->getIntegerType();
3388 
3389     return (RetTy->isPromotableIntegerType() && isDarwinPCS()
3390                 ? ABIArgInfo::getExtend()
3391                 : ABIArgInfo::getDirect());
3392   }
3393 
3394   // Structures with either a non-trivial destructor or a non-trivial
3395   // copy constructor are always indirect.
3396   if (isRecordReturnIndirect(RetTy, getCXXABI()))
3397     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3398 
3399   if (isEmptyRecord(getContext(), RetTy, true))
3400     return ABIArgInfo::getIgnore();
3401 
3402   const Type *Base = 0;
3403   if (isHomogeneousAggregate(RetTy, Base, getContext()))
3404     // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
3405     return ABIArgInfo::getDirect();
3406 
3407   // Aggregates <= 16 bytes are returned directly in registers or on the stack.
3408   uint64_t Size = getContext().getTypeSize(RetTy);
3409   if (Size <= 128) {
3410     Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
3411     return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
3412   }
3413 
3414   return ABIArgInfo::getIndirect(0);
3415 }
3416 
3417 /// isIllegalVectorType - check whether the vector type is legal for ARM64.
3418 bool ARM64ABIInfo::isIllegalVectorType(QualType Ty) const {
3419   if (const VectorType *VT = Ty->getAs<VectorType>()) {
3420     // Check whether VT is legal.
3421     unsigned NumElements = VT->getNumElements();
3422     uint64_t Size = getContext().getTypeSize(VT);
3423     // NumElements should be power of 2 between 1 and 16.
3424     if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16)
3425       return true;
3426     return Size != 64 && (Size != 128 || NumElements == 1);
3427   }
3428   return false;
3429 }
3430 
/// EmitAArch64VAArg - Emit code to fetch a va_arg value of type \p Ty from an
/// AAPCS64 va_list, handling both the register-save area and the stack.
///
/// \param AllocatedGPR number of general-purpose registers the argument would
///        consume if passed in registers (0 when it uses VFP registers).
/// \param AllocatedVFP number of SIMD/FP registers it would consume (0 when
///        it uses GPRs).  Exactly one of AllocatedGPR/AllocatedVFP is nonzero.
/// \param IsIndirect true when the argument is actually passed as a pointer
///        to a temporary.
/// \returns the address from which the argument value can be loaded.
static llvm::Value *EmitAArch64VAArg(llvm::Value *VAListAddr, QualType Ty,
                                     int AllocatedGPR, int AllocatedVFP,
                                     bool IsIndirect, CodeGenFunction &CGF) {
  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  auto &Ctx = CGF.getContext();

  // Select the offset/top fields for the register class this argument uses:
  // GPRs use __gr_offs/__gr_top, VFP registers use __vr_offs/__vr_top.
  llvm::Value *reg_offs_p = 0, *reg_offs = 0;
  int reg_top_index;
  int RegSize;
  if (AllocatedGPR) {
    assert(!AllocatedVFP && "Arguments never split between int & VFP regs");
    // 3 is the field number of __gr_offs
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    RegSize = 8 * AllocatedGPR;
  } else {
    assert(!AllocatedGPR && "Argument must go in VFP or int regs");
    // 4 is the field number of __vr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
    RegSize = 16 * AllocatedVFP;
  }

  //=======================================
  // Find out where argument was passed
  //=======================================

  // If reg_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  llvm::Value *UsingStack = 0;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers, the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
  // align __gr_offs to calculate the potential address.
  if (AllocatedGPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
    int Align = Ctx.getTypeAlign(Ty) / 8;

    // Round reg_offs up to the next multiple of Align (Align is a power of
    // two, so add Align-1 then mask).
    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
  llvm::Value *NewOffset = 0;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not: the save area is exhausted once the offset reaches 0.
  llvm::Value *InRegs = 0;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================

  // Now we emit the code for if the argument was originally passed in
  // registers. First start the appropriate block:
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top_p = 0, *reg_top = 0;
  reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  // reg_offs is negative, so this addresses back into the save area.
  llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
  llvm::Value *RegAddr = 0;
  llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));

  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find from
    // stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }

  const Type *Base = 0;
  uint64_t NumMembers;
  if (isHomogeneousAggregate(Ty, Base, Ctx, &NumMembers) && NumMembers > 1) {
    // Homogeneous aggregates passed in registers will have their elements split
    // and stored 16-bytes apart regardless of size (they're notionally in qN,
    // qN+1, ...). We reload and store into a temporary local variable
    // contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
    int Offset = 0;

    // On big-endian targets a member smaller than 16 bytes sits at the high
    // end of its register slot, so skip the leading padding.
    if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128)
      Offset = 16 - Ctx.getTypeSize(Base) / 8;
    for (unsigned i = 0; i < NumMembers; ++i) {
      llvm::Value *BaseOffset =
          llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset);
      llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
      LoadAddr = CGF.Builder.CreateBitCast(
          LoadAddr, llvm::PointerType::getUnqual(BaseTy));
      llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
  } else {
    // Otherwise the object is contiguous in memory
    unsigned BeAlign = reg_top_index == 2 ? 16 : 8;
    // Big-endian: small scalars are stored at the high end of their slot.
    if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
        Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
      int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;
      BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);

      BaseAddr = CGF.Builder.CreateAdd(
          BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");

      BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
    }

    RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
  }

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);

  llvm::Value *stack_p = 0, *OnStackAddr = 0;
  stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Again, stack arguments may need realigmnent. In this case both integer and
  // floating-point ones might be affected.
  if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
    int Align = Ctx.getTypeAlign(Ty) / 8;

    OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);

    OnStackAddr = CGF.Builder.CreateAdd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
        "align_stack");
    OnStackAddr = CGF.Builder.CreateAnd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
        "align_stack");

    OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
  }

  // Indirect arguments occupy a single pointer-sized slot on the stack.
  uint64_t StackSize;
  if (IsIndirect)
    StackSize = 8;
  else
    StackSize = Ctx.getTypeSize(Ty) / 8;

  // All stack slots are 8 bytes
  StackSize = llvm::RoundUpToAlignment(StackSize, 8);

  llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
  llvm::Value *NewStack =
      CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg
  CGF.Builder.CreateStore(NewStack, stack_p);

  // Big-endian: small scalars on the stack also sit at the high end of their
  // 8-byte slot.
  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      Ctx.getTypeSize(Ty) < 64) {
    int Offset = 8 - Ctx.getTypeSize(Ty) / 8;
    OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);

    OnStackAddr = CGF.Builder.CreateAdd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");

    OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
  }

  OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tidy up
  //=======================================
  CGF.EmitBlock(ContBlock);

  // Merge the two possible addresses with a PHI.
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(OnStackAddr, OnStackBlock);

  // For an indirect argument, what we computed is the address of the pointer;
  // load it to get the argument's real address.
  if (IsIndirect)
    return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");

  return ResAddr;
}
3656 
3657 llvm::Value *ARM64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
3658                                           CodeGenFunction &CGF) const {
3659 
3660   unsigned AllocatedGPR = 0, AllocatedVFP = 0;
3661   bool IsHA = false, IsSmallAggr = false;
3662   ABIArgInfo AI = classifyArgumentType(Ty, AllocatedVFP, IsHA, AllocatedGPR,
3663                                        IsSmallAggr, false /*IsNamedArg*/);
3664 
3665   return EmitAArch64VAArg(VAListAddr, Ty, AllocatedGPR, AllocatedVFP,
3666                           AI.isIndirect(), CGF);
3667 }
3668 
/// EmitDarwinVAArg - Lower va_arg for the Darwin ARM64 ABI, where the va_list
/// is a simple stack pointer.  Returns the address of the argument value, or
/// null when the caller should fall back to the LLVM va_arg instruction.
llvm::Value *ARM64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
                                           CodeGenFunction &CGF) const {
  // We do not support va_arg for aggregates or illegal vector types.
  // Lower VAArg here for these cases and use the LLVM va_arg instruction for
  // other cases.
  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
    return 0;

  // Size and alignment of the argument, in bytes.
  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;

  const Type *Base = 0;
  bool isHA = isHomogeneousAggregate(Ty, Base, getContext());

  bool isIndirect = false;
  // Arguments bigger than 16 bytes which aren't homogeneous aggregates should
  // be passed indirectly.
  if (Size > 16 && !isHA) {
    isIndirect = true;
    // The slot then holds just a pointer.
    Size = 8;
    Align = 8;
  }

  llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  // Load the current argument pointer from the va_list.
  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  if (isEmptyRecord(getContext(), Ty, true)) {
    // These are ignored for parameter passing purposes.
    llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
    return Builder.CreateBitCast(Addr, PTy);
  }

  // Round the pointer up to the argument's alignment when it exceeds the
  // minimum 8-byte slot alignment (add Align-1 then mask; Align is a power
  // of two).
  const uint64_t MinABIAlign = 8;
  if (Align > MinABIAlign) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~(Align - 1));
    llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask);
    Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align");
  }

  // Advance the va_list pointer past this argument (slots are 8-byte
  // multiples) and store it back for the next va_arg.
  uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign);
  llvm::Value *NextAddr = Builder.CreateGEP(
      Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  // An indirect argument's slot holds a pointer to the real value.
  if (isIndirect)
    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  return AddrTyped;
}
3727 
3728 //===----------------------------------------------------------------------===//
3729 // ARM ABI Implementation
3730 //===----------------------------------------------------------------------===//
3731 
3732 namespace {
3733 
3734 class ARMABIInfo : public ABIInfo {
3735 public:
3736   enum ABIKind {
3737     APCS = 0,
3738     AAPCS = 1,
3739     AAPCS_VFP
3740   };
3741 
3742 private:
3743   ABIKind Kind;
3744   mutable int VFPRegs[16];
3745   const unsigned NumVFPs;
3746   const unsigned NumGPRs;
3747   mutable unsigned AllocatedGPRs;
3748   mutable unsigned AllocatedVFPs;
3749 
3750 public:
3751   ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind),
3752     NumVFPs(16), NumGPRs(4) {
3753     setRuntimeCC();
3754     resetAllocatedRegs();
3755   }
3756 
3757   bool isEABI() const {
3758     switch (getTarget().getTriple().getEnvironment()) {
3759     case llvm::Triple::Android:
3760     case llvm::Triple::EABI:
3761     case llvm::Triple::EABIHF:
3762     case llvm::Triple::GNUEABI:
3763     case llvm::Triple::GNUEABIHF:
3764       return true;
3765     default:
3766       return false;
3767     }
3768   }
3769 
3770   bool isEABIHF() const {
3771     switch (getTarget().getTriple().getEnvironment()) {
3772     case llvm::Triple::EABIHF:
3773     case llvm::Triple::GNUEABIHF:
3774       return true;
3775     default:
3776       return false;
3777     }
3778   }
3779 
3780   ABIKind getABIKind() const { return Kind; }
3781 
3782 private:
3783   ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
3784   ABIArgInfo classifyArgumentType(QualType RetTy, bool &IsHA, bool isVariadic,
3785                                   bool &IsCPRC) const;
3786   bool isIllegalVectorType(QualType Ty) const;
3787 
3788   void computeInfo(CGFunctionInfo &FI) const override;
3789 
3790   llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3791                          CodeGenFunction &CGF) const override;
3792 
3793   llvm::CallingConv::ID getLLVMDefaultCC() const;
3794   llvm::CallingConv::ID getABIDefaultCC() const;
3795   void setRuntimeCC();
3796 
3797   void markAllocatedGPRs(unsigned Alignment, unsigned NumRequired) const;
3798   void markAllocatedVFPs(unsigned Alignment, unsigned NumRequired) const;
3799   void resetAllocatedRegs(void) const;
3800 };
3801 
3802 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
3803 public:
3804   ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
3805     :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
3806 
3807   const ARMABIInfo &getABIInfo() const {
3808     return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
3809   }
3810 
3811   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3812     return 13;
3813   }
3814 
3815   StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
3816     return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
3817   }
3818 
3819   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3820                                llvm::Value *Address) const override {
3821     llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
3822 
3823     // 0-15 are the 16 integer registers.
3824     AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
3825     return false;
3826   }
3827 
3828   unsigned getSizeOfUnwindException() const override {
3829     if (getABIInfo().isEABI()) return 88;
3830     return TargetCodeGenInfo::getSizeOfUnwindException();
3831   }
3832 
3833   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
3834                            CodeGen::CodeGenModule &CGM) const override {
3835     const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
3836     if (!FD)
3837       return;
3838 
3839     const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
3840     if (!Attr)
3841       return;
3842 
3843     const char *Kind;
3844     switch (Attr->getInterrupt()) {
3845     case ARMInterruptAttr::Generic: Kind = ""; break;
3846     case ARMInterruptAttr::IRQ:     Kind = "IRQ"; break;
3847     case ARMInterruptAttr::FIQ:     Kind = "FIQ"; break;
3848     case ARMInterruptAttr::SWI:     Kind = "SWI"; break;
3849     case ARMInterruptAttr::ABORT:   Kind = "ABORT"; break;
3850     case ARMInterruptAttr::UNDEF:   Kind = "UNDEF"; break;
3851     }
3852 
3853     llvm::Function *Fn = cast<llvm::Function>(GV);
3854 
3855     Fn->addFnAttr("interrupt", Kind);
3856 
3857     if (cast<ARMABIInfo>(getABIInfo()).getABIKind() == ARMABIInfo::APCS)
3858       return;
3859 
3860     // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
3861     // however this is not necessarily true on taking any interrupt. Instruct
3862     // the backend to perform a realignment as part of the function prologue.
3863     llvm::AttrBuilder B;
3864     B.addStackAlignmentAttr(8);
3865     Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
3866                       llvm::AttributeSet::get(CGM.getLLVMContext(),
3867                                               llvm::AttributeSet::FunctionIndex,
3868                                               B));
3869   }
3870 
3871 };
3872 
3873 }
3874 
// Classify the return type and every argument of a call/function, tracking
// VFP and GPR consumption across arguments so that AAPCS padding rules can
// be applied (see the C.x rules cited below).
void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  // To correctly handle Homogeneous Aggregate, we need to keep track of the
  // VFP registers allocated so far.
  // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
  // VFP registers of the appropriate type unallocated then the argument is
  // allocated to the lowest-numbered sequence of such registers.
  // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
  // unallocated are marked as unavailable.
  resetAllocatedRegs();

  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic());
  for (auto &I : FI.arguments()) {
    // Snapshot register usage before classifying this argument so we can see
    // how many registers it consumed.
    unsigned PreAllocationVFPs = AllocatedVFPs;
    unsigned PreAllocationGPRs = AllocatedGPRs;
    bool IsHA = false;
    bool IsCPRC = false;
    // 6.1.2.3 There is one VFP co-processor register class using registers
    // s0-s15 (d0-d7) for passing arguments.
    I.info = classifyArgumentType(I.type, IsHA, FI.isVariadic(), IsCPRC);
    assert((IsCPRC || !IsHA) && "Homogeneous aggregates must be CPRCs");
    // If we do not have enough VFP registers for the HA, any VFP registers
    // that are unallocated are marked as unavailable. To achieve this, we add
    // padding of (NumVFPs - PreAllocationVFP) floats.
    // Note that IsHA will only be set when using the AAPCS-VFP calling convention,
    // and the callee is not variadic.
    if (IsHA && AllocatedVFPs > NumVFPs && PreAllocationVFPs < NumVFPs) {
      llvm::Type *PaddingTy = llvm::ArrayType::get(
          llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocationVFPs);
      I.info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
    }

    // If we have allocated some arguments onto the stack (due to running
    // out of VFP registers), we cannot split an argument between GPRs and
    // the stack. If this situation occurs, we add padding to prevent the
    // GPRs from being used. In this situation, the current argument could
    // only be allocated by rule C.8, so rule C.6 would mark these GPRs as
    // unusable anyway.
    const bool StackUsed = PreAllocationGPRs > NumGPRs || PreAllocationVFPs > NumVFPs;
    if (!IsCPRC && PreAllocationGPRs < NumGPRs && AllocatedGPRs > NumGPRs && StackUsed) {
      llvm::Type *PaddingTy = llvm::ArrayType::get(
          llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreAllocationGPRs);
      I.info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
    }
  }

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  // Otherwise annotate with the runtime CC only if it differs from LLVM's
  // inferred default (see setRuntimeCC below).
  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}
3928 
3929 /// Return the default calling convention that LLVM will use.
3930 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
3931   // The default calling convention that LLVM will infer.
3932   if (isEABIHF())
3933     return llvm::CallingConv::ARM_AAPCS_VFP;
3934   else if (isEABI())
3935     return llvm::CallingConv::ARM_AAPCS;
3936   else
3937     return llvm::CallingConv::ARM_APCS;
3938 }
3939 
3940 /// Return the calling convention that our ABI would like us to use
3941 /// as the C calling convention.
3942 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
3943   switch (getABIKind()) {
3944   case APCS: return llvm::CallingConv::ARM_APCS;
3945   case AAPCS: return llvm::CallingConv::ARM_AAPCS;
3946   case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
3947   }
3948   llvm_unreachable("bad ABI kind");
3949 }
3950 
3951 void ARMABIInfo::setRuntimeCC() {
3952   assert(getRuntimeCC() == llvm::CallingConv::C);
3953 
3954   // Don't muddy up the IR with a ton of explicit annotations if
3955   // they'd just match what LLVM will infer from the triple.
3956   llvm::CallingConv::ID abiCC = getABIDefaultCC();
3957   if (abiCC != getLLVMDefaultCC())
3958     RuntimeCC = abiCC;
3959 }
3960 
3961 /// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous
3962 /// aggregate.  If HAMembers is non-null, the number of base elements
3963 /// contained in the type is returned through it; this is used for the
3964 /// recursive calls that check aggregate component types.
3965 static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
3966                                    ASTContext &Context, uint64_t *HAMembers) {
3967   uint64_t Members = 0;
3968   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
3969     if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
3970       return false;
3971     Members *= AT->getSize().getZExtValue();
3972   } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
3973     const RecordDecl *RD = RT->getDecl();
3974     if (RD->hasFlexibleArrayMember())
3975       return false;
3976 
3977     Members = 0;
3978     for (const auto *FD : RD->fields()) {
3979       uint64_t FldMembers;
3980       if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers))
3981         return false;
3982 
3983       Members = (RD->isUnion() ?
3984                  std::max(Members, FldMembers) : Members + FldMembers);
3985     }
3986   } else {
3987     Members = 1;
3988     if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
3989       Members = 2;
3990       Ty = CT->getElementType();
3991     }
3992 
3993     // Homogeneous aggregates for AAPCS-VFP must have base types of float,
3994     // double, or 64-bit or 128-bit vectors.
3995     if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3996       if (BT->getKind() != BuiltinType::Float &&
3997           BT->getKind() != BuiltinType::Double &&
3998           BT->getKind() != BuiltinType::LongDouble)
3999         return false;
4000     } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
4001       unsigned VecSize = Context.getTypeSize(VT);
4002       if (VecSize != 64 && VecSize != 128)
4003         return false;
4004     } else {
4005       return false;
4006     }
4007 
4008     // The base type must be the same for all members.  Vector types of the
4009     // same total size are treated as being equivalent here.
4010     const Type *TyPtr = Ty.getTypePtr();
4011     if (!Base)
4012       Base = TyPtr;
4013 
4014     if (Base != TyPtr) {
4015       // Homogeneous aggregates are defined as containing members with the
4016       // same machine type. There are two cases in which two members have
4017       // different TypePtrs but the same machine type:
4018 
4019       // 1) Vectors of the same length, regardless of the type and number
4020       //    of their members.
4021       const bool SameLengthVectors = Base->isVectorType() && TyPtr->isVectorType()
4022         && (Context.getTypeSize(Base) == Context.getTypeSize(TyPtr));
4023 
4024       // 2) In the 32-bit AAPCS, `double' and `long double' have the same
4025       //    machine type. This is not the case for the 64-bit AAPCS.
4026       const bool SameSizeDoubles =
4027            (   (   Base->isSpecificBuiltinType(BuiltinType::Double)
4028                 && TyPtr->isSpecificBuiltinType(BuiltinType::LongDouble))
4029             || (   Base->isSpecificBuiltinType(BuiltinType::LongDouble)
4030                 && TyPtr->isSpecificBuiltinType(BuiltinType::Double)))
4031         && (Context.getTypeSize(Base) == Context.getTypeSize(TyPtr));
4032 
4033       if (!SameLengthVectors && !SameSizeDoubles)
4034         return false;
4035     }
4036   }
4037 
4038   // Homogeneous Aggregates can have at most 4 members of the base type.
4039   if (HAMembers)
4040     *HAMembers = Members;
4041 
4042   return (Members > 0 && Members <= 4);
4043 }
4044 
4045 /// markAllocatedVFPs - update VFPRegs according to the alignment and
4046 /// number of VFP registers (unit is S register) requested.
4047 void ARMABIInfo::markAllocatedVFPs(unsigned Alignment,
4048                                    unsigned NumRequired) const {
4049   // Early Exit.
4050   if (AllocatedVFPs >= 16) {
4051     // We use AllocatedVFP > 16 to signal that some CPRCs were allocated on
4052     // the stack.
4053     AllocatedVFPs = 17;
4054     return;
4055   }
4056   // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
4057   // VFP registers of the appropriate type unallocated then the argument is
4058   // allocated to the lowest-numbered sequence of such registers.
4059   for (unsigned I = 0; I < 16; I += Alignment) {
4060     bool FoundSlot = true;
4061     for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
4062       if (J >= 16 || VFPRegs[J]) {
4063          FoundSlot = false;
4064          break;
4065       }
4066     if (FoundSlot) {
4067       for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
4068         VFPRegs[J] = 1;
4069       AllocatedVFPs += NumRequired;
4070       return;
4071     }
4072   }
4073   // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
4074   // unallocated are marked as unavailable.
4075   for (unsigned I = 0; I < 16; I++)
4076     VFPRegs[I] = 1;
4077   AllocatedVFPs = 17; // We do not have enough VFP registers.
4078 }
4079 
4080 /// Update AllocatedGPRs to record the number of general purpose registers
4081 /// which have been allocated. It is valid for AllocatedGPRs to go above 4,
4082 /// this represents arguments being stored on the stack.
4083 void ARMABIInfo::markAllocatedGPRs(unsigned Alignment,
4084                                           unsigned NumRequired) const {
4085   assert((Alignment == 1 || Alignment == 2) && "Alignment must be 4 or 8 bytes");
4086 
4087   if (Alignment == 2 && AllocatedGPRs & 0x1)
4088     AllocatedGPRs += 1;
4089 
4090   AllocatedGPRs += NumRequired;
4091 }
4092 
4093 void ARMABIInfo::resetAllocatedRegs(void) const {
4094   AllocatedGPRs = 0;
4095   AllocatedVFPs = 0;
4096   for (unsigned i = 0; i < NumVFPs; ++i)
4097     VFPRegs[i] = 0;
4098 }
4099 
// Classify one argument for the ARM calling convention, updating the
// VFP/GPR allocation state as a side effect.
//   Ty         - the argument's type.
//   IsHA       - out: set when the argument is a homogeneous aggregate
//                expanded into VFP registers.
//   isVariadic - whether the callee is variadic (variadic calls never use
//                the VFP argument registers).
//   IsCPRC     - out: set when the argument is a co-processor register
//                candidate (i.e. it consumes VFP, not GPR, registers).
ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool &IsHA,
                                            bool isVariadic,
                                            bool &IsCPRC) const {
  // We update number of allocated VFPs according to
  // 6.1.2.1 The following argument types are VFP CPRCs:
  //   A single-precision floating-point type (including promoted
  //   half-precision types); A double-precision floating-point type;
  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
  //   with a Base Type of a single- or double-precision floating-point type,
  //   64-bit containerized vectors or 128-bit containerized vectors with one
  //   to four Elements.

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 32) {
      // Small illegal vectors are passed as a plain i32 in one GPR.
      llvm::Type *ResType =
          llvm::Type::getInt32Ty(getVMContext());
      markAllocatedGPRs(1, 1);
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 64) {
      // Coerce to <2 x i32>; AAPCS (or variadic) uses a GPR pair, AAPCS-VFP
      // uses two consecutive S registers.
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 2);
      if (getABIKind() == ARMABIInfo::AAPCS || isVariadic){
        markAllocatedGPRs(2, 2);
      } else {
        markAllocatedVFPs(2, 2);
        IsCPRC = true;
      }
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 128) {
      // Coerce to <4 x i32>; four GPRs or four S registers as above.
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);
      if (getABIKind() == ARMABIInfo::AAPCS || isVariadic) {
        markAllocatedGPRs(2, 4);
      } else {
        markAllocatedVFPs(4, 4);
        IsCPRC = true;
      }
      return ABIArgInfo::getDirect(ResType);
    }
    // Any other illegal vector size is passed indirectly (pointer in a GPR).
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }
  // Update VFPRegs for legal vector types.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
    if (const VectorType *VT = Ty->getAs<VectorType>()) {
      uint64_t Size = getContext().getTypeSize(VT);
      // Size of a legal vector should be power of 2 and above 64.
      markAllocatedVFPs(Size >= 128 ? 4 : 2, Size / 32);
      IsCPRC = true;
    }
  }
  // Update VFPRegs for floating point types.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
    if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::Half ||
          BT->getKind() == BuiltinType::Float) {
        markAllocatedVFPs(1, 1);
        IsCPRC = true;
      }
      if (BT->getKind() == BuiltinType::Double ||
          BT->getKind() == BuiltinType::LongDouble) {
        markAllocatedVFPs(2, 2);
        IsCPRC = true;
      }
    }
  }

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
      Ty = EnumTy->getDecl()->getIntegerType();
    }

    // Scalars consume GPRs unless they were already accounted for as CPRCs
    // above. Types wider than 32 bits need an even-register pair.
    unsigned Size = getContext().getTypeSize(Ty);
    if (!IsCPRC)
      markAllocatedGPRs(Size > 32 ? 2 : 1, (Size + 31) / 32);
    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // C++ records with non-trivial copy/destroy semantics must be passed
  // indirectly per the C++ ABI.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = 0;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Base can be a floating-point or a vector.
      if (Base->isVectorType()) {
        // ElementSize is in number of floats.
        unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4;
        markAllocatedVFPs(ElementSize,
                          Members * ElementSize);
      } else if (Base->isSpecificBuiltinType(BuiltinType::Float))
        markAllocatedVFPs(1, Members);
      else {
        assert(Base->isSpecificBuiltinType(BuiltinType::Double) ||
               Base->isSpecificBuiltinType(BuiltinType::LongDouble));
        markAllocatedVFPs(2, Members * 2);
      }
      IsHA = true;
      IsCPRC = true;
      return ABIArgInfo::getExpand();
    }
  }

  // Support byval for ARM.
  // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
  // most 8-byte. We realign the indirect argument if type alignment is bigger
  // than ABI alignment.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    // Aggregates larger than 64 bytes go byval; the pointer consumes a GPR.
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(TyAlign, /*ByVal=*/true,
           /*Realign=*/TyAlign > ABIAlign);
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type* ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
  if (getContext().getTypeAlign(Ty) <= 32) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
    markAllocatedGPRs(1, SizeRegs);
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
    markAllocatedGPRs(2, SizeRegs * 2);
  }

  llvm::Type *STy =
    llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL);
  return ABIArgInfo::getDirect(STy);
}
4254 
// Decide whether Ty qualifies as "integer-like" for the legacy APCS return
// rules: such types are returned in r0 rather than via sret.
static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  // HadField tracks whether we have already seen a (non-union) field; the
  // gcc-compatible rule below allows at most one.
  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, we only need to verify they are "integer
    // like". We still have to disallow a subsequent non-bitfield, for example:
    //   struct { int : 0; int x }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}
4339 
// Classify the return type for the ARM calling convention. Indirect returns
// consume a GPR for the sret pointer, hence the markAllocatedGPRs(1, 1)
// calls before each getIndirect result.
ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
                                          bool isVariadic) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (isRecordReturnIndirect(RetTy, getCXXABI())) {
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                              getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (getABIKind() == AAPCS_VFP && !isVariadic) {
    const Type *Base = 0;
    if (isHomogeneousAggregate(RetTy, Base, getContext())) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Homogeneous Aggregates are returned directly.
      return ABIArgInfo::getDirect();
    }
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  }

  markAllocatedGPRs(1, 1);
  return ABIArgInfo::getIndirect(0);
}
4426 
4427 /// isIllegalVector - check whether Ty is an illegal vector type.
4428 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
4429   if (const VectorType *VT = Ty->getAs<VectorType>()) {
4430     // Check whether VT is legal.
4431     unsigned NumElements = VT->getNumElements();
4432     uint64_t Size = getContext().getTypeSize(VT);
4433     // NumElements should be power of 2.
4434     if ((NumElements & (NumElements - 1)) != 0)
4435       return true;
4436     // Size should be greater than 32 bits.
4437     return Size <= 32;
4438   }
4439   return false;
4440 }
4441 
// Emit the IR for a va_arg of type Ty from the va_list at VAListAddr.
// ARM's va_list is a simple char* cursor; this loads it, aligns it if
// needed, bumps it past the argument, and returns a typed pointer to the
// argument's storage.
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  if (isEmptyRecord(getContext(), Ty, true)) {
    // These are ignored for parameter passing purposes.
    llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
    return Builder.CreateBitCast(Addr, PTy);
  }

  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
  uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
  bool IsIndirect = false;

  // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
  // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  else
    TyAlign = 4;
  // Use indirect if size of the illegal vector is bigger than 16 bytes.
  if (isIllegalVectorType(Ty) && Size > 16) {
    // Only a pointer is stored in the va_list slot; adjust size/alignment
    // to the pointer's.
    IsIndirect = true;
    Size = 4;
    TyAlign = 4;
  }

  // Handle address alignment for ABI alignment > 4 bytes.
  if (TyAlign > 4) {
    assert((TyAlign & (TyAlign - 1)) == 0 &&
           "Alignment is not power of 2!");
    // Round ap.cur up to TyAlign via integer arithmetic:
    // (addr + align - 1) & ~(align - 1).
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
  }

  // Advance the cursor past this argument; slots are 4-byte granular.
  uint64_t Offset =
    llvm::RoundUpToAlignment(Size, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  if (IsIndirect)
    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
  else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
    // We can't directly cast ap.cur to pointer to a vector type, since ap.cur
    // may not be correctly aligned for the vector type. We create an aligned
    // temporary space and copy the content over from ap.cur to the temporary
    // space. This is necessary if the natural alignment of the type is greater
    // than the ABI alignment.
    llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
    CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
    llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
                                                    "var.align");
    llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
    llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
    Builder.CreateMemCpy(Dst, Src,
        llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
        TyAlign, false);
    Addr = AlignedTemp; //The content is in aligned location.
  }
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  return AddrTyped;
}
4517 
4518 namespace {
4519 
// ABI info for Native Client on ARM: dispatches between the PNaCl
// classification (for pnaclcall functions) and the plain ARM classification
// (for everything else). See the computeInfo definition below.
class NaClARMABIInfo : public ABIInfo {
 public:
  NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
      : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
 private:
  PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
  ARMABIInfo NInfo; // Used for everything else.
};
4531 
// Target hooks for Native Client on ARM; only supplies the NaCl-aware
// ABI info, inheriting all other behavior from TargetCodeGenInfo.
class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo  {
 public:
  NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
      : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
};
4537 
4538 }
4539 
4540 void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
4541   if (FI.getASTCallingConvention() == CC_PnaclCall)
4542     PInfo.computeInfo(FI);
4543   else
4544     static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
4545 }
4546 
4547 llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4548                                        CodeGenFunction &CGF) const {
4549   // Always use the native convention; calling pnacl-style varargs functions
4550   // is unsupported.
4551   return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
4552 }
4553 
4554 //===----------------------------------------------------------------------===//
4555 // AArch64 ABI Implementation
4556 //===----------------------------------------------------------------------===//
4557 
4558 namespace {
4559 
// ABI classification for AArch64 (the original pre-unification AAPCS64
// implementation in this file).
class AArch64ABIInfo : public ABIInfo {
public:
  AArch64ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  // The AArch64 PCS is explicit about return types and argument types being
  // handled identically, so we don't need to draw a distinction between
  // Argument and Return classification.
  ABIArgInfo classifyGenericType(QualType Ty, int &FreeIntRegs,
                                 int &FreeVFPRegs) const;

  // Consume RegsNeeded registers from FreeRegs if possible; otherwise fall
  // back to an indirect (byval) classification, padding out any leftover
  // registers of the class. See the definition below.
  ABIArgInfo tryUseRegs(QualType Ty, int &FreeRegs, int RegsNeeded, bool IsInt,
                        llvm::Type *DirectTy = 0) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
4579 
4580 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
4581 public:
4582   AArch64TargetCodeGenInfo(CodeGenTypes &CGT)
4583     :TargetCodeGenInfo(new AArch64ABIInfo(CGT)) {}
4584 
4585   const AArch64ABIInfo &getABIInfo() const {
4586     return static_cast<const AArch64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
4587   }
4588 
4589   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4590     return 31;
4591   }
4592 
4593   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4594                                llvm::Value *Address) const override {
4595     // 0-31 are x0-x30 and sp: 8 bytes each
4596     llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
4597     AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 31);
4598 
4599     // 64-95 are v0-v31: 16 bytes each
4600     llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
4601     AssignToArrayRange(CGF.Builder, Address, Sixteen8, 64, 95);
4602 
4603     return false;
4604   }
4605 
4606 };
4607 
4608 }
4609 
4610 void AArch64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
4611   int FreeIntRegs = 8, FreeVFPRegs = 8;
4612 
4613   FI.getReturnInfo() = classifyGenericType(FI.getReturnType(),
4614                                            FreeIntRegs, FreeVFPRegs);
4615 
4616   FreeIntRegs = FreeVFPRegs = 8;
4617   for (auto &I : FI.arguments()) {
4618     I.info = classifyGenericType(I.type, FreeIntRegs, FreeVFPRegs);
4619 
4620   }
4621 }
4622 
4623 ABIArgInfo
4624 AArch64ABIInfo::tryUseRegs(QualType Ty, int &FreeRegs, int RegsNeeded,
4625                            bool IsInt, llvm::Type *DirectTy) const {
4626   if (FreeRegs >= RegsNeeded) {
4627     FreeRegs -= RegsNeeded;
4628     return ABIArgInfo::getDirect(DirectTy);
4629   }
4630 
4631   llvm::Type *Padding = 0;
4632 
4633   // We need padding so that later arguments don't get filled in anyway. That
4634   // wouldn't happen if only ByVal arguments followed in the same category, but
4635   // a large structure will simply seem to be a pointer as far as LLVM is
4636   // concerned.
4637   if (FreeRegs > 0) {
4638     if (IsInt)
4639       Padding = llvm::Type::getInt64Ty(getVMContext());
4640     else
4641       Padding = llvm::Type::getFloatTy(getVMContext());
4642 
4643     // Either [N x i64] or [N x float].
4644     Padding = llvm::ArrayType::get(Padding, FreeRegs);
4645     FreeRegs = 0;
4646   }
4647 
4648   return ABIArgInfo::getIndirect(getContext().getTypeAlign(Ty) / 8,
4649                                  /*IsByVal=*/ true, /*Realign=*/ false,
4650                                  Padding);
4651 }
4652 
4653 
// Classify one type for either argument or return purposes (the AArch64 PCS
// treats the two identically). FreeIntRegs/FreeVFPRegs track how many of the
// 8 general-purpose and 8 SIMD/FP registers remain; they are decremented by
// whatever this value consumes, including any padding registers.
ABIArgInfo AArch64ABIInfo::classifyGenericType(QualType Ty,
                                               int &FreeIntRegs,
                                               int &FreeVFPRegs) const {
  // Can only occur for return, but harmless otherwise.
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory. There's no such concept
  // in the ABI, but they'd be over 16 bytes anyway so no matter how they're
  // classified they'd go into memory (see B.3).
  if (Ty->isVectorType() && getContext().getTypeSize(Ty) > 128) {
    // The pointer to the indirect value occupies one integer register.
    if (FreeIntRegs > 0)
      --FreeIntRegs;
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }

  // All non-aggregate LLVM types have a concrete ABI representation so they can
  // be passed directly. After this block we're guaranteed to be in a
  // complicated case.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    if (Ty->isFloatingType() || Ty->isVectorType())
      return tryUseRegs(Ty, FreeVFPRegs, /*RegsNeeded=*/ 1, /*IsInt=*/ false);

    assert(getContext().getTypeSize(Ty) <= 128 &&
           "unexpectedly large scalar type");

    int RegsNeeded = getContext().getTypeSize(Ty) > 64 ? 2 : 1;

    // If the type may need padding registers to ensure "alignment", we must be
    // careful when this is accounted for. Increasing the effective size covers
    // all cases.
    if (getContext().getTypeAlign(Ty) == 128)
      RegsNeeded += FreeIntRegs % 2 != 0;

    return tryUseRegs(Ty, FreeIntRegs, RegsNeeded, /*IsInt=*/ true);
  }

  // Records with a non-trivial copy constructor or destructor follow the
  // generic C++ ABI: pass as a pointer (or directly in memory).
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    if (FreeIntRegs > 0 && RAA == CGCXXABI::RAA_Indirect)
      --FreeIntRegs;
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
  }

  if (isEmptyRecord(getContext(), Ty, true)) {
    if (!getContext().getLangOpts().CPlusPlus) {
      // Empty structs outside C++ mode are a GNU extension, so no ABI can
      // possibly tell us what to do. It turns out (I believe) that GCC ignores
      // the object for parameter-passsing purposes.
      return ABIArgInfo::getIgnore();
    }

    // The combination of C++98 9p5 (sizeof(struct) != 0) and the pseudocode
    // description of va_arg in the PCS require that an empty struct does
    // actually occupy space for parameter-passing. I'm hoping for a
    // clarification giving an explicit paragraph to point to in future.
    return tryUseRegs(Ty, FreeIntRegs, /*RegsNeeded=*/ 1, /*IsInt=*/ true,
                      llvm::Type::getInt8Ty(getVMContext()));
  }

  // Homogeneous vector aggregates get passed in registers or on the stack.
  const Type *Base = 0;
  uint64_t NumMembers = 0;
  if (isHomogeneousAggregate(Ty, Base, getContext(), &NumMembers)) {
    assert(Base && "Base class should be set for homogeneous aggregate");
    // Homogeneous aggregates are passed and returned directly.
    return tryUseRegs(Ty, FreeVFPRegs, /*RegsNeeded=*/ NumMembers,
                      /*IsInt=*/ false);
  }

  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size <= 128) {
    // Small structs can use the same direct type whether they're in registers
    // or on the stack.
    llvm::Type *BaseTy;
    unsigned NumBases;
    int SizeInRegs = (Size + 63) / 64;

    if (getContext().getTypeAlign(Ty) == 128) {
      BaseTy = llvm::Type::getIntNTy(getVMContext(), 128);
      NumBases = 1;

      // If the type may need padding registers to ensure "alignment", we must
      // be careful when this is accounted for. Increasing the effective size
      // covers all cases.
      SizeInRegs += FreeIntRegs % 2 != 0;
    } else {
      BaseTy = llvm::Type::getInt64Ty(getVMContext());
      NumBases = SizeInRegs;
    }
    llvm::Type *DirectTy = llvm::ArrayType::get(BaseTy, NumBases);

    return tryUseRegs(Ty, FreeIntRegs, /*RegsNeeded=*/ SizeInRegs,
                      /*IsInt=*/ true, DirectTy);
  }

  // If the aggregate is > 16 bytes, it's passed and returned indirectly. In
  // LLVM terms the return uses an "sret" pointer, but that's handled elsewhere.
  --FreeIntRegs;
  return ABIArgInfo::getIndirect(0, /* byVal = */ false);
}
4758 
4759 llvm::Value *AArch64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4760                                        CodeGenFunction &CGF) const {
4761   int FreeIntRegs = 8, FreeVFPRegs = 8;
4762   Ty = CGF.getContext().getCanonicalType(Ty);
4763   ABIArgInfo AI = classifyGenericType(Ty, FreeIntRegs, FreeVFPRegs);
4764 
4765   return EmitAArch64VAArg(VAListAddr, Ty, 8 - FreeIntRegs, 8 - FreeVFPRegs,
4766                           AI.isIndirect(), CGF);
4767 }
4768 
4769 //===----------------------------------------------------------------------===//
4770 // NVPTX ABI Implementation
4771 //===----------------------------------------------------------------------===//
4772 
4773 namespace {
4774 
// ABI information for the NVPTX (CUDA/OpenCL GPU) target.
class NVPTXABIInfo : public ABIInfo {
public:
  NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  // NVPTX has no varargs support; the definition is llvm_unreachable.
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CFG) const override;
};
4786 
// Target hooks for NVPTX: emits "nvvm.annotations" metadata for kernel
// entry points and CUDA launch bounds.
class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
private:
  // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
  // resulting MDNode to the nvvm.annotations MDNode.
  static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
};
4799 
4800 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
4801   if (RetTy->isVoidType())
4802     return ABIArgInfo::getIgnore();
4803 
4804   // note: this is different from default ABI
4805   if (!RetTy->isScalarType())
4806     return ABIArgInfo::getDirect();
4807 
4808   // Treat an enum type as its underlying type.
4809   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
4810     RetTy = EnumTy->getDecl()->getIntegerType();
4811 
4812   return (RetTy->isPromotableIntegerType() ?
4813           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4814 }
4815 
4816 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
4817   // Treat an enum type as its underlying type.
4818   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4819     Ty = EnumTy->getDecl()->getIntegerType();
4820 
4821   return (Ty->isPromotableIntegerType() ?
4822           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4823 }
4824 
4825 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
4826   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4827   for (auto &I : FI.arguments())
4828     I.info = classifyArgumentType(I.type);
4829 
4830   // Always honor user-specified calling convention.
4831   if (FI.getCallingConvention() != llvm::CallingConv::C)
4832     return;
4833 
4834   FI.setEffectiveCallingConvention(getRuntimeCC());
4835 }
4836 
// va_arg is not supported on NVPTX; reaching this is a front-end bug.
llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CFG) const {
  llvm_unreachable("NVPTX does not support varargs");
}
4841 
4842 void NVPTXTargetCodeGenInfo::
4843 SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4844                     CodeGen::CodeGenModule &M) const{
4845   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
4846   if (!FD) return;
4847 
4848   llvm::Function *F = cast<llvm::Function>(GV);
4849 
4850   // Perform special handling in OpenCL mode
4851   if (M.getLangOpts().OpenCL) {
4852     // Use OpenCL function attributes to check for kernel functions
4853     // By default, all functions are device functions
4854     if (FD->hasAttr<OpenCLKernelAttr>()) {
4855       // OpenCL __kernel functions get kernel metadata
4856       // Create !{<func-ref>, metadata !"kernel", i32 1} node
4857       addNVVMMetadata(F, "kernel", 1);
4858       // And kernel functions are not subject to inlining
4859       F->addFnAttr(llvm::Attribute::NoInline);
4860     }
4861   }
4862 
4863   // Perform special handling in CUDA mode.
4864   if (M.getLangOpts().CUDA) {
4865     // CUDA __global__ functions get a kernel metadata entry.  Since
4866     // __global__ functions cannot be called from the device, we do not
4867     // need to set the noinline attribute.
4868     if (FD->hasAttr<CUDAGlobalAttr>()) {
4869       // Create !{<func-ref>, metadata !"kernel", i32 1} node
4870       addNVVMMetadata(F, "kernel", 1);
4871     }
4872     if (FD->hasAttr<CUDALaunchBoundsAttr>()) {
4873       // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
4874       addNVVMMetadata(F, "maxntidx",
4875                       FD->getAttr<CUDALaunchBoundsAttr>()->getMaxThreads());
4876       // min blocks is a default argument for CUDALaunchBoundsAttr, so getting a
4877       // zero value from getMinBlocks either means it was not specified in
4878       // __launch_bounds__ or the user specified a 0 value. In both cases, we
4879       // don't have to add a PTX directive.
4880       int MinCTASM = FD->getAttr<CUDALaunchBoundsAttr>()->getMinBlocks();
4881       if (MinCTASM > 0) {
4882         // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
4883         addNVVMMetadata(F, "minctasm", MinCTASM);
4884       }
4885     }
4886   }
4887 }
4888 
4889 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
4890                                              int Operand) {
4891   llvm::Module *M = F->getParent();
4892   llvm::LLVMContext &Ctx = M->getContext();
4893 
4894   // Get "nvvm.annotations" metadata node
4895   llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
4896 
4897   llvm::Value *MDVals[] = {
4898       F, llvm::MDString::get(Ctx, Name),
4899       llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand)};
4900   // Append metadata to nvvm.annotations
4901   MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
4902 }
4903 }
4904 
4905 //===----------------------------------------------------------------------===//
4906 // SystemZ ABI Implementation
4907 //===----------------------------------------------------------------------===//
4908 
4909 namespace {
4910 
// ABI information for SystemZ (s390x): every argument occupies one 64-bit
// GPR or FPR slot; larger or awkward values are passed indirectly.
class SystemZABIInfo : public ABIInfo {
public:
  SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  // True if Ty must be extended to full register width (32-bit int/unsigned
  // in addition to the C-promotable integer types).
  bool isPromotableIntegerType(QualType Ty) const;
  // True for complex types and ABI-aggregates.
  bool isCompoundType(QualType Ty) const;
  // True if Ty is a float/double, or a struct reducing to one, so that it
  // travels in FPRs.
  bool isFPArgumentType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType ArgTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
4931 
// Target hooks for SystemZ; all ABI behavior lives in SystemZABIInfo.
class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
};
4937 
4938 }
4939 
4940 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
4941   // Treat an enum type as its underlying type.
4942   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4943     Ty = EnumTy->getDecl()->getIntegerType();
4944 
4945   // Promotable integer types are required to be promoted by the ABI.
4946   if (Ty->isPromotableIntegerType())
4947     return true;
4948 
4949   // 32-bit values must also be promoted.
4950   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4951     switch (BT->getKind()) {
4952     case BuiltinType::Int:
4953     case BuiltinType::UInt:
4954       return true;
4955     default:
4956       return false;
4957     }
4958   return false;
4959 }
4960 
4961 bool SystemZABIInfo::isCompoundType(QualType Ty) const {
4962   return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty);
4963 }
4964 
4965 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
4966   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4967     switch (BT->getKind()) {
4968     case BuiltinType::Float:
4969     case BuiltinType::Double:
4970       return true;
4971     default:
4972       return false;
4973     }
4974 
4975   if (const RecordType *RT = Ty->getAsStructureType()) {
4976     const RecordDecl *RD = RT->getDecl();
4977     bool Found = false;
4978 
4979     // If this is a C++ record, check the bases first.
4980     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
4981       for (const auto &I : CXXRD->bases()) {
4982         QualType Base = I.getType();
4983 
4984         // Empty bases don't affect things either way.
4985         if (isEmptyRecord(getContext(), Base, true))
4986           continue;
4987 
4988         if (Found)
4989           return false;
4990         Found = isFPArgumentType(Base);
4991         if (!Found)
4992           return false;
4993       }
4994 
4995     // Check the fields.
4996     for (const auto *FD : RD->fields()) {
4997       // Empty bitfields don't affect things either way.
4998       // Unlike isSingleElementStruct(), empty structure and array fields
4999       // do count.  So do anonymous bitfields that aren't zero-sized.
5000       if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
5001         return true;
5002 
5003       // Unlike isSingleElementStruct(), arrays do not count.
5004       // Nested isFPArgumentType structures still do though.
5005       if (Found)
5006         return false;
5007       Found = isFPArgumentType(FD->getType());
5008       if (!Found)
5009         return false;
5010     }
5011 
5012     // Unlike isSingleElementStruct(), trailing padding is allowed.
5013     // An 8-byte aligned struct s { float f; } is passed as a double.
5014     return Found;
5015   }
5016 
5017   return false;
5018 }
5019 
// Emit a va_arg expression for SystemZ. An argument lives either in the
// register save area (first 5 GPRs / 4 FPRs) or in the overflow (stack)
// area; the va_list register counts decide which, and are updated here.
llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i64 __gpr;
  //   i64 __fpr;
  //   i8 *__overflow_arg_area;
  //   i8 *__reg_save_area;
  // };

  // Every argument occupies 8 bytes and is passed by preference in either
  // GPRs or FPRs.
  Ty = CGF.getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool InFPRs = isFPArgumentType(Ty);

  llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
  bool IsIndirect = AI.isIndirect();
  unsigned UnpaddedBitSize;
  if (IsIndirect) {
    // Indirect arguments occupy the slot as a pointer to the real value.
    APTy = llvm::PointerType::getUnqual(APTy);
    UnpaddedBitSize = 64;
  } else
    UnpaddedBitSize = getContext().getTypeSize(Ty);
  unsigned PaddedBitSize = 64;
  assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");

  unsigned PaddedSize = PaddedBitSize / 8;
  unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;

  unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
  if (InFPRs) {
    MaxRegs = 4; // Maximum of 4 FPR arguments
    RegCountField = 1; // __fpr
    RegSaveIndex = 16; // save offset for f0
    RegPadding = 0; // floats are passed in the high bits of an FPR
  } else {
    MaxRegs = 5; // Maximum of 5 GPR arguments
    RegCountField = 0; // __gpr
    RegSaveIndex = 2; // save offset for r2
    RegPadding = Padding; // values are passed in the low bits of a GPR
  }

  // Branch on whether the argument still fits in the register save area.
  llvm::Value *RegCountPtr =
    CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
  llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
  llvm::Type *IndexTy = RegCount->getType();
  llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
  llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
                                                 "fits_in_regs");

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.
  CGF.EmitBlock(InRegBlock);

  // Work out the address of an argument register.
  llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
  llvm::Value *ScaledRegCount =
    CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
  llvm::Value *RegBase =
    llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding);
  llvm::Value *RegOffset =
    CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
  llvm::Value *RegSaveAreaPtr =
    CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
  llvm::Value *RegSaveArea =
    CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
  llvm::Value *RawRegAddr =
    CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr");
  llvm::Value *RegAddr =
    CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr");

  // Update the register count
  llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
  llvm::Value *NewRegCount =
    CGF.Builder.CreateAdd(RegCount, One, "reg_count");
  CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.
  CGF.EmitBlock(InMemBlock);

  // Work out the address of a stack argument.
  llvm::Value *OverflowArgAreaPtr =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
  llvm::Value *OverflowArgArea =
    CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
  llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
  llvm::Value *RawMemAddr =
    CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr");
  llvm::Value *MemAddr =
    CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr");

  // Update overflow_arg_area_ptr pointer
  llvm::Value *NewOverflowArgArea =
    CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
  CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
  CGF.EmitBranch(ContBlock);

  // Return the appropriate result.
  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);

  // For indirect arguments, the slot holds a pointer; load through it.
  if (IsIndirect)
    return CGF.Builder.CreateLoad(ResAddr, "indirect_arg");

  return ResAddr;
}
5134 
5135 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
5136     const llvm::Triple &Triple, const CodeGenOptions &Opts) {
5137   assert(Triple.getArch() == llvm::Triple::x86);
5138 
5139   switch (Opts.getStructReturnConvention()) {
5140   case CodeGenOptions::SRCK_Default:
5141     break;
5142   case CodeGenOptions::SRCK_OnStack:  // -fpcc-struct-return
5143     return false;
5144   case CodeGenOptions::SRCK_InRegs:  // -freg-struct-return
5145     return true;
5146   }
5147 
5148   if (Triple.isOSDarwin())
5149     return true;
5150 
5151   switch (Triple.getOS()) {
5152   case llvm::Triple::AuroraUX:
5153   case llvm::Triple::DragonFly:
5154   case llvm::Triple::FreeBSD:
5155   case llvm::Triple::OpenBSD:
5156   case llvm::Triple::Bitrig:
5157     return true;
5158   case llvm::Triple::Win32:
5159     switch (Triple.getEnvironment()) {
5160     case llvm::Triple::UnknownEnvironment:
5161     case llvm::Triple::Cygnus:
5162     case llvm::Triple::GNU:
5163     case llvm::Triple::MSVC:
5164       return true;
5165     default:
5166       return false;
5167     }
5168   default:
5169     return false;
5170   }
5171 }
5172 
5173 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
5174   if (RetTy->isVoidType())
5175     return ABIArgInfo::getIgnore();
5176   if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
5177     return ABIArgInfo::getIndirect(0);
5178   return (isPromotableIntegerType(RetTy) ?
5179           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5180 }
5181 
5182 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
5183   // Handle the generic C++ ABI.
5184   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5185     return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
5186 
5187   // Integers and enums are extended to full register width.
5188   if (isPromotableIntegerType(Ty))
5189     return ABIArgInfo::getExtend();
5190 
5191   // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
5192   uint64_t Size = getContext().getTypeSize(Ty);
5193   if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
5194     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5195 
5196   // Handle small structures.
5197   if (const RecordType *RT = Ty->getAs<RecordType>()) {
5198     // Structures with flexible arrays have variable length, so really
5199     // fail the size test above.
5200     const RecordDecl *RD = RT->getDecl();
5201     if (RD->hasFlexibleArrayMember())
5202       return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5203 
5204     // The structure is passed as an unextended integer, a float, or a double.
5205     llvm::Type *PassTy;
5206     if (isFPArgumentType(Ty)) {
5207       assert(Size == 32 || Size == 64);
5208       if (Size == 32)
5209         PassTy = llvm::Type::getFloatTy(getVMContext());
5210       else
5211         PassTy = llvm::Type::getDoubleTy(getVMContext());
5212     } else
5213       PassTy = llvm::IntegerType::get(getVMContext(), Size);
5214     return ABIArgInfo::getDirect(PassTy);
5215   }
5216 
5217   // Non-structure compounds are passed indirectly.
5218   if (isCompoundType(Ty))
5219     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5220 
5221   return ABIArgInfo::getDirect(0);
5222 }
5223 
5224 //===----------------------------------------------------------------------===//
5225 // MSP430 ABI Implementation
5226 //===----------------------------------------------------------------------===//
5227 
5228 namespace {
5229 
// Target hooks for MSP430; the default ABI applies, but 'interrupt'
// handler functions need special treatment (see SetTargetAttributes).
class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};
5237 
5238 }
5239 
// Lower the MSP430 'interrupt' attribute: switch the function to the ISR
// calling convention, disable inlining, and emit an "__isr_<N>" alias so
// the linker can place it in the vector table.
void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                             CodeGen::CodeGenModule &M) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
      // Handle 'interrupt' attribute:
      llvm::Function *F = cast<llvm::Function>(GV);

      // Step 1: Set ISR calling convention.
      F->setCallingConv(llvm::CallingConv::MSP430_INTR);

      // Step 2: Add attributes goodness.
      F->addFnAttr(llvm::Attribute::NoInline);

      // Step 3: Emit ISR vector alias.
      // NOTE(review): interrupt numbers appear to map to vector slots in
      // pairs — confirm the /2 against the MSP430 vector table layout.
      unsigned Num = attr->getNumber() / 2;
      new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
                            "__isr_" + Twine(Num),
                            GV, &M.getModule());
    }
  }
}
5262 
5263 //===----------------------------------------------------------------------===//
5264 // MIPS ABI Implementation.  This works for both little-endian and
5265 // big-endian variants.
5266 //===----------------------------------------------------------------------===//
5267 
5268 namespace {
// ABI information for MIPS, covering both the O32 and the N32/N64 variants;
// slot size and maximum stack alignment differ between the two, selected by
// the IsO32 flag.
class MipsABIInfo : public ABIInfo {
  bool IsO32;
  // O32: 4-byte slots / 8-byte max align; N32/64: 8-byte slots / 16-byte.
  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
  // Appends word-sized integers (plus one remainder integer) covering
  // TySize bits to ArgList.
  void CoerceToIntArgs(uint64_t TySize,
                       SmallVectorImpl<llvm::Type *> &ArgList) const;
  // Builds the coercion struct used to pass an aggregate directly.
  llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
  llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
  // Returns a padding type for the gap between the two offsets, or null.
  llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
public:
  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
    ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
    StackAlignInBytes(IsO32 ? 8 : 16) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  // Offset tracks the running byte offset in the argument area and is
  // advanced past this argument.
  ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
5288 
// Target hooks for MIPS: DWARF EH register information, the unwind
// exception size, and the mips16/nomips16 function attributes.
class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
  unsigned SizeOfUnwindException;
public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
    : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
      SizeOfUnwindException(IsO32 ? 24 : 32) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // $sp is DWARF register 29 on MIPS.
    return 29;
  }

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
    if (!FD) return;
    llvm::Function *Fn = cast<llvm::Function>(GV);
    // Forward the mips16/nomips16 source attributes to the backend as IR
    // function attributes.
    if (FD->hasAttr<Mips16Attr>()) {
      Fn->addFnAttr("mips16");
    }
    else if (FD->hasAttr<NoMips16Attr>()) {
      Fn->addFnAttr("nomips16");
    }
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  unsigned getSizeOfUnwindException() const override {
    return SizeOfUnwindException;
  }
};
5320 }
5321 
5322 void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
5323                                   SmallVectorImpl<llvm::Type *> &ArgList) const {
5324   llvm::IntegerType *IntTy =
5325     llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
5326 
5327   // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
5328   for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
5329     ArgList.push_back(IntTy);
5330 
5331   // If necessary, add one more integer type to ArgList.
5332   unsigned R = TySize % (MinABIStackAlignInBytes * 8);
5333 
5334   if (R)
5335     ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
5336 }
5337 
// In N32/64, an aligned double precision floating point field is passed in
// a register.
// Builds the LLVM struct type used to coerce an aggregate of TySize bits
// for direct passing. O32 aggregates — and unions/vectors on any ABI —
// become plain integer sequences; N32/64 structs keep their 64-bit-aligned
// double fields as doubles so they land in FPRs.
llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
  SmallVector<llvm::Type*, 8> ArgList, IntArgList;

  if (IsO32) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  if (Ty->isComplexType())
    return CGT.ConvertType(Ty);

  const RecordType *RT = Ty->getAs<RecordType>();

  // Unions/vectors are passed in integer registers.
  if (!RT || !RT->isStructureOrClassType()) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  const RecordDecl *RD = RT->getDecl();
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  assert(!(TySize % 8) && "Size of structure must be multiple of 8.");

  uint64_t LastOffset = 0;
  unsigned idx = 0;
  llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);

  // Iterate over fields in the struct/class and check if there are any aligned
  // double fields.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const QualType Ty = i->getType();
    const BuiltinType *BT = Ty->getAs<BuiltinType>();

    if (!BT || BT->getKind() != BuiltinType::Double)
      continue;

    uint64_t Offset = Layout.getFieldOffset(idx);
    if (Offset % 64) // Ignore doubles that are not aligned.
      continue;

    // Add ((Offset - LastOffset) / 64) args of type i64.
    for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
      ArgList.push_back(I64);

    // Add double type.
    ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
    LastOffset = Offset + 64;
  }

  // Everything after the last aligned double is passed as integers.
  CoerceToIntArgs(TySize - LastOffset, IntArgList);
  ArgList.append(IntArgList.begin(), IntArgList.end());

  return llvm::StructType::get(getVMContext(), ArgList);
}
5395 
5396 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
5397                                         uint64_t Offset) const {
5398   if (OrigOffset + MinABIStackAlignInBytes > Offset)
5399     return 0;
5400 
5401   return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
5402 }
5403 
ABIArgInfo
// Classify one argument for the MIPS calling convention.  Offset tracks the
// running byte offset in the nominal argument area; it is advanced past the
// slot(s) this argument consumes so the next argument can compute padding.
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
  uint64_t OrigOffset = Offset;
  uint64_t TySize = getContext().getTypeSize(Ty); // size in bits
  uint64_t Align = getContext().getTypeAlign(Ty) / 8; // alignment in bytes

  // Clamp the natural alignment to [MinABIStackAlignInBytes,
  // StackAlignInBytes].
  Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
                   (uint64_t)StackAlignInBytes);
  // NOTE(review): CurrOffset is unsigned while Offset is uint64_t; this
  // assumes argument areas never exceed 4GB — confirm if that matters.
  unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align);
  // Advance past this argument: its size rounded up to a whole slot.
  Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8;

  if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
    // Ignore empty aggregates.
    if (TySize == 0)
      return ABIArgInfo::getIgnore();

    // Records with non-trivial copy/destroy semantics are passed indirectly;
    // they only consume one minimum-aligned slot (for the pointer).
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
      Offset = OrigOffset + MinABIStackAlignInBytes;
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    }

    // If we have reached here, aggregates are passed directly by coercing to
    // another structure type. Padding is inserted if the offset of the
    // aggregate is unaligned.
    return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
                                 getPaddingType(OrigOffset, CurrOffset));
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Small integers are sign/zero-extended to a full register.
  if (Ty->isPromotableIntegerType())
    return ABIArgInfo::getExtend();

  // Other scalars are passed directly; N32/64 may still need padding to keep
  // the argument register/stack assignment in sync.
  return ABIArgInfo::getDirect(
      0, 0, IsO32 ? 0 : getPaddingType(OrigOffset, CurrOffset));
}
5442 
5443 llvm::Type*
5444 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
5445   const RecordType *RT = RetTy->getAs<RecordType>();
5446   SmallVector<llvm::Type*, 8> RTList;
5447 
5448   if (RT && RT->isStructureOrClassType()) {
5449     const RecordDecl *RD = RT->getDecl();
5450     const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
5451     unsigned FieldCnt = Layout.getFieldCount();
5452 
5453     // N32/64 returns struct/classes in floating point registers if the
5454     // following conditions are met:
5455     // 1. The size of the struct/class is no larger than 128-bit.
5456     // 2. The struct/class has one or two fields all of which are floating
5457     //    point types.
5458     // 3. The offset of the first field is zero (this follows what gcc does).
5459     //
5460     // Any other composite results are returned in integer registers.
5461     //
5462     if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
5463       RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
5464       for (; b != e; ++b) {
5465         const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
5466 
5467         if (!BT || !BT->isFloatingPoint())
5468           break;
5469 
5470         RTList.push_back(CGT.ConvertType(b->getType()));
5471       }
5472 
5473       if (b == e)
5474         return llvm::StructType::get(getVMContext(), RTList,
5475                                      RD->hasAttr<PackedAttr>());
5476 
5477       RTList.clear();
5478     }
5479   }
5480 
5481   CoerceToIntArgs(Size, RTList);
5482   return llvm::StructType::get(getVMContext(), RTList);
5483 }
5484 
// Classify a return value for both the O32 and N32/64 MIPS ABIs.
ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
  uint64_t Size = getContext().getTypeSize(RetTy);

  // Nothing to return for void or zero-sized types.
  if (RetTy->isVoidType() || Size == 0)
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
    // C++ records with non-trivial copy/destroy semantics go through an
    // sret pointer.
    if (isRecordReturnIndirect(RetTy, getCXXABI()))
      return ABIArgInfo::getIndirect(0);

    if (Size <= 128) {
      if (RetTy->isAnyComplexType())
        return ABIArgInfo::getDirect();

      // O32 returns integer vectors in registers.
      if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())
        return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));

      // N32/64 returns all composites <= 128 bits in registers, coerced to
      // a struct of integer and/or floating-point members.
      if (!IsO32)
        return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
    }

    // Everything else (including all other O32 aggregates) goes in memory.
    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  // Small integers are extended to a full register; other scalars are
  // returned directly.
  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
5517 
5518 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
5519   ABIArgInfo &RetInfo = FI.getReturnInfo();
5520   RetInfo = classifyReturnType(FI.getReturnType());
5521 
5522   // Check if a pointer to an aggregate is passed as a hidden argument.
5523   uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
5524 
5525   for (auto &I : FI.arguments())
5526     I.info = classifyArgumentType(I.type, Offset);
5527 }
5528 
// Emit the address computation for a MIPS va_arg of type Ty.  The va_list is
// a plain byte pointer that is bumped past each argument slot.
llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                    CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8; // bytes
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped;
  // Pointer-sized integer type for the align-up arithmetic below.
  unsigned PtrWidth = getTarget().getPointerWidth(0);
  llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;

  if (TypeAlign > MinABIStackAlignInBytes) {
    // Over-aligned type: round the current pointer up with
    // addr = (addr + align - 1) & -align, done as integer arithmetic.
    llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
    llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
    llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
    llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
    llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
    AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
  }
  else
    AddrTyped = Builder.CreateBitCast(Addr, PTy);

  // Advance the va_list past this argument; the slot size is the type size
  // rounded up to at least the minimum ABI stack alignment.
  llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
  TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
  llvm::Value *NextAddr =
    Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
5565 
5566 bool
5567 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5568                                                llvm::Value *Address) const {
5569   // This information comes from gcc's implementation, which seems to
5570   // as canonical as it gets.
5571 
5572   // Everything on MIPS is 4 bytes.  Double-precision FP registers
5573   // are aliased to pairs of single-precision FP registers.
5574   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
5575 
5576   // 0-31 are the general purpose registers, $0 - $31.
5577   // 32-63 are the floating-point registers, $f0 - $f31.
5578   // 64 and 65 are the multiply/divide registers, $hi and $lo.
5579   // 66 is the (notional, I think) register for signal-handler return.
5580   AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
5581 
5582   // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
5583   // They are one bit wide and ignored here.
5584 
5585   // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
5586   // (coprocessor 1 is the FP unit)
5587   // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
5588   // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
5589   // 176-181 are the DSP accumulator registers.
5590   AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
5591   return false;
5592 }
5593 
5594 //===----------------------------------------------------------------------===//
5595 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
5596 // Currently subclassed only to implement custom OpenCL C function attribute
5597 // handling.
5598 //===----------------------------------------------------------------------===//
5599 
5600 namespace {
5601 
// TCE target hooks: identical to the defaults except for OpenCL C kernel
// attribute handling (see SetTargetAttributes below).
class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
    : DefaultTargetCodeGenInfo(CGT) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};
5610 
// Apply TCE-specific IR attributes/metadata to function definitions.  Only
// OpenCL kernel functions are affected: they are marked noinline, and any
// reqd_work_group_size attribute is recorded as module-level metadata.
void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                               llvm::GlobalValue *GV,
                                               CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  if (M.getLangOpts().OpenCL) {
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL C Kernel functions are not subject to inlining
      F->addFnAttr(llvm::Attribute::NoInline);
      const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
      if (Attr) {
        // Convert the reqd_work_group_size() attributes to metadata.
        llvm::LLVMContext &Context = F->getContext();
        llvm::NamedMDNode *OpenCLMetadata =
            M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");

        // Operand layout: the kernel function itself, then the X/Y/Z
        // work-group dimensions as i32 constants.
        SmallVector<llvm::Value*, 5> Operands;
        Operands.push_back(F);

        Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
                             llvm::APInt(32, Attr->getXDim())));
        Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
                             llvm::APInt(32, Attr->getYDim())));
        Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
                             llvm::APInt(32, Attr->getZDim())));

        // Add a boolean constant operand for "required" (true) or "hint" (false)
        // for implementing the work_group_size_hint attr later. Currently
        // always true as the hint is not yet implemented.
        Operands.push_back(llvm::ConstantInt::getTrue(Context));
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}
5649 
5650 }
5651 
5652 //===----------------------------------------------------------------------===//
5653 // Hexagon ABI Implementation
5654 //===----------------------------------------------------------------------===//
5655 
5656 namespace {
5657 
// ABI lowering for Hexagon: scalars are extended/passed directly; small
// aggregates are coerced to the smallest integer type that fits, larger
// ones are passed/returned indirectly (see the classify* definitions below).
class HexagonABIInfo : public ABIInfo {


public:
  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
5674 
// Target hooks for Hexagon; only the DWARF EH stack-pointer register number
// is customized.
class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
    :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // DWARF register 29 — presumably r29, the Hexagon stack pointer.
    return 29;
  }
};
5684 
5685 }
5686 
5687 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
5688   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5689   for (auto &I : FI.arguments())
5690     I.info = classifyArgumentType(I.type);
5691 }
5692 
5693 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
5694   if (!isAggregateTypeForABI(Ty)) {
5695     // Treat an enum type as its underlying type.
5696     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5697       Ty = EnumTy->getDecl()->getIntegerType();
5698 
5699     return (Ty->isPromotableIntegerType() ?
5700             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5701   }
5702 
5703   // Ignore empty records.
5704   if (isEmptyRecord(getContext(), Ty, true))
5705     return ABIArgInfo::getIgnore();
5706 
5707   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5708     return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
5709 
5710   uint64_t Size = getContext().getTypeSize(Ty);
5711   if (Size > 64)
5712     return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
5713     // Pass in the smallest viable integer type.
5714   else if (Size > 32)
5715       return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
5716   else if (Size > 16)
5717       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5718   else if (Size > 8)
5719       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5720   else
5721       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5722 }
5723 
5724 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
5725   if (RetTy->isVoidType())
5726     return ABIArgInfo::getIgnore();
5727 
5728   // Large vector types should be returned via memory.
5729   if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
5730     return ABIArgInfo::getIndirect(0);
5731 
5732   if (!isAggregateTypeForABI(RetTy)) {
5733     // Treat an enum type as its underlying type.
5734     if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5735       RetTy = EnumTy->getDecl()->getIntegerType();
5736 
5737     return (RetTy->isPromotableIntegerType() ?
5738             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5739   }
5740 
5741   // Structures with either a non-trivial destructor or a non-trivial
5742   // copy constructor are always indirect.
5743   if (isRecordReturnIndirect(RetTy, getCXXABI()))
5744     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5745 
5746   if (isEmptyRecord(getContext(), RetTy, true))
5747     return ABIArgInfo::getIgnore();
5748 
5749   // Aggregates <= 8 bytes are returned in r0; other aggregates
5750   // are returned indirectly.
5751   uint64_t Size = getContext().getTypeSize(RetTy);
5752   if (Size <= 64) {
5753     // Return in the smallest viable integer type.
5754     if (Size <= 8)
5755       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5756     if (Size <= 16)
5757       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5758     if (Size <= 32)
5759       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5760     return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
5761   }
5762 
5763   return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
5764 }
5765 
// Emit the address computation for a Hexagon va_arg: load the current
// va_list pointer, use it as the argument address, and advance the pointer
// by the argument size rounded up to 4 bytes.
llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // FIXME: Need to handle alignment
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  // Step past the argument: its byte size rounded up to a 4-byte slot.
  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
5788 
5789 
5790 //===----------------------------------------------------------------------===//
5791 // SPARC v9 ABI Implementation.
5792 // Based on the SPARC Compliance Definition version 2.4.1.
5793 //
// Function arguments are mapped to a nominal "parameter array" and promoted to
5795 // registers depending on their type. Each argument occupies 8 or 16 bytes in
5796 // the array, structs larger than 16 bytes are passed indirectly.
5797 //
5798 // One case requires special care:
5799 //
5800 //   struct mixed {
5801 //     int i;
5802 //     float f;
5803 //   };
5804 //
5805 // When a struct mixed is passed by value, it only occupies 8 bytes in the
5806 // parameter array, but the int is passed in an integer register, and the float
5807 // is passed in a floating point register. This is represented as two arguments
5808 // with the LLVM IR inreg attribute:
5809 //
5810 //   declare void f(i32 inreg %i, float inreg %f)
5811 //
5812 // The code generator will only allocate 4 bytes from the parameter array for
5813 // the inreg arguments. All other arguments are allocated a multiple of 8
5814 // bytes.
5815 //
5816 namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  // Shared classification for arguments and return values; SizeLimit is the
  // largest size (in bits) that may be passed/returned in registers (see
  // computeInfo: 16*8 for arguments, 32*8 for returns).
  ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  // Coercion type builder for structs passed in registers. The coercion type
  // serves two purposes:
  //
  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
  //    in registers.
  // 2. Expose aligned floating point elements as first-level elements, so the
  //    code generator knows to pass them in floating point registers.
  //
  // We also compute the InReg flag which indicates that the struct contains
  // aligned 32-bit floats.
  //
  struct CoerceBuilder {
    llvm::LLVMContext &Context;
    const llvm::DataLayout &DL;
    SmallVector<llvm::Type*, 8> Elems; // coercion elements built so far
    uint64_t Size;                     // bits covered by Elems
    bool InReg;                        // any float narrower than 64 bits seen

    CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
      : Context(c), DL(dl), Size(0), InReg(false) {}

    // Pad Elems with integers until Size is ToSize.
    void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;

      // Finish the current 64-bit word.
      uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
      if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }

      // Add whole 64-bit words.
      while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }

      // Final in-word padding.
      if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }

    // Add a floating point element at Offset.
    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }

    // Add a struct type to the coercion type, starting at Offset (in bits).
    // Recurses into nested structs; float/double/fp128 members become
    // first-class elements, pointers become 64-bit elements, and everything
    // else is left to be covered by integer padding.
    void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
      const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
      for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
        llvm::Type *ElemTy = StrTy->getElementType(i);
        uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
        switch (ElemTy->getTypeID()) {
        case llvm::Type::StructTyID:
          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
          break;
        case llvm::Type::FloatTyID:
          addFloat(ElemOffset, ElemTy, 32);
          break;
        case llvm::Type::DoubleTyID:
          addFloat(ElemOffset, ElemTy, 64);
          break;
        case llvm::Type::FP128TyID:
          addFloat(ElemOffset, ElemTy, 128);
          break;
        case llvm::Type::PointerTyID:
          // Only 64-bit aligned pointers are exposed directly; pad() has
          // brought Size up to ElemOffset, so the pointer covers 64 bits.
          if (ElemOffset % 64 == 0) {
            pad(ElemOffset);
            Elems.push_back(ElemTy);
            Size += 64;
          }
          break;
        default:
          break;
        }
      }
    }

    // Check if Ty is a usable substitute for the coercion type.
    bool isUsableType(llvm::StructType *Ty) const {
      if (Ty->getNumElements() != Elems.size())
        return false;
      for (unsigned i = 0, e = Elems.size(); i != e; ++i)
        if (Elems[i] != Ty->getElementType(i))
          return false;
      return true;
    }

    // Get the coercion type as a literal struct type.
    llvm::Type *getType() const {
      if (Elems.size() == 1)
        return Elems.front();
      else
        return llvm::StructType::get(Context, Elems);
    }
  };
};
5938 } // end anonymous namespace
5939 
ABIArgInfo
// Classify a SPARC v9 argument or return value.  SizeLimit is the largest
// size in bits that may live in registers; bigger types go indirect.
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Anything too big to fit in registers is passed with an explicit indirect
  // pointer / sret pointer.
  if (Size > SizeLimit)
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Integer types smaller than a register are extended.
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend();

  // Other non-aggregates go in registers.
  if (!isAggregateTypeForABI(Ty))
    return ABIArgInfo::getDirect();

  // If a C++ object has either a non-trivial copy constructor or a non-trivial
  // destructor, it is passed with an explicit indirect pointer / sret pointer.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

  // This is a small aggregate type that should be passed in registers.
  // Build a coercion type from the LLVM struct type.
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy)
    return ABIArgInfo::getDirect();

  CoerceBuilder CB(getVMContext(), getDataLayout());
  CB.addStruct(0, StrTy);
  // Round the coercion type up to a whole number of 64-bit words.
  CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));

  // Try to use the original type for coercion.
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();

  // InReg marks structs containing aligned 32-bit floats (see CoerceBuilder).
  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  else
    return ABIArgInfo::getDirect(CoerceTy);
}
5987 
// Emit the address computation for a SPARC v9 va_arg.  Arguments occupy
// 8-byte (or larger) slots in the register save area; classification decides
// how the slot is interpreted.
llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // Classify with the 16-byte argument limit, matching computeInfo.
  ABIArgInfo AI = classifyType(Ty, 16 * 8);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);

  llvm::Type *BPP = CGF.Int8PtrPtrTy;
  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  llvm::Value *ArgAddr;
  unsigned Stride; // bytes to advance the va_list pointer

  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");

  case ABIArgInfo::Extend:
    // Extended integers sit in the high-order end of the 8-byte slot
    // (big-endian), so step forward to where the value actually starts.
    Stride = 8;
    ArgAddr = Builder
      .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
                          "extend");
    break;

  case ABIArgInfo::Direct:
    // Direct (possibly coerced) values occupy the slot in place.
    Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    ArgAddr = Addr;
    break;

  case ABIArgInfo::Indirect:
    // The slot holds a pointer to the actual argument.
    Stride = 8;
    ArgAddr = Builder.CreateBitCast(Addr,
                                    llvm::PointerType::getUnqual(ArgPtrTy),
                                    "indirect");
    ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
    break;

  case ABIArgInfo::Ignore:
    return llvm::UndefValue::get(ArgPtrTy);
  }

  // Update VAList.
  Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
  Builder.CreateStore(Addr, VAListAddrAsBPP);

  return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
}
6038 
6039 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
6040   FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
6041   for (auto &I : FI.arguments())
6042     I.info = classifyType(I.type, 16 * 8);
6043 }
6044 
6045 namespace {
// Target hooks for SPARC v9: custom DWARF EH stack pointer and register-size
// table (see initDwarfEHRegSizeTable below).
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // DWARF register 14 — presumably %o6, the SPARC stack pointer.
    return 14;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
6058 } // end anonymous namespace
6059 
6060 bool
6061 SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6062                                                 llvm::Value *Address) const {
6063   // This is calculated from the LLVM and GCC tables and verified
6064   // against gcc output.  AFAIK all ABIs use the same encoding.
6065 
6066   CodeGen::CGBuilderTy &Builder = CGF.Builder;
6067 
6068   llvm::IntegerType *i8 = CGF.Int8Ty;
6069   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
6070   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
6071 
6072   // 0-31: the 8-byte general-purpose registers
6073   AssignToArrayRange(Builder, Address, Eight8, 0, 31);
6074 
6075   // 32-63: f0-31, the 4-byte floating-point registers
6076   AssignToArrayRange(Builder, Address, Four8, 32, 63);
6077 
6078   //   Y   = 64
6079   //   PSR = 65
6080   //   WIM = 66
6081   //   TBR = 67
6082   //   PC  = 68
6083   //   NPC = 69
6084   //   FSR = 70
6085   //   CSR = 71
6086   AssignToArrayRange(Builder, Address, Eight8, 64, 71);
6087 
6088   // 72-87: d0-15, the 8-byte floating-point registers
6089   AssignToArrayRange(Builder, Address, Eight8, 72, 87);
6090 
6091   return false;
6092 }
6093 
6094 
6095 //===----------------------------------------------------------------------===//
6096 // XCore ABI Implementation
6097 //===----------------------------------------------------------------------===//
6098 namespace {
// XCore ABI: identical to the defaults except for va_arg lowering.
class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
6105 
// Target hooks for XCore; nothing is customized beyond the ABIInfo.
class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
    :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
};
6111 } // End anonymous namespace.
6112 
// Emit the address computation for an XCore va_arg.  The va_list is a byte
// pointer; the classification of Ty decides whether the slot holds the value
// itself (Direct/Extend) or a pointer to it (Indirect), and how far to
// advance the pointer afterwards.
llvm::Value *XCoreABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr,
                                                       CGF.Int8PtrPtrTy);
  llvm::Value *AP = Builder.CreateLoad(VAListAddrAsBPP);

  // Handle the argument.
  ABIArgInfo AI = classifyArgumentType(Ty);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  llvm::Value *Val;
  uint64_t ArgSize = 0; // bytes to advance the va_list; 0 = no advance
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = llvm::UndefValue::get(ArgPtrTy);
    ArgSize = 0;
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    // Value lives in the slot; slots are at least 4 bytes wide.
    Val = Builder.CreatePointerCast(AP, ArgPtrTy);
    ArgSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    if (ArgSize < 4)
      ArgSize = 4;
    break;
  case ABIArgInfo::Indirect:
    // The 4-byte slot holds a pointer to the actual argument.
    llvm::Value *ArgAddr;
    ArgAddr = Builder.CreateBitCast(AP, llvm::PointerType::getUnqual(ArgPtrTy));
    ArgAddr = Builder.CreateLoad(ArgAddr);
    Val = Builder.CreatePointerCast(ArgAddr, ArgPtrTy);
    ArgSize = 4;
    break;
  }

  // Increment the VAList.
  if (ArgSize) {
    llvm::Value *APN = Builder.CreateConstGEP1_32(AP, ArgSize);
    Builder.CreateStore(APN, VAListAddrAsBPP);
  }
  return Val;
}
6161 
6162 //===----------------------------------------------------------------------===//
6163 // Driver code
6164 //===----------------------------------------------------------------------===//
6165 
6166 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
6167   if (TheTargetCodeGenInfo)
6168     return *TheTargetCodeGenInfo;
6169 
6170   const llvm::Triple &Triple = getTarget().getTriple();
6171   switch (Triple.getArch()) {
6172   default:
6173     return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
6174 
6175   case llvm::Triple::le32:
6176     return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
6177   case llvm::Triple::mips:
6178   case llvm::Triple::mipsel:
6179     return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
6180 
6181   case llvm::Triple::mips64:
6182   case llvm::Triple::mips64el:
6183     return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
6184 
6185   case llvm::Triple::arm64:
6186   case llvm::Triple::arm64_be: {
6187     ARM64ABIInfo::ABIKind Kind = ARM64ABIInfo::AAPCS;
6188     if (strcmp(getTarget().getABI(), "darwinpcs") == 0)
6189       Kind = ARM64ABIInfo::DarwinPCS;
6190 
6191     return *(TheTargetCodeGenInfo = new ARM64TargetCodeGenInfo(Types, Kind));
6192   }
6193 
6194   case llvm::Triple::aarch64:
6195   case llvm::Triple::aarch64_be:
6196     return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types));
6197 
6198   case llvm::Triple::arm:
6199   case llvm::Triple::armeb:
6200   case llvm::Triple::thumb:
6201   case llvm::Triple::thumbeb:
6202     {
6203       ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
6204       if (strcmp(getTarget().getABI(), "apcs-gnu") == 0)
6205         Kind = ARMABIInfo::APCS;
6206       else if (CodeGenOpts.FloatABI == "hard" ||
6207                (CodeGenOpts.FloatABI != "soft" &&
6208                 Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
6209         Kind = ARMABIInfo::AAPCS_VFP;
6210 
6211       switch (Triple.getOS()) {
6212         case llvm::Triple::NaCl:
6213           return *(TheTargetCodeGenInfo =
6214                    new NaClARMTargetCodeGenInfo(Types, Kind));
6215         default:
6216           return *(TheTargetCodeGenInfo =
6217                    new ARMTargetCodeGenInfo(Types, Kind));
6218       }
6219     }
6220 
6221   case llvm::Triple::ppc:
6222     return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
6223   case llvm::Triple::ppc64:
6224     if (Triple.isOSBinFormatELF())
6225       return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
6226     else
6227       return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
6228   case llvm::Triple::ppc64le:
6229     assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
6230     return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
6231 
6232   case llvm::Triple::nvptx:
6233   case llvm::Triple::nvptx64:
6234     return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));
6235 
6236   case llvm::Triple::msp430:
6237     return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
6238 
6239   case llvm::Triple::systemz:
6240     return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
6241 
6242   case llvm::Triple::tce:
6243     return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));
6244 
6245   case llvm::Triple::x86: {
6246     bool IsDarwinVectorABI = Triple.isOSDarwin();
6247     bool IsSmallStructInRegABI =
6248         X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
6249     bool IsWin32FloatStructABI = Triple.isWindowsMSVCEnvironment();
6250 
6251     if (Triple.getOS() == llvm::Triple::Win32) {
6252       return *(TheTargetCodeGenInfo =
6253                new WinX86_32TargetCodeGenInfo(Types,
6254                                               IsDarwinVectorABI, IsSmallStructInRegABI,
6255                                               IsWin32FloatStructABI,
6256                                               CodeGenOpts.NumRegisterParameters));
6257     } else {
6258       return *(TheTargetCodeGenInfo =
6259                new X86_32TargetCodeGenInfo(Types,
6260                                            IsDarwinVectorABI, IsSmallStructInRegABI,
6261                                            IsWin32FloatStructABI,
6262                                            CodeGenOpts.NumRegisterParameters));
6263     }
6264   }
6265 
6266   case llvm::Triple::x86_64: {
6267     bool HasAVX = strcmp(getTarget().getABI(), "avx") == 0;
6268 
6269     switch (Triple.getOS()) {
6270     case llvm::Triple::Win32:
6271     case llvm::Triple::MinGW32:
6272     case llvm::Triple::Cygwin:
6273       return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
6274     case llvm::Triple::NaCl:
6275       return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types,
6276                                                                       HasAVX));
6277     default:
6278       return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
6279                                                                   HasAVX));
6280     }
6281   }
6282   case llvm::Triple::hexagon:
6283     return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
6284   case llvm::Triple::sparcv9:
6285     return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
6286   case llvm::Triple::xcore:
6287     return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types));
6288   }
6289 }
6290