1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliancy.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "TargetInfo.h"
16 #include "ABIInfo.h"
17 #include "CGCXXABI.h"
18 #include "CodeGenFunction.h"
19 #include "clang/AST/RecordLayout.h"
20 #include "clang/Frontend/CodeGenOptions.h"
21 #include "llvm/ADT/Triple.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Type.h"
24 #include "llvm/Support/raw_ostream.h"
25 using namespace clang;
26 using namespace CodeGen;
27 
28 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
29                                llvm::Value *Array,
30                                llvm::Value *Value,
31                                unsigned FirstIndex,
32                                unsigned LastIndex) {
33   // Alternatively, we could emit this as a loop in the source.
34   for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
35     llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
36     Builder.CreateStore(Value, Cell);
37   }
38 }
39 
40 static bool isAggregateTypeForABI(QualType T) {
41   return !CodeGenFunction::hasScalarEvaluationKind(T) ||
42          T->isMemberFunctionPointerType();
43 }
44 
45 ABIInfo::~ABIInfo() {}
46 
47 static bool isRecordReturnIndirect(const RecordType *RT, CodeGen::CodeGenTypes &CGT) {
48   const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
49   if (!RD)
50     return false;
51   return CGT.CGM.getCXXABI().isReturnTypeIndirect(RD);
52 }
53 
54 
55 static bool isRecordReturnIndirect(QualType T, CodeGen::CodeGenTypes &CGT) {
56   const RecordType *RT = T->getAs<RecordType>();
57   if (!RT)
58     return false;
59   return isRecordReturnIndirect(RT, CGT);
60 }
61 
62 static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
63                                               CodeGen::CodeGenTypes &CGT) {
64   const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
65   if (!RD)
66     return CGCXXABI::RAA_Default;
67   return CGT.CGM.getCXXABI().getRecordArgABI(RD);
68 }
69 
70 static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
71                                               CodeGen::CodeGenTypes &CGT) {
72   const RecordType *RT = T->getAs<RecordType>();
73   if (!RT)
74     return CGCXXABI::RAA_Default;
75   return getRecordArgABI(RT, CGT);
76 }
77 
78 ASTContext &ABIInfo::getContext() const {
79   return CGT.getContext();
80 }
81 
82 llvm::LLVMContext &ABIInfo::getVMContext() const {
83   return CGT.getLLVMContext();
84 }
85 
86 const llvm::DataLayout &ABIInfo::getDataLayout() const {
87   return CGT.getDataLayout();
88 }
89 
90 const TargetInfo &ABIInfo::getTarget() const {
91   return CGT.getTarget();
92 }
93 
94 void ABIArgInfo::dump() const {
95   raw_ostream &OS = llvm::errs();
96   OS << "(ABIArgInfo Kind=";
97   switch (TheKind) {
98   case Direct:
99     OS << "Direct Type=";
100     if (llvm::Type *Ty = getCoerceToType())
101       Ty->print(OS);
102     else
103       OS << "null";
104     break;
105   case Extend:
106     OS << "Extend";
107     break;
108   case Ignore:
109     OS << "Ignore";
110     break;
111   case Indirect:
112     OS << "Indirect Align=" << getIndirectAlign()
113        << " ByVal=" << getIndirectByVal()
114        << " Realign=" << getIndirectRealign();
115     break;
116   case Expand:
117     OS << "Expand";
118     break;
119   }
120   OS << ")\n";
121 }
122 
123 TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
124 
125 // If someone can figure out a general rule for this, that would be great.
126 // It's probably just doomed to be platform-dependent, though.
127 unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
128   // Verified for:
129   //   x86-64     FreeBSD, Linux, Darwin
130   //   x86-32     FreeBSD, Linux, Darwin
131   //   PowerPC    Linux, Darwin
132   //   ARM        Darwin (*not* EABI)
133   //   AArch64    Linux
134   return 32;
135 }
136 
137 bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
138                                      const FunctionNoProtoType *fnType) const {
139   // The following conventions are known to require this to be false:
140   //   x86_stdcall
141   //   MIPS
142   // For everything else, we just prefer false unless we opt out.
143   return false;
144 }
145 
146 void
147 TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
148                                              llvm::SmallString<24> &Opt) const {
149   // This assumes the user is passing a library name like "rt" instead of a
150   // filename like "librt.a/so", and that they don't care whether it's static or
151   // dynamic.
152   Opt = "-l";
153   Opt += Lib;
154 }
155 
156 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
157 
158 /// isEmptyField - Return true iff a the field is "empty", that is it
159 /// is an unnamed bit-field or an (array of) empty record(s).
160 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
161                          bool AllowArrays) {
162   if (FD->isUnnamedBitfield())
163     return true;
164 
165   QualType FT = FD->getType();
166 
167   // Constant arrays of empty records count as empty, strip them off.
168   // Constant arrays of zero length always count as empty.
169   if (AllowArrays)
170     while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
171       if (AT->getSize() == 0)
172         return true;
173       FT = AT->getElementType();
174     }
175 
176   const RecordType *RT = FT->getAs<RecordType>();
177   if (!RT)
178     return false;
179 
180   // C++ record fields are never empty, at least in the Itanium ABI.
181   //
182   // FIXME: We should use a predicate for whether this behavior is true in the
183   // current ABI.
184   if (isa<CXXRecordDecl>(RT->getDecl()))
185     return false;
186 
187   return isEmptyRecord(Context, FT, AllowArrays);
188 }
189 
190 /// isEmptyRecord - Return true iff a structure contains only empty
191 /// fields. Note that a structure with a flexible array member is not
192 /// considered empty.
193 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
194   const RecordType *RT = T->getAs<RecordType>();
195   if (!RT)
196     return 0;
197   const RecordDecl *RD = RT->getDecl();
198   if (RD->hasFlexibleArrayMember())
199     return false;
200 
201   // If this is a C++ record, check the bases first.
202   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
203     for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
204            e = CXXRD->bases_end(); i != e; ++i)
205       if (!isEmptyRecord(Context, i->getType(), true))
206         return false;
207 
208   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
209          i != e; ++i)
210     if (!isEmptyField(Context, *i, AllowArrays))
211       return false;
212   return true;
213 }
214 
215 /// isSingleElementStruct - Determine if a structure is a "single
216 /// element struct", i.e. it has exactly one non-empty field or
217 /// exactly one field which is itself a single element
218 /// struct. Structures with flexible array members are never
219 /// considered single element structs.
220 ///
221 /// \return The field declaration for the single non-empty field, if
222 /// it exists.
223 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
224   const RecordType *RT = T->getAsStructureType();
225   if (!RT)
226     return 0;
227 
228   const RecordDecl *RD = RT->getDecl();
229   if (RD->hasFlexibleArrayMember())
230     return 0;
231 
232   const Type *Found = 0;
233 
234   // If this is a C++ record, check the bases first.
235   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
236     for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
237            e = CXXRD->bases_end(); i != e; ++i) {
238       // Ignore empty records.
239       if (isEmptyRecord(Context, i->getType(), true))
240         continue;
241 
242       // If we already found an element then this isn't a single-element struct.
243       if (Found)
244         return 0;
245 
246       // If this is non-empty and not a single element struct, the composite
247       // cannot be a single element struct.
248       Found = isSingleElementStruct(i->getType(), Context);
249       if (!Found)
250         return 0;
251     }
252   }
253 
254   // Check for single element.
255   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
256          i != e; ++i) {
257     const FieldDecl *FD = *i;
258     QualType FT = FD->getType();
259 
260     // Ignore empty fields.
261     if (isEmptyField(Context, FD, true))
262       continue;
263 
264     // If we already found an element then this isn't a single-element
265     // struct.
266     if (Found)
267       return 0;
268 
269     // Treat single element arrays as the element.
270     while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
271       if (AT->getSize().getZExtValue() != 1)
272         break;
273       FT = AT->getElementType();
274     }
275 
276     if (!isAggregateTypeForABI(FT)) {
277       Found = FT.getTypePtr();
278     } else {
279       Found = isSingleElementStruct(FT, Context);
280       if (!Found)
281         return 0;
282     }
283   }
284 
285   // We don't consider a struct a single-element struct if it has
286   // padding beyond the element type.
287   if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
288     return 0;
289 
290   return Found;
291 }
292 
293 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
294   // Treat complex types as the element type.
295   if (const ComplexType *CTy = Ty->getAs<ComplexType>())
296     Ty = CTy->getElementType();
297 
298   // Check for a type which we know has a simple scalar argument-passing
299   // convention without any padding.  (We're specifically looking for 32
300   // and 64-bit integer and integer-equivalents, float, and double.)
301   if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
302       !Ty->isEnumeralType() && !Ty->isBlockPointerType())
303     return false;
304 
305   uint64_t Size = Context.getTypeSize(Ty);
306   return Size == 32 || Size == 64;
307 }
308 
309 /// canExpandIndirectArgument - Test whether an argument type which is to be
310 /// passed indirectly (on the stack) would have the equivalent layout if it was
311 /// expanded into separate arguments. If so, we prefer to do the latter to avoid
312 /// inhibiting optimizations.
313 ///
314 // FIXME: This predicate is missing many cases, currently it just follows
315 // llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
316 // should probably make this smarter, or better yet make the LLVM backend
317 // capable of handling it.
318 static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
319   // We can only expand structure types.
320   const RecordType *RT = Ty->getAs<RecordType>();
321   if (!RT)
322     return false;
323 
324   // We can only expand (C) structures.
325   //
326   // FIXME: This needs to be generalized to handle classes as well.
327   const RecordDecl *RD = RT->getDecl();
328   if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
329     return false;
330 
331   uint64_t Size = 0;
332 
333   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
334          i != e; ++i) {
335     const FieldDecl *FD = *i;
336 
337     if (!is32Or64BitBasicType(FD->getType(), Context))
338       return false;
339 
340     // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
341     // how to expand them yet, and the predicate for telling if a bitfield still
342     // counts as "basic" is more complicated than what we were doing previously.
343     if (FD->isBitField())
344       return false;
345 
346     Size += Context.getTypeSize(FD->getType());
347   }
348 
349   // Make sure there are not any holes in the struct.
350   if (Size != Context.getTypeSize(Ty))
351     return false;
352 
353   return true;
354 }
355 
356 namespace {
357 /// DefaultABIInfo - The default implementation for ABI specific
358 /// details. This implementation provides information which results in
359 /// self-consistent and sensible LLVM IR generation, but does not
360 /// conform to any particular ABI.
361 class DefaultABIInfo : public ABIInfo {
362 public:
363   DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
364 
365   ABIArgInfo classifyReturnType(QualType RetTy) const;
366   ABIArgInfo classifyArgumentType(QualType RetTy) const;
367 
368   virtual void computeInfo(CGFunctionInfo &FI) const {
369     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
370     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
371          it != ie; ++it)
372       it->info = classifyArgumentType(it->type);
373   }
374 
375   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
376                                  CodeGenFunction &CGF) const;
377 };
378 
379 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
380 public:
381   DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
382     : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
383 };
384 
385 llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
386                                        CodeGenFunction &CGF) const {
387   return 0;
388 }
389 
390 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
391   if (isAggregateTypeForABI(Ty)) {
392     // Records with non trivial destructors/constructors should not be passed
393     // by value.
394     if (isRecordReturnIndirect(Ty, CGT))
395       return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
396 
397     return ABIArgInfo::getIndirect(0);
398   }
399 
400   // Treat an enum type as its underlying type.
401   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
402     Ty = EnumTy->getDecl()->getIntegerType();
403 
404   return (Ty->isPromotableIntegerType() ?
405           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
406 }
407 
408 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
409   if (RetTy->isVoidType())
410     return ABIArgInfo::getIgnore();
411 
412   if (isAggregateTypeForABI(RetTy))
413     return ABIArgInfo::getIndirect(0);
414 
415   // Treat an enum type as its underlying type.
416   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
417     RetTy = EnumTy->getDecl()->getIntegerType();
418 
419   return (RetTy->isPromotableIntegerType() ?
420           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
421 }
422 
423 //===----------------------------------------------------------------------===//
424 // le32/PNaCl bitcode ABI Implementation
425 //
426 // This is a simplified version of the x86_32 ABI.  Arguments and return values
427 // are always passed on the stack.
428 //===----------------------------------------------------------------------===//
429 
430 class PNaClABIInfo : public ABIInfo {
431  public:
432   PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
433 
434   ABIArgInfo classifyReturnType(QualType RetTy) const;
435   ABIArgInfo classifyArgumentType(QualType RetTy) const;
436 
437   virtual void computeInfo(CGFunctionInfo &FI) const;
438   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
439                                  CodeGenFunction &CGF) const;
440 };
441 
442 class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
443  public:
444   PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
445     : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
446 };
447 
448 void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
449     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
450 
451     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
452          it != ie; ++it)
453       it->info = classifyArgumentType(it->type);
454   }
455 
456 llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
457                                        CodeGenFunction &CGF) const {
458   return 0;
459 }
460 
461 /// \brief Classify argument of given type \p Ty.
462 ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
463   if (isAggregateTypeForABI(Ty)) {
464     if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
465       return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
466     return ABIArgInfo::getIndirect(0);
467   } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
468     // Treat an enum type as its underlying type.
469     Ty = EnumTy->getDecl()->getIntegerType();
470   } else if (Ty->isFloatingType()) {
471     // Floating-point types don't go inreg.
472     return ABIArgInfo::getDirect();
473   }
474 
475   return (Ty->isPromotableIntegerType() ?
476           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
477 }
478 
479 ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
480   if (RetTy->isVoidType())
481     return ABIArgInfo::getIgnore();
482 
483   // In the PNaCl ABI we always return records/structures on the stack.
484   if (isAggregateTypeForABI(RetTy))
485     return ABIArgInfo::getIndirect(0);
486 
487   // Treat an enum type as its underlying type.
488   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
489     RetTy = EnumTy->getDecl()->getIntegerType();
490 
491   return (RetTy->isPromotableIntegerType() ?
492           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
493 }
494 
495 /// IsX86_MMXType - Return true if this is an MMX type.
496 bool IsX86_MMXType(llvm::Type *IRType) {
497   // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
498   return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
499     cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
500     IRType->getScalarSizeInBits() != 64;
501 }
502 
503 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
504                                           StringRef Constraint,
505                                           llvm::Type* Ty) {
506   if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
507     if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
508       // Invalid MMX constraint
509       return 0;
510     }
511 
512     return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
513   }
514 
515   // No operation needed
516   return Ty;
517 }
518 
519 //===----------------------------------------------------------------------===//
520 // X86-32 ABI Implementation
521 //===----------------------------------------------------------------------===//
522 
523 /// X86_32ABIInfo - The X86-32 ABI information.
524 class X86_32ABIInfo : public ABIInfo {
525   enum Class {
526     Integer,
527     Float
528   };
529 
530   static const unsigned MinABIStackAlignInBytes = 4;
531 
532   bool IsDarwinVectorABI;
533   bool IsSmallStructInRegABI;
534   bool IsWin32StructABI;
535   unsigned DefaultNumRegisterParameters;
536 
537   static bool isRegisterSize(unsigned Size) {
538     return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
539   }
540 
541   static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
542                                           unsigned callingConvention);
543 
544   /// getIndirectResult - Give a source type \arg Ty, return a suitable result
545   /// such that the argument will be passed in memory.
546   ABIArgInfo getIndirectResult(QualType Ty, bool ByVal,
547                                unsigned &FreeRegs) const;
548 
549   /// \brief Return the alignment to use for the given type on the stack.
550   unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
551 
552   Class classify(QualType Ty) const;
553   ABIArgInfo classifyReturnType(QualType RetTy,
554                                 unsigned callingConvention) const;
555   ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs,
556                                   bool IsFastCall) const;
557   bool shouldUseInReg(QualType Ty, unsigned &FreeRegs,
558                       bool IsFastCall, bool &NeedsPadding) const;
559 
560 public:
561 
562   virtual void computeInfo(CGFunctionInfo &FI) const;
563   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
564                                  CodeGenFunction &CGF) const;
565 
566   X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
567                 unsigned r)
568     : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
569       IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
570 };
571 
572 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
573 public:
574   X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
575       bool d, bool p, bool w, unsigned r)
576     :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}
577 
578   static bool isStructReturnInRegABI(
579       const llvm::Triple &Triple, const CodeGenOptions &Opts);
580 
581   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
582                            CodeGen::CodeGenModule &CGM) const;
583 
584   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
585     // Darwin uses different dwarf register numbers for EH.
586     if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
587     return 4;
588   }
589 
590   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
591                                llvm::Value *Address) const;
592 
593   llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
594                                   StringRef Constraint,
595                                   llvm::Type* Ty) const {
596     return X86AdjustInlineAsmType(CGF, Constraint, Ty);
597   }
598 
599 };
600 
601 }
602 
603 /// shouldReturnTypeInRegister - Determine if the given type should be
604 /// passed in a register (for the Darwin ABI).
605 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
606                                                ASTContext &Context,
607                                                unsigned callingConvention) {
608   uint64_t Size = Context.getTypeSize(Ty);
609 
610   // Type must be register sized.
611   if (!isRegisterSize(Size))
612     return false;
613 
614   if (Ty->isVectorType()) {
615     // 64- and 128- bit vectors inside structures are not returned in
616     // registers.
617     if (Size == 64 || Size == 128)
618       return false;
619 
620     return true;
621   }
622 
623   // If this is a builtin, pointer, enum, complex type, member pointer, or
624   // member function pointer it is ok.
625   if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
626       Ty->isAnyComplexType() || Ty->isEnumeralType() ||
627       Ty->isBlockPointerType() || Ty->isMemberPointerType())
628     return true;
629 
630   // Arrays are treated like records.
631   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
632     return shouldReturnTypeInRegister(AT->getElementType(), Context,
633                                       callingConvention);
634 
635   // Otherwise, it must be a record type.
636   const RecordType *RT = Ty->getAs<RecordType>();
637   if (!RT) return false;
638 
639   // FIXME: Traverse bases here too.
640 
641   // For thiscall conventions, structures will never be returned in
642   // a register.  This is for compatibility with the MSVC ABI
643   if (callingConvention == llvm::CallingConv::X86_ThisCall &&
644       RT->isStructureType()) {
645     return false;
646   }
647 
648   // Structure types are passed in register if all fields would be
649   // passed in a register.
650   for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
651          e = RT->getDecl()->field_end(); i != e; ++i) {
652     const FieldDecl *FD = *i;
653 
654     // Empty fields are ignored.
655     if (isEmptyField(Context, FD, true))
656       continue;
657 
658     // Check fields recursively.
659     if (!shouldReturnTypeInRegister(FD->getType(), Context,
660                                     callingConvention))
661       return false;
662   }
663   return true;
664 }
665 
666 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
667                                             unsigned callingConvention) const {
668   if (RetTy->isVoidType())
669     return ABIArgInfo::getIgnore();
670 
671   if (const VectorType *VT = RetTy->getAs<VectorType>()) {
672     // On Darwin, some vectors are returned in registers.
673     if (IsDarwinVectorABI) {
674       uint64_t Size = getContext().getTypeSize(RetTy);
675 
676       // 128-bit vectors are a special case; they are returned in
677       // registers and we need to make sure to pick a type the LLVM
678       // backend will like.
679       if (Size == 128)
680         return ABIArgInfo::getDirect(llvm::VectorType::get(
681                   llvm::Type::getInt64Ty(getVMContext()), 2));
682 
683       // Always return in register if it fits in a general purpose
684       // register, or if it is 64 bits and has a single element.
685       if ((Size == 8 || Size == 16 || Size == 32) ||
686           (Size == 64 && VT->getNumElements() == 1))
687         return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
688                                                             Size));
689 
690       return ABIArgInfo::getIndirect(0);
691     }
692 
693     return ABIArgInfo::getDirect();
694   }
695 
696   if (isAggregateTypeForABI(RetTy)) {
697     if (const RecordType *RT = RetTy->getAs<RecordType>()) {
698       if (isRecordReturnIndirect(RT, CGT))
699         return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
700 
701       // Structures with flexible arrays are always indirect.
702       if (RT->getDecl()->hasFlexibleArrayMember())
703         return ABIArgInfo::getIndirect(0);
704     }
705 
706     // If specified, structs and unions are always indirect.
707     if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
708       return ABIArgInfo::getIndirect(0);
709 
710     // Small structures which are register sized are generally returned
711     // in a register.
712     if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(),
713                                                   callingConvention)) {
714       uint64_t Size = getContext().getTypeSize(RetTy);
715 
716       // As a special-case, if the struct is a "single-element" struct, and
717       // the field is of type "float" or "double", return it in a
718       // floating-point register. (MSVC does not apply this special case.)
719       // We apply a similar transformation for pointer types to improve the
720       // quality of the generated IR.
721       if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
722         if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
723             || SeltTy->hasPointerRepresentation())
724           return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
725 
726       // FIXME: We should be able to narrow this integer in cases with dead
727       // padding.
728       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
729     }
730 
731     return ABIArgInfo::getIndirect(0);
732   }
733 
734   // Treat an enum type as its underlying type.
735   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
736     RetTy = EnumTy->getDecl()->getIntegerType();
737 
738   return (RetTy->isPromotableIntegerType() ?
739           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
740 }
741 
742 static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
743   return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
744 }
745 
746 static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
747   const RecordType *RT = Ty->getAs<RecordType>();
748   if (!RT)
749     return 0;
750   const RecordDecl *RD = RT->getDecl();
751 
752   // If this is a C++ record, check the bases first.
753   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
754     for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
755            e = CXXRD->bases_end(); i != e; ++i)
756       if (!isRecordWithSSEVectorType(Context, i->getType()))
757         return false;
758 
759   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
760        i != e; ++i) {
761     QualType FT = i->getType();
762 
763     if (isSSEVectorType(Context, FT))
764       return true;
765 
766     if (isRecordWithSSEVectorType(Context, FT))
767       return true;
768   }
769 
770   return false;
771 }
772 
773 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
774                                                  unsigned Align) const {
775   // Otherwise, if the alignment is less than or equal to the minimum ABI
776   // alignment, just use the default; the backend will handle this.
777   if (Align <= MinABIStackAlignInBytes)
778     return 0; // Use default alignment.
779 
780   // On non-Darwin, the stack type alignment is always 4.
781   if (!IsDarwinVectorABI) {
782     // Set explicit alignment, since we may need to realign the top.
783     return MinABIStackAlignInBytes;
784   }
785 
786   // Otherwise, if the type contains an SSE vector type, the alignment is 16.
787   if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
788                       isRecordWithSSEVectorType(getContext(), Ty)))
789     return 16;
790 
791   return MinABIStackAlignInBytes;
792 }
793 
794 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
795                                             unsigned &FreeRegs) const {
796   if (!ByVal) {
797     if (FreeRegs) {
798       --FreeRegs; // Non byval indirects just use one pointer.
799       return ABIArgInfo::getIndirectInReg(0, false);
800     }
801     return ABIArgInfo::getIndirect(0, false);
802   }
803 
804   // Compute the byval alignment.
805   unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
806   unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
807   if (StackAlign == 0)
808     return ABIArgInfo::getIndirect(4);
809 
810   // If the stack alignment is less than the type alignment, realign the
811   // argument.
812   if (StackAlign < TypeAlign)
813     return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
814                                    /*Realign=*/true);
815 
816   return ABIArgInfo::getIndirect(StackAlign);
817 }
818 
819 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
820   const Type *T = isSingleElementStruct(Ty, getContext());
821   if (!T)
822     T = Ty.getTypePtr();
823 
824   if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
825     BuiltinType::Kind K = BT->getKind();
826     if (K == BuiltinType::Float || K == BuiltinType::Double)
827       return Float;
828   }
829   return Integer;
830 }
831 
832 bool X86_32ABIInfo::shouldUseInReg(QualType Ty, unsigned &FreeRegs,
833                                    bool IsFastCall, bool &NeedsPadding) const {
834   NeedsPadding = false;
835   Class C = classify(Ty);
836   if (C == Float)
837     return false;
838 
839   unsigned Size = getContext().getTypeSize(Ty);
840   unsigned SizeInRegs = (Size + 31) / 32;
841 
842   if (SizeInRegs == 0)
843     return false;
844 
845   if (SizeInRegs > FreeRegs) {
846     FreeRegs = 0;
847     return false;
848   }
849 
850   FreeRegs -= SizeInRegs;
851 
852   if (IsFastCall) {
853     if (Size > 32)
854       return false;
855 
856     if (Ty->isIntegralOrEnumerationType())
857       return true;
858 
859     if (Ty->isPointerType())
860       return true;
861 
862     if (Ty->isReferenceType())
863       return true;
864 
865     if (FreeRegs)
866       NeedsPadding = true;
867 
868     return false;
869   }
870 
871   return true;
872 }
873 
874 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
875                                                unsigned &FreeRegs,
876                                                bool IsFastCall) const {
877   // FIXME: Set alignment on indirect arguments.
878   if (isAggregateTypeForABI(Ty)) {
879     if (const RecordType *RT = Ty->getAs<RecordType>()) {
880       if (IsWin32StructABI)
881         return getIndirectResult(Ty, true, FreeRegs);
882 
883       if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, CGT))
884         return getIndirectResult(Ty, RAA == CGCXXABI::RAA_DirectInMemory, FreeRegs);
885 
886       // Structures with flexible arrays are always indirect.
887       if (RT->getDecl()->hasFlexibleArrayMember())
888         return getIndirectResult(Ty, true, FreeRegs);
889     }
890 
891     // Ignore empty structs/unions.
892     if (isEmptyRecord(getContext(), Ty, true))
893       return ABIArgInfo::getIgnore();
894 
895     llvm::LLVMContext &LLVMContext = getVMContext();
896     llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
897     bool NeedsPadding;
898     if (shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding)) {
899       unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
900       SmallVector<llvm::Type*, 3> Elements;
901       for (unsigned I = 0; I < SizeInRegs; ++I)
902         Elements.push_back(Int32);
903       llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
904       return ABIArgInfo::getDirectInReg(Result);
905     }
906     llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : 0;
907 
908     // Expand small (<= 128-bit) record types when we know that the stack layout
909     // of those arguments will match the struct. This is important because the
910     // LLVM backend isn't smart enough to remove byval, which inhibits many
911     // optimizations.
912     if (getContext().getTypeSize(Ty) <= 4*32 &&
913         canExpandIndirectArgument(Ty, getContext()))
914       return ABIArgInfo::getExpandWithPadding(IsFastCall, PaddingType);
915 
916     return getIndirectResult(Ty, true, FreeRegs);
917   }
918 
919   if (const VectorType *VT = Ty->getAs<VectorType>()) {
920     // On Darwin, some vectors are passed in memory, we handle this by passing
921     // it as an i8/i16/i32/i64.
922     if (IsDarwinVectorABI) {
923       uint64_t Size = getContext().getTypeSize(Ty);
924       if ((Size == 8 || Size == 16 || Size == 32) ||
925           (Size == 64 && VT->getNumElements() == 1))
926         return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
927                                                             Size));
928     }
929 
930     if (IsX86_MMXType(CGT.ConvertType(Ty)))
931       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
932 
933     return ABIArgInfo::getDirect();
934   }
935 
936 
937   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
938     Ty = EnumTy->getDecl()->getIntegerType();
939 
940   bool NeedsPadding;
941   bool InReg = shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding);
942 
943   if (Ty->isPromotableIntegerType()) {
944     if (InReg)
945       return ABIArgInfo::getExtendInReg();
946     return ABIArgInfo::getExtend();
947   }
948   if (InReg)
949     return ABIArgInfo::getDirectInReg();
950   return ABIArgInfo::getDirect();
951 }
952 
953 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
954   FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
955                                           FI.getCallingConvention());
956 
957   unsigned CC = FI.getCallingConvention();
958   bool IsFastCall = CC == llvm::CallingConv::X86_FastCall;
959   unsigned FreeRegs;
960   if (IsFastCall)
961     FreeRegs = 2;
962   else if (FI.getHasRegParm())
963     FreeRegs = FI.getRegParm();
964   else
965     FreeRegs = DefaultNumRegisterParameters;
966 
967   // If the return value is indirect, then the hidden argument is consuming one
968   // integer register.
969   if (FI.getReturnInfo().isIndirect() && FreeRegs) {
970     --FreeRegs;
971     ABIArgInfo &Old = FI.getReturnInfo();
972     Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(),
973                                        Old.getIndirectByVal(),
974                                        Old.getIndirectRealign());
975   }
976 
977   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
978        it != ie; ++it)
979     it->info = classifyArgumentType(it->type, FreeRegs, IsFastCall);
980 }
981 
982 llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
983                                       CodeGenFunction &CGF) const {
984   llvm::Type *BPP = CGF.Int8PtrPtrTy;
985 
986   CGBuilderTy &Builder = CGF.Builder;
987   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
988                                                        "ap");
989   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
990 
991   // Compute if the address needs to be aligned
992   unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
993   Align = getTypeStackAlignInBytes(Ty, Align);
994   Align = std::max(Align, 4U);
995   if (Align > 4) {
996     // addr = (addr + align - 1) & -align;
997     llvm::Value *Offset =
998       llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
999     Addr = CGF.Builder.CreateGEP(Addr, Offset);
1000     llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
1001                                                     CGF.Int32Ty);
1002     llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
1003     Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
1004                                       Addr->getType(),
1005                                       "ap.cur.aligned");
1006   }
1007 
1008   llvm::Type *PTy =
1009     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
1010   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
1011 
1012   uint64_t Offset =
1013     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
1014   llvm::Value *NextAddr =
1015     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
1016                       "ap.next");
1017   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
1018 
1019   return AddrTyped;
1020 }
1021 
1022 void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
1023                                                   llvm::GlobalValue *GV,
1024                                             CodeGen::CodeGenModule &CGM) const {
1025   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
1026     if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1027       // Get the LLVM function.
1028       llvm::Function *Fn = cast<llvm::Function>(GV);
1029 
1030       // Now add the 'alignstack' attribute with a value of 16.
1031       llvm::AttrBuilder B;
1032       B.addStackAlignmentAttr(16);
1033       Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
1034                       llvm::AttributeSet::get(CGM.getLLVMContext(),
1035                                               llvm::AttributeSet::FunctionIndex,
1036                                               B));
1037     }
1038   }
1039 }
1040 
1041 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1042                                                CodeGen::CodeGenFunction &CGF,
1043                                                llvm::Value *Address) const {
1044   CodeGen::CGBuilderTy &Builder = CGF.Builder;
1045 
1046   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
1047 
1048   // 0-7 are the eight integer registers;  the order is different
1049   //   on Darwin (for EH), but the range is the same.
1050   // 8 is %eip.
1051   AssignToArrayRange(Builder, Address, Four8, 0, 8);
1052 
1053   if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
1054     // 12-16 are st(0..4).  Not sure why we stop at 4.
1055     // These have size 16, which is sizeof(long double) on
1056     // platforms with 8-byte alignment for that type.
1057     llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
1058     AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
1059 
1060   } else {
1061     // 9 is %eflags, which doesn't get a size on Darwin for some
1062     // reason.
1063     Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));
1064 
1065     // 11-16 are st(0..5).  Not sure why we stop at 5.
1066     // These have size 12, which is sizeof(long double) on
1067     // platforms with 4-byte alignment for that type.
1068     llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
1069     AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
1070   }
1071 
1072   return false;
1073 }
1074 
1075 //===----------------------------------------------------------------------===//
1076 // X86-64 ABI Implementation
1077 //===----------------------------------------------------------------------===//
1078 
1079 
1080 namespace {
1081 /// X86_64ABIInfo - The X86_64 ABI information.
1082 class X86_64ABIInfo : public ABIInfo {
1083   enum Class {
1084     Integer = 0,
1085     SSE,
1086     SSEUp,
1087     X87,
1088     X87Up,
1089     ComplexX87,
1090     NoClass,
1091     Memory
1092   };
1093 
1094   /// merge - Implement the X86_64 ABI merging algorithm.
1095   ///
1096   /// Merge an accumulating classification \arg Accum with a field
1097   /// classification \arg Field.
1098   ///
1099   /// \param Accum - The accumulating classification. This should
1100   /// always be either NoClass or the result of a previous merge
1101   /// call. In addition, this should never be Memory (the caller
1102   /// should just return Memory for the aggregate).
1103   static Class merge(Class Accum, Class Field);
1104 
1105   /// postMerge - Implement the X86_64 ABI post merging algorithm.
1106   ///
1107   /// Post merger cleanup, reduces a malformed Hi and Lo pair to
1108   /// final MEMORY or SSE classes when necessary.
1109   ///
1110   /// \param AggregateSize - The size of the current aggregate in
1111   /// the classification process.
1112   ///
1113   /// \param Lo - The classification for the parts of the type
1114   /// residing in the low word of the containing object.
1115   ///
1116   /// \param Hi - The classification for the parts of the type
1117   /// residing in the higher words of the containing object.
1118   ///
1119   void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
1120 
1121   /// classify - Determine the x86_64 register classes in which the
1122   /// given type T should be passed.
1123   ///
1124   /// \param Lo - The classification for the parts of the type
1125   /// residing in the low word of the containing object.
1126   ///
1127   /// \param Hi - The classification for the parts of the type
1128   /// residing in the high word of the containing object.
1129   ///
1130   /// \param OffsetBase - The bit offset of this type in the
1131   /// containing object.  Some parameters are classified different
1132   /// depending on whether they straddle an eightbyte boundary.
1133   ///
1134   /// \param isNamedArg - Whether the argument in question is a "named"
1135   /// argument, as used in AMD64-ABI 3.5.7.
1136   ///
1137   /// If a word is unused its result will be NoClass; if a type should
1138   /// be passed in Memory then at least the classification of \arg Lo
1139   /// will be Memory.
1140   ///
1141   /// The \arg Lo class will be NoClass iff the argument is ignored.
1142   ///
1143   /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
1144   /// also be ComplexX87.
1145   void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
1146                 bool isNamedArg) const;
1147 
1148   llvm::Type *GetByteVectorType(QualType Ty) const;
1149   llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
1150                                  unsigned IROffset, QualType SourceTy,
1151                                  unsigned SourceOffset) const;
1152   llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
1153                                      unsigned IROffset, QualType SourceTy,
1154                                      unsigned SourceOffset) const;
1155 
1156   /// getIndirectResult - Give a source type \arg Ty, return a suitable result
1157   /// such that the argument will be returned in memory.
1158   ABIArgInfo getIndirectReturnResult(QualType Ty) const;
1159 
1160   /// getIndirectResult - Give a source type \arg Ty, return a suitable result
1161   /// such that the argument will be passed in memory.
1162   ///
1163   /// \param freeIntRegs - The number of free integer registers remaining
1164   /// available.
1165   ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
1166 
1167   ABIArgInfo classifyReturnType(QualType RetTy) const;
1168 
1169   ABIArgInfo classifyArgumentType(QualType Ty,
1170                                   unsigned freeIntRegs,
1171                                   unsigned &neededInt,
1172                                   unsigned &neededSSE,
1173                                   bool isNamedArg) const;
1174 
1175   bool IsIllegalVectorType(QualType Ty) const;
1176 
1177   /// The 0.98 ABI revision clarified a lot of ambiguities,
1178   /// unfortunately in ways that were not always consistent with
1179   /// certain previous compilers.  In particular, platforms which
1180   /// required strict binary compatibility with older versions of GCC
1181   /// may need to exempt themselves.
1182   bool honorsRevision0_98() const {
1183     return !getTarget().getTriple().isOSDarwin();
1184   }
1185 
1186   bool HasAVX;
1187   // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
1188   // 64-bit hardware.
1189   bool Has64BitPointers;
1190 
1191 public:
1192   X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
1193       ABIInfo(CGT), HasAVX(hasavx),
1194       Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
1195   }
1196 
1197   bool isPassedUsingAVXType(QualType type) const {
1198     unsigned neededInt, neededSSE;
1199     // The freeIntRegs argument doesn't matter here.
1200     ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
1201                                            /*isNamedArg*/true);
1202     if (info.isDirect()) {
1203       llvm::Type *ty = info.getCoerceToType();
1204       if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
1205         return (vectorTy->getBitWidth() > 128);
1206     }
1207     return false;
1208   }
1209 
1210   virtual void computeInfo(CGFunctionInfo &FI) const;
1211 
1212   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1213                                  CodeGenFunction &CGF) const;
1214 };
1215 
1216 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
1217 class WinX86_64ABIInfo : public ABIInfo {
1218 
1219   ABIArgInfo classify(QualType Ty, bool IsReturnType) const;
1220 
1221 public:
1222   WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
1223 
1224   virtual void computeInfo(CGFunctionInfo &FI) const;
1225 
1226   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1227                                  CodeGenFunction &CGF) const;
1228 };
1229 
1230 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
1231 public:
1232   X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
1233       : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
1234 
1235   const X86_64ABIInfo &getABIInfo() const {
1236     return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
1237   }
1238 
1239   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
1240     return 7;
1241   }
1242 
1243   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1244                                llvm::Value *Address) const {
1245     llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
1246 
1247     // 0-15 are the 16 integer registers.
1248     // 16 is %rip.
1249     AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
1250     return false;
1251   }
1252 
1253   llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1254                                   StringRef Constraint,
1255                                   llvm::Type* Ty) const {
1256     return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1257   }
1258 
1259   bool isNoProtoCallVariadic(const CallArgList &args,
1260                              const FunctionNoProtoType *fnType) const {
1261     // The default CC on x86-64 sets %al to the number of SSA
1262     // registers used, and GCC sets this when calling an unprototyped
1263     // function, so we override the default behavior.  However, don't do
1264     // that when AVX types are involved: the ABI explicitly states it is
1265     // undefined, and it doesn't work in practice because of how the ABI
1266     // defines varargs anyway.
1267     if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) {
1268       bool HasAVXType = false;
1269       for (CallArgList::const_iterator
1270              it = args.begin(), ie = args.end(); it != ie; ++it) {
1271         if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
1272           HasAVXType = true;
1273           break;
1274         }
1275       }
1276 
1277       if (!HasAVXType)
1278         return true;
1279     }
1280 
1281     return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
1282   }
1283 
1284 };
1285 
1286 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
1287   // If the argument does not end in .lib, automatically add the suffix. This
1288   // matches the behavior of MSVC.
1289   std::string ArgStr = Lib;
1290   if (Lib.size() <= 4 ||
1291       Lib.substr(Lib.size() - 4).compare_lower(".lib") != 0) {
1292     ArgStr += ".lib";
1293   }
1294   return ArgStr;
1295 }
1296 
1297 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
1298 public:
1299   WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
1300         bool d, bool p, bool w, unsigned RegParms)
1301     : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}
1302 
1303   void getDependentLibraryOption(llvm::StringRef Lib,
1304                                  llvm::SmallString<24> &Opt) const {
1305     Opt = "/DEFAULTLIB:";
1306     Opt += qualifyWindowsLibrary(Lib);
1307   }
1308 
1309   void getDetectMismatchOption(llvm::StringRef Name,
1310                                llvm::StringRef Value,
1311                                llvm::SmallString<32> &Opt) const {
1312     Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
1313   }
1314 };
1315 
1316 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
1317 public:
1318   WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
1319     : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
1320 
1321   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
1322     return 7;
1323   }
1324 
1325   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1326                                llvm::Value *Address) const {
1327     llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
1328 
1329     // 0-15 are the 16 integer registers.
1330     // 16 is %rip.
1331     AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
1332     return false;
1333   }
1334 
1335   void getDependentLibraryOption(llvm::StringRef Lib,
1336                                  llvm::SmallString<24> &Opt) const {
1337     Opt = "/DEFAULTLIB:";
1338     Opt += qualifyWindowsLibrary(Lib);
1339   }
1340 
1341   void getDetectMismatchOption(llvm::StringRef Name,
1342                                llvm::StringRef Value,
1343                                llvm::SmallString<32> &Opt) const {
1344     Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
1345   }
1346 };
1347 
1348 }
1349 
1350 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
1351                               Class &Hi) const {
1352   // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
1353   //
1354   // (a) If one of the classes is Memory, the whole argument is passed in
1355   //     memory.
1356   //
1357   // (b) If X87UP is not preceded by X87, the whole argument is passed in
1358   //     memory.
1359   //
1360   // (c) If the size of the aggregate exceeds two eightbytes and the first
1361   //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
1362   //     argument is passed in memory. NOTE: This is necessary to keep the
1363   //     ABI working for processors that don't support the __m256 type.
1364   //
1365   // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
1366   //
1367   // Some of these are enforced by the merging logic.  Others can arise
1368   // only with unions; for example:
1369   //   union { _Complex double; unsigned; }
1370   //
1371   // Note that clauses (b) and (c) were added in 0.98.
1372   //
1373   if (Hi == Memory)
1374     Lo = Memory;
1375   if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
1376     Lo = Memory;
1377   if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
1378     Lo = Memory;
1379   if (Hi == SSEUp && Lo != SSE)
1380     Hi = SSE;
1381 }
1382 
1383 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
1384   // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
1385   // classified recursively so that always two fields are
1386   // considered. The resulting class is calculated according to
1387   // the classes of the fields in the eightbyte:
1388   //
1389   // (a) If both classes are equal, this is the resulting class.
1390   //
1391   // (b) If one of the classes is NO_CLASS, the resulting class is
1392   // the other class.
1393   //
1394   // (c) If one of the classes is MEMORY, the result is the MEMORY
1395   // class.
1396   //
1397   // (d) If one of the classes is INTEGER, the result is the
1398   // INTEGER.
1399   //
1400   // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
1401   // MEMORY is used as class.
1402   //
1403   // (f) Otherwise class SSE is used.
1404 
1405   // Accum should never be memory (we should have returned) or
1406   // ComplexX87 (because this cannot be passed in a structure).
1407   assert((Accum != Memory && Accum != ComplexX87) &&
1408          "Invalid accumulated classification during merge.");
1409   if (Accum == Field || Field == NoClass)
1410     return Accum;
1411   if (Field == Memory)
1412     return Memory;
1413   if (Accum == NoClass)
1414     return Field;
1415   if (Accum == Integer || Field == Integer)
1416     return Integer;
1417   if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
1418       Accum == X87 || Accum == X87Up)
1419     return Memory;
1420   return SSE;
1421 }
1422 
1423 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
1424                              Class &Lo, Class &Hi, bool isNamedArg) const {
1425   // FIXME: This code can be simplified by introducing a simple value class for
1426   // Class pairs with appropriate constructor methods for the various
1427   // situations.
1428 
1429   // FIXME: Some of the split computations are wrong; unaligned vectors
1430   // shouldn't be passed in registers for example, so there is no chance they
1431   // can straddle an eightbyte. Verify & simplify.
1432 
1433   Lo = Hi = NoClass;
1434 
1435   Class &Current = OffsetBase < 64 ? Lo : Hi;
1436   Current = Memory;
1437 
1438   if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
1439     BuiltinType::Kind k = BT->getKind();
1440 
1441     if (k == BuiltinType::Void) {
1442       Current = NoClass;
1443     } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
1444       Lo = Integer;
1445       Hi = Integer;
1446     } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
1447       Current = Integer;
1448     } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
1449                (k == BuiltinType::LongDouble &&
1450                 getTarget().getTriple().getOS() == llvm::Triple::NaCl)) {
1451       Current = SSE;
1452     } else if (k == BuiltinType::LongDouble) {
1453       Lo = X87;
1454       Hi = X87Up;
1455     }
1456     // FIXME: _Decimal32 and _Decimal64 are SSE.
1457     // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
1458     return;
1459   }
1460 
1461   if (const EnumType *ET = Ty->getAs<EnumType>()) {
1462     // Classify the underlying integer type.
1463     classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
1464     return;
1465   }
1466 
1467   if (Ty->hasPointerRepresentation()) {
1468     Current = Integer;
1469     return;
1470   }
1471 
1472   if (Ty->isMemberPointerType()) {
1473     if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
1474       Lo = Hi = Integer;
1475     else
1476       Current = Integer;
1477     return;
1478   }
1479 
1480   if (const VectorType *VT = Ty->getAs<VectorType>()) {
1481     uint64_t Size = getContext().getTypeSize(VT);
1482     if (Size == 32) {
1483       // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
1484       // float> as integer.
1485       Current = Integer;
1486 
1487       // If this type crosses an eightbyte boundary, it should be
1488       // split.
1489       uint64_t EB_Real = (OffsetBase) / 64;
1490       uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
1491       if (EB_Real != EB_Imag)
1492         Hi = Lo;
1493     } else if (Size == 64) {
1494       // gcc passes <1 x double> in memory. :(
1495       if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
1496         return;
1497 
1498       // gcc passes <1 x long long> as INTEGER.
1499       if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
1500           VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
1501           VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
1502           VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
1503         Current = Integer;
1504       else
1505         Current = SSE;
1506 
1507       // If this type crosses an eightbyte boundary, it should be
1508       // split.
1509       if (OffsetBase && OffsetBase != 64)
1510         Hi = Lo;
1511     } else if (Size == 128 || (HasAVX && isNamedArg && Size == 256)) {
1512       // Arguments of 256-bits are split into four eightbyte chunks. The
1513       // least significant one belongs to class SSE and all the others to class
1514       // SSEUP. The original Lo and Hi design considers that types can't be
1515       // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
1516       // This design isn't correct for 256-bits, but since there're no cases
1517       // where the upper parts would need to be inspected, avoid adding
1518       // complexity and just consider Hi to match the 64-256 part.
1519       //
1520       // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
1521       // registers if they are "named", i.e. not part of the "..." of a
1522       // variadic function.
1523       Lo = SSE;
1524       Hi = SSEUp;
1525     }
1526     return;
1527   }
1528 
1529   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
1530     QualType ET = getContext().getCanonicalType(CT->getElementType());
1531 
1532     uint64_t Size = getContext().getTypeSize(Ty);
1533     if (ET->isIntegralOrEnumerationType()) {
1534       if (Size <= 64)
1535         Current = Integer;
1536       else if (Size <= 128)
1537         Lo = Hi = Integer;
1538     } else if (ET == getContext().FloatTy)
1539       Current = SSE;
1540     else if (ET == getContext().DoubleTy ||
1541              (ET == getContext().LongDoubleTy &&
1542               getTarget().getTriple().getOS() == llvm::Triple::NaCl))
1543       Lo = Hi = SSE;
1544     else if (ET == getContext().LongDoubleTy)
1545       Current = ComplexX87;
1546 
1547     // If this complex type crosses an eightbyte boundary then it
1548     // should be split.
1549     uint64_t EB_Real = (OffsetBase) / 64;
1550     uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
1551     if (Hi == NoClass && EB_Real != EB_Imag)
1552       Hi = Lo;
1553 
1554     return;
1555   }
1556 
1557   if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
1558     // Arrays are treated like structures.
1559 
1560     uint64_t Size = getContext().getTypeSize(Ty);
1561 
1562     // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1563     // than four eightbytes, ..., it has class MEMORY.
1564     if (Size > 256)
1565       return;
1566 
1567     // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
1568     // fields, it has class MEMORY.
1569     //
1570     // Only need to check alignment of array base.
1571     if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
1572       return;
1573 
1574     // Otherwise implement simplified merge. We could be smarter about
1575     // this, but it isn't worth it and would be harder to verify.
1576     Current = NoClass;
1577     uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
1578     uint64_t ArraySize = AT->getSize().getZExtValue();
1579 
1580     // The only case a 256-bit wide vector could be used is when the array
1581     // contains a single 256-bit element. Since Lo and Hi logic isn't extended
1582     // to work for sizes wider than 128, early check and fallback to memory.
1583     if (Size > 128 && EltSize != 256)
1584       return;
1585 
1586     for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
1587       Class FieldLo, FieldHi;
1588       classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
1589       Lo = merge(Lo, FieldLo);
1590       Hi = merge(Hi, FieldHi);
1591       if (Lo == Memory || Hi == Memory)
1592         break;
1593     }
1594 
1595     postMerge(Size, Lo, Hi);
1596     assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
1597     return;
1598   }
1599 
1600   if (const RecordType *RT = Ty->getAs<RecordType>()) {
1601     uint64_t Size = getContext().getTypeSize(Ty);
1602 
1603     // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1604     // than four eightbytes, ..., it has class MEMORY.
1605     if (Size > 256)
1606       return;
1607 
1608     // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
1609     // copy constructor or a non-trivial destructor, it is passed by invisible
1610     // reference.
1611     if (getRecordArgABI(RT, CGT))
1612       return;
1613 
1614     const RecordDecl *RD = RT->getDecl();
1615 
1616     // Assume variable sized types are passed in memory.
1617     if (RD->hasFlexibleArrayMember())
1618       return;
1619 
1620     const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
1621 
1622     // Reset Lo class, this will be recomputed.
1623     Current = NoClass;
1624 
1625     // If this is a C++ record, classify the bases first.
1626     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1627       for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
1628              e = CXXRD->bases_end(); i != e; ++i) {
1629         assert(!i->isVirtual() && !i->getType()->isDependentType() &&
1630                "Unexpected base class!");
1631         const CXXRecordDecl *Base =
1632           cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
1633 
1634         // Classify this field.
1635         //
1636         // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
1637         // single eightbyte, each is classified separately. Each eightbyte gets
1638         // initialized to class NO_CLASS.
1639         Class FieldLo, FieldHi;
1640         uint64_t Offset =
1641           OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
1642         classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
1643         Lo = merge(Lo, FieldLo);
1644         Hi = merge(Hi, FieldHi);
1645         if (Lo == Memory || Hi == Memory)
1646           break;
1647       }
1648     }
1649 
1650     // Classify the fields one at a time, merging the results.
1651     unsigned idx = 0;
1652     for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1653            i != e; ++i, ++idx) {
1654       uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1655       bool BitField = i->isBitField();
1656 
1657       // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
1658       // four eightbytes, or it contains unaligned fields, it has class MEMORY.
1659       //
1660       // The only case a 256-bit wide vector could be used is when the struct
1661       // contains a single 256-bit element. Since Lo and Hi logic isn't extended
1662       // to work for sizes wider than 128, early check and fallback to memory.
1663       //
1664       if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
1665         Lo = Memory;
1666         return;
1667       }
1668       // Note, skip this test for bit-fields, see below.
1669       if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
1670         Lo = Memory;
1671         return;
1672       }
1673 
1674       // Classify this field.
1675       //
1676       // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
1677       // exceeds a single eightbyte, each is classified
1678       // separately. Each eightbyte gets initialized to class
1679       // NO_CLASS.
1680       Class FieldLo, FieldHi;
1681 
1682       // Bit-fields require special handling, they do not force the
1683       // structure to be passed in memory even if unaligned, and
1684       // therefore they can straddle an eightbyte.
1685       if (BitField) {
1686         // Ignore padding bit-fields.
1687         if (i->isUnnamedBitfield())
1688           continue;
1689 
1690         uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1691         uint64_t Size = i->getBitWidthValue(getContext());
1692 
1693         uint64_t EB_Lo = Offset / 64;
1694         uint64_t EB_Hi = (Offset + Size - 1) / 64;
1695         FieldLo = FieldHi = NoClass;
1696         if (EB_Lo) {
1697           assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
1698           FieldLo = NoClass;
1699           FieldHi = Integer;
1700         } else {
1701           FieldLo = Integer;
1702           FieldHi = EB_Hi ? Integer : NoClass;
1703         }
1704       } else
1705         classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
1706       Lo = merge(Lo, FieldLo);
1707       Hi = merge(Hi, FieldHi);
1708       if (Lo == Memory || Hi == Memory)
1709         break;
1710     }
1711 
1712     postMerge(Size, Lo, Hi);
1713   }
1714 }
1715 
1716 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
1717   // If this is a scalar LLVM value then assume LLVM will pass it in the right
1718   // place naturally.
1719   if (!isAggregateTypeForABI(Ty)) {
1720     // Treat an enum type as its underlying type.
1721     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1722       Ty = EnumTy->getDecl()->getIntegerType();
1723 
1724     return (Ty->isPromotableIntegerType() ?
1725             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1726   }
1727 
1728   return ABIArgInfo::getIndirect(0);
1729 }
1730 
1731 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
1732   if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
1733     uint64_t Size = getContext().getTypeSize(VecTy);
1734     unsigned LargestVector = HasAVX ? 256 : 128;
1735     if (Size <= 64 || Size > LargestVector)
1736       return true;
1737   }
1738 
1739   return false;
1740 }
1741 
1742 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
1743                                             unsigned freeIntRegs) const {
1744   // If this is a scalar LLVM value then assume LLVM will pass it in the right
1745   // place naturally.
1746   //
1747   // This assumption is optimistic, as there could be free registers available
1748   // when we need to pass this argument in memory, and LLVM could try to pass
1749   // the argument in the free register. This does not seem to happen currently,
1750   // but this code would be much safer if we could mark the argument with
1751   // 'onstack'. See PR12193.
1752   if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
1753     // Treat an enum type as its underlying type.
1754     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1755       Ty = EnumTy->getDecl()->getIntegerType();
1756 
1757     return (Ty->isPromotableIntegerType() ?
1758             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1759   }
1760 
1761   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
1762     return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
1763 
1764   // Compute the byval alignment. We specify the alignment of the byval in all
1765   // cases so that the mid-level optimizer knows the alignment of the byval.
1766   unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
1767 
1768   // Attempt to avoid passing indirect results using byval when possible. This
1769   // is important for good codegen.
1770   //
1771   // We do this by coercing the value into a scalar type which the backend can
1772   // handle naturally (i.e., without using byval).
1773   //
1774   // For simplicity, we currently only do this when we have exhausted all of the
1775   // free integer registers. Doing this when there are free integer registers
1776   // would require more care, as we would have to ensure that the coerced value
1777   // did not claim the unused register. That would require either reording the
1778   // arguments to the function (so that any subsequent inreg values came first),
1779   // or only doing this optimization when there were no following arguments that
1780   // might be inreg.
1781   //
1782   // We currently expect it to be rare (particularly in well written code) for
1783   // arguments to be passed on the stack when there are still free integer
1784   // registers available (this would typically imply large structs being passed
1785   // by value), so this seems like a fair tradeoff for now.
1786   //
1787   // We can revisit this if the backend grows support for 'onstack' parameter
1788   // attributes. See PR12193.
1789   if (freeIntRegs == 0) {
1790     uint64_t Size = getContext().getTypeSize(Ty);
1791 
1792     // If this type fits in an eightbyte, coerce it into the matching integral
1793     // type, which will end up on the stack (with alignment 8).
1794     if (Align == 8 && Size <= 64)
1795       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1796                                                           Size));
1797   }
1798 
1799   return ABIArgInfo::getIndirect(Align);
1800 }
1801 
1802 /// GetByteVectorType - The ABI specifies that a value should be passed in an
1803 /// full vector XMM/YMM register.  Pick an LLVM IR type that will be passed as a
1804 /// vector register.
1805 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
1806   llvm::Type *IRType = CGT.ConvertType(Ty);
1807 
1808   // Wrapper structs that just contain vectors are passed just like vectors,
1809   // strip them off if present.
1810   llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
1811   while (STy && STy->getNumElements() == 1) {
1812     IRType = STy->getElementType(0);
1813     STy = dyn_cast<llvm::StructType>(IRType);
1814   }
1815 
1816   // If the preferred type is a 16-byte vector, prefer to pass it.
1817   if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
1818     llvm::Type *EltTy = VT->getElementType();
1819     unsigned BitWidth = VT->getBitWidth();
1820     if ((BitWidth >= 128 && BitWidth <= 256) &&
1821         (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
1822          EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
1823          EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
1824          EltTy->isIntegerTy(128)))
1825       return VT;
1826   }
1827 
1828   return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
1829 }
1830 
1831 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
1832 /// is known to either be off the end of the specified type or being in
1833 /// alignment padding.  The user type specified is known to be at most 128 bits
1834 /// in size, and have passed through X86_64ABIInfo::classify with a successful
1835 /// classification that put one of the two halves in the INTEGER class.
1836 ///
1837 /// It is conservatively correct to return false.
1838 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
1839                                   unsigned EndBit, ASTContext &Context) {
1840   // If the bytes being queried are off the end of the type, there is no user
1841   // data hiding here.  This handles analysis of builtins, vectors and other
1842   // types that don't contain interesting padding.
1843   unsigned TySize = (unsigned)Context.getTypeSize(Ty);
1844   if (TySize <= StartBit)
1845     return true;
1846 
1847   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
1848     unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
1849     unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
1850 
1851     // Check each element to see if the element overlaps with the queried range.
1852     for (unsigned i = 0; i != NumElts; ++i) {
1853       // If the element is after the span we care about, then we're done..
1854       unsigned EltOffset = i*EltSize;
1855       if (EltOffset >= EndBit) break;
1856 
1857       unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
1858       if (!BitsContainNoUserData(AT->getElementType(), EltStart,
1859                                  EndBit-EltOffset, Context))
1860         return false;
1861     }
1862     // If it overlaps no elements, then it is safe to process as padding.
1863     return true;
1864   }
1865 
1866   if (const RecordType *RT = Ty->getAs<RecordType>()) {
1867     const RecordDecl *RD = RT->getDecl();
1868     const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
1869 
1870     // If this is a C++ record, check the bases first.
1871     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1872       for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
1873            e = CXXRD->bases_end(); i != e; ++i) {
1874         assert(!i->isVirtual() && !i->getType()->isDependentType() &&
1875                "Unexpected base class!");
1876         const CXXRecordDecl *Base =
1877           cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
1878 
1879         // If the base is after the span we care about, ignore it.
1880         unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
1881         if (BaseOffset >= EndBit) continue;
1882 
1883         unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
1884         if (!BitsContainNoUserData(i->getType(), BaseStart,
1885                                    EndBit-BaseOffset, Context))
1886           return false;
1887       }
1888     }
1889 
1890     // Verify that no field has data that overlaps the region of interest.  Yes
1891     // this could be sped up a lot by being smarter about queried fields,
1892     // however we're only looking at structs up to 16 bytes, so we don't care
1893     // much.
1894     unsigned idx = 0;
1895     for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1896          i != e; ++i, ++idx) {
1897       unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
1898 
1899       // If we found a field after the region we care about, then we're done.
1900       if (FieldOffset >= EndBit) break;
1901 
1902       unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
1903       if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
1904                                  Context))
1905         return false;
1906     }
1907 
1908     // If nothing in this record overlapped the area of interest, then we're
1909     // clean.
1910     return true;
1911   }
1912 
1913   return false;
1914 }
1915 
1916 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
1917 /// float member at the specified offset.  For example, {int,{float}} has a
1918 /// float at offset 4.  It is conservatively correct for this routine to return
1919 /// false.
1920 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
1921                                   const llvm::DataLayout &TD) {
1922   // Base case if we find a float.
1923   if (IROffset == 0 && IRType->isFloatTy())
1924     return true;
1925 
1926   // If this is a struct, recurse into the field at the specified offset.
1927   if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
1928     const llvm::StructLayout *SL = TD.getStructLayout(STy);
1929     unsigned Elt = SL->getElementContainingOffset(IROffset);
1930     IROffset -= SL->getElementOffset(Elt);
1931     return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
1932   }
1933 
1934   // If this is an array, recurse into the field at the specified offset.
1935   if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
1936     llvm::Type *EltTy = ATy->getElementType();
1937     unsigned EltSize = TD.getTypeAllocSize(EltTy);
1938     IROffset -= IROffset/EltSize*EltSize;
1939     return ContainsFloatAtOffset(EltTy, IROffset, TD);
1940   }
1941 
1942   return false;
1943 }
1944 
1945 
1946 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
1947 /// low 8 bytes of an XMM register, corresponding to the SSE class.
1948 llvm::Type *X86_64ABIInfo::
1949 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
1950                    QualType SourceTy, unsigned SourceOffset) const {
1951   // The only three choices we have are either double, <2 x float>, or float. We
1952   // pass as float if the last 4 bytes is just padding.  This happens for
1953   // structs that contain 3 floats.
1954   if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
1955                             SourceOffset*8+64, getContext()))
1956     return llvm::Type::getFloatTy(getVMContext());
1957 
1958   // We want to pass as <2 x float> if the LLVM IR type contains a float at
1959   // offset+0 and offset+4.  Walk the LLVM IR type to find out if this is the
1960   // case.
1961   if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
1962       ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
1963     return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
1964 
1965   return llvm::Type::getDoubleTy(getVMContext());
1966 }
1967 
1968 
1969 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
1970 /// an 8-byte GPR.  This means that we either have a scalar or we are talking
1971 /// about the high or low part of an up-to-16-byte struct.  This routine picks
1972 /// the best LLVM IR type to represent this, which may be i64 or may be anything
1973 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
1974 /// etc).
1975 ///
1976 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
1977 /// the source type.  IROffset is an offset in bytes into the LLVM IR type that
1978 /// the 8-byte value references.  PrefType may be null.
1979 ///
1980 /// SourceTy is the source level type for the entire argument.  SourceOffset is
1981 /// an offset into this that we're processing (which is always either 0 or 8).
1982 ///
1983 llvm::Type *X86_64ABIInfo::
1984 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
1985                        QualType SourceTy, unsigned SourceOffset) const {
1986   // If we're dealing with an un-offset LLVM IR type, then it means that we're
1987   // returning an 8-byte unit starting with it.  See if we can safely use it.
1988   if (IROffset == 0) {
1989     // Pointers and int64's always fill the 8-byte unit.
1990     if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
1991         IRType->isIntegerTy(64))
1992       return IRType;
1993 
1994     // If we have a 1/2/4-byte integer, we can use it only if the rest of the
1995     // goodness in the source type is just tail padding.  This is allowed to
1996     // kick in for struct {double,int} on the int, but not on
1997     // struct{double,int,int} because we wouldn't return the second int.  We
1998     // have to do this analysis on the source type because we can't depend on
1999     // unions being lowered a specific way etc.
2000     if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
2001         IRType->isIntegerTy(32) ||
2002         (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2003       unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2004           cast<llvm::IntegerType>(IRType)->getBitWidth();
2005 
2006       if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
2007                                 SourceOffset*8+64, getContext()))
2008         return IRType;
2009     }
2010   }
2011 
2012   if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2013     // If this is a struct, recurse into the field at the specified offset.
2014     const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2015     if (IROffset < SL->getSizeInBytes()) {
2016       unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2017       IROffset -= SL->getElementOffset(FieldIdx);
2018 
2019       return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2020                                     SourceTy, SourceOffset);
2021     }
2022   }
2023 
2024   if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2025     llvm::Type *EltTy = ATy->getElementType();
2026     unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2027     unsigned EltOffset = IROffset/EltSize*EltSize;
2028     return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2029                                   SourceOffset);
2030   }
2031 
2032   // Okay, we don't have any better idea of what to pass, so we pass this in an
2033   // integer register that isn't too big to fit the rest of the struct.
2034   unsigned TySizeInBytes =
2035     (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2036 
2037   assert(TySizeInBytes != SourceOffset && "Empty field?");
2038 
2039   // It is always safe to classify this as an integer type up to i64 that
2040   // isn't larger than the structure.
2041   return llvm::IntegerType::get(getVMContext(),
2042                                 std::min(TySizeInBytes-SourceOffset, 8U)*8);
2043 }
2044 
2045 
2046 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
2047 /// be used as elements of a two register pair to pass or return, return a
2048 /// first class aggregate to represent them.  For example, if the low part of
2049 /// a by-value argument should be passed as i32* and the high part as float,
2050 /// return {i32*, float}.
2051 static llvm::Type *
2052 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
2053                            const llvm::DataLayout &TD) {
2054   // In order to correctly satisfy the ABI, we need to the high part to start
2055   // at offset 8.  If the high and low parts we inferred are both 4-byte types
2056   // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
2057   // the second element at offset 8.  Check for this:
2058   unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
2059   unsigned HiAlign = TD.getABITypeAlignment(Hi);
2060   unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
2061   assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
2062 
2063   // To handle this, we have to increase the size of the low part so that the
2064   // second element will start at an 8 byte offset.  We can't increase the size
2065   // of the second element because it might make us access off the end of the
2066   // struct.
2067   if (HiStart != 8) {
2068     // There are only two sorts of types the ABI generation code can produce for
2069     // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
2070     // Promote these to a larger type.
2071     if (Lo->isFloatTy())
2072       Lo = llvm::Type::getDoubleTy(Lo->getContext());
2073     else {
2074       assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
2075       Lo = llvm::Type::getInt64Ty(Lo->getContext());
2076     }
2077   }
2078 
2079   llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);
2080 
2081 
2082   // Verify that the second element is at an 8-byte offset.
2083   assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
2084          "Invalid x86-64 argument pair!");
2085   return Result;
2086 }
2087 
2088 ABIArgInfo X86_64ABIInfo::
2089 classifyReturnType(QualType RetTy) const {
2090   // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
2091   // classification algorithm.
2092   X86_64ABIInfo::Class Lo, Hi;
2093   classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
2094 
2095   // Check some invariants.
2096   assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2097   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2098 
2099   llvm::Type *ResType = 0;
2100   switch (Lo) {
2101   case NoClass:
2102     if (Hi == NoClass)
2103       return ABIArgInfo::getIgnore();
2104     // If the low part is just padding, it takes no register, leave ResType
2105     // null.
2106     assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2107            "Unknown missing lo part");
2108     break;
2109 
2110   case SSEUp:
2111   case X87Up:
2112     llvm_unreachable("Invalid classification for lo word.");
2113 
2114     // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
2115     // hidden argument.
2116   case Memory:
2117     return getIndirectReturnResult(RetTy);
2118 
2119     // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
2120     // available register of the sequence %rax, %rdx is used.
2121   case Integer:
2122     ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2123 
2124     // If we have a sign or zero extended integer, make sure to return Extend
2125     // so that the parameter gets the right LLVM IR attributes.
2126     if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2127       // Treat an enum type as its underlying type.
2128       if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
2129         RetTy = EnumTy->getDecl()->getIntegerType();
2130 
2131       if (RetTy->isIntegralOrEnumerationType() &&
2132           RetTy->isPromotableIntegerType())
2133         return ABIArgInfo::getExtend();
2134     }
2135     break;
2136 
2137     // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
2138     // available SSE register of the sequence %xmm0, %xmm1 is used.
2139   case SSE:
2140     ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2141     break;
2142 
2143     // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
2144     // returned on the X87 stack in %st0 as 80-bit x87 number.
2145   case X87:
2146     ResType = llvm::Type::getX86_FP80Ty(getVMContext());
2147     break;
2148 
2149     // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
2150     // part of the value is returned in %st0 and the imaginary part in
2151     // %st1.
2152   case ComplexX87:
2153     assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
2154     ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
2155                                     llvm::Type::getX86_FP80Ty(getVMContext()),
2156                                     NULL);
2157     break;
2158   }
2159 
2160   llvm::Type *HighPart = 0;
2161   switch (Hi) {
2162     // Memory was handled previously and X87 should
2163     // never occur as a hi class.
2164   case Memory:
2165   case X87:
2166     llvm_unreachable("Invalid classification for hi word.");
2167 
2168   case ComplexX87: // Previously handled.
2169   case NoClass:
2170     break;
2171 
2172   case Integer:
2173     HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2174     if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
2175       return ABIArgInfo::getDirect(HighPart, 8);
2176     break;
2177   case SSE:
2178     HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2179     if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
2180       return ABIArgInfo::getDirect(HighPart, 8);
2181     break;
2182 
2183     // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
2184     // is passed in the next available eightbyte chunk if the last used
2185     // vector register.
2186     //
2187     // SSEUP should always be preceded by SSE, just widen.
2188   case SSEUp:
2189     assert(Lo == SSE && "Unexpected SSEUp classification.");
2190     ResType = GetByteVectorType(RetTy);
2191     break;
2192 
2193     // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
2194     // returned together with the previous X87 value in %st0.
2195   case X87Up:
2196     // If X87Up is preceded by X87, we don't need to do
2197     // anything. However, in some cases with unions it may not be
2198     // preceded by X87. In such situations we follow gcc and pass the
2199     // extra bits in an SSE reg.
2200     if (Lo != X87) {
2201       HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2202       if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
2203         return ABIArgInfo::getDirect(HighPart, 8);
2204     }
2205     break;
2206   }
2207 
2208   // If a high part was specified, merge it together with the low part.  It is
2209   // known to pass in the high eightbyte of the result.  We do this by forming a
2210   // first class struct aggregate with the high and low part: {low, high}
2211   if (HighPart)
2212     ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
2213 
2214   return ABIArgInfo::getDirect(ResType);
2215 }
2216 
2217 ABIArgInfo X86_64ABIInfo::classifyArgumentType(
2218   QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
2219   bool isNamedArg)
2220   const
2221 {
2222   X86_64ABIInfo::Class Lo, Hi;
2223   classify(Ty, 0, Lo, Hi, isNamedArg);
2224 
2225   // Check some invariants.
2226   // FIXME: Enforce these by construction.
2227   assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2228   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2229 
2230   neededInt = 0;
2231   neededSSE = 0;
2232   llvm::Type *ResType = 0;
2233   switch (Lo) {
2234   case NoClass:
2235     if (Hi == NoClass)
2236       return ABIArgInfo::getIgnore();
2237     // If the low part is just padding, it takes no register, leave ResType
2238     // null.
2239     assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2240            "Unknown missing lo part");
2241     break;
2242 
2243     // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
2244     // on the stack.
2245   case Memory:
2246 
2247     // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
2248     // COMPLEX_X87, it is passed in memory.
2249   case X87:
2250   case ComplexX87:
2251     if (getRecordArgABI(Ty, CGT) == CGCXXABI::RAA_Indirect)
2252       ++neededInt;
2253     return getIndirectResult(Ty, freeIntRegs);
2254 
2255   case SSEUp:
2256   case X87Up:
2257     llvm_unreachable("Invalid classification for lo word.");
2258 
2259     // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
2260     // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
2261     // and %r9 is used.
2262   case Integer:
2263     ++neededInt;
2264 
2265     // Pick an 8-byte type based on the preferred type.
2266     ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
2267 
2268     // If we have a sign or zero extended integer, make sure to return Extend
2269     // so that the parameter gets the right LLVM IR attributes.
2270     if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2271       // Treat an enum type as its underlying type.
2272       if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2273         Ty = EnumTy->getDecl()->getIntegerType();
2274 
2275       if (Ty->isIntegralOrEnumerationType() &&
2276           Ty->isPromotableIntegerType())
2277         return ABIArgInfo::getExtend();
2278     }
2279 
2280     break;
2281 
2282     // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
2283     // available SSE register is used, the registers are taken in the
2284     // order from %xmm0 to %xmm7.
2285   case SSE: {
2286     llvm::Type *IRType = CGT.ConvertType(Ty);
2287     ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
2288     ++neededSSE;
2289     break;
2290   }
2291   }
2292 
2293   llvm::Type *HighPart = 0;
2294   switch (Hi) {
2295     // Memory was handled previously, ComplexX87 and X87 should
2296     // never occur as hi classes, and X87Up must be preceded by X87,
2297     // which is passed in memory.
2298   case Memory:
2299   case X87:
2300   case ComplexX87:
2301     llvm_unreachable("Invalid classification for hi word.");
2302 
2303   case NoClass: break;
2304 
2305   case Integer:
2306     ++neededInt;
2307     // Pick an 8-byte type based on the preferred type.
2308     HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2309 
2310     if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
2311       return ABIArgInfo::getDirect(HighPart, 8);
2312     break;
2313 
2314     // X87Up generally doesn't occur here (long double is passed in
2315     // memory), except in situations involving unions.
2316   case X87Up:
2317   case SSE:
2318     HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2319 
2320     if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
2321       return ABIArgInfo::getDirect(HighPart, 8);
2322 
2323     ++neededSSE;
2324     break;
2325 
2326     // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
2327     // eightbyte is passed in the upper half of the last used SSE
2328     // register.  This only happens when 128-bit vectors are passed.
2329   case SSEUp:
2330     assert(Lo == SSE && "Unexpected SSEUp classification");
2331     ResType = GetByteVectorType(Ty);
2332     break;
2333   }
2334 
2335   // If a high part was specified, merge it together with the low part.  It is
2336   // known to pass in the high eightbyte of the result.  We do this by forming a
2337   // first class struct aggregate with the high and low part: {low, high}
2338   if (HighPart)
2339     ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
2340 
2341   return ABIArgInfo::getDirect(ResType);
2342 }
2343 
2344 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2345 
2346   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2347 
2348   // Keep track of the number of assigned registers.
2349   unsigned freeIntRegs = 6, freeSSERegs = 8;
2350 
2351   // If the return value is indirect, then the hidden argument is consuming one
2352   // integer register.
2353   if (FI.getReturnInfo().isIndirect())
2354     --freeIntRegs;
2355 
2356   bool isVariadic = FI.isVariadic();
2357   unsigned numRequiredArgs = 0;
2358   if (isVariadic)
2359     numRequiredArgs = FI.getRequiredArgs().getNumRequiredArgs();
2360 
2361   // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
2362   // get assigned (in left-to-right order) for passing as follows...
2363   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2364        it != ie; ++it) {
2365     bool isNamedArg = true;
2366     if (isVariadic)
2367       isNamedArg = (it - FI.arg_begin()) <
2368                     static_cast<signed>(numRequiredArgs);
2369 
2370     unsigned neededInt, neededSSE;
2371     it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
2372                                     neededSSE, isNamedArg);
2373 
2374     // AMD64-ABI 3.2.3p3: If there are no registers available for any
2375     // eightbyte of an argument, the whole argument is passed on the
2376     // stack. If registers have already been assigned for some
2377     // eightbytes of such an argument, the assignments get reverted.
2378     if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
2379       freeIntRegs -= neededInt;
2380       freeSSERegs -= neededSSE;
2381     } else {
2382       it->info = getIndirectResult(it->type, freeIntRegs);
2383     }
2384   }
2385 }
2386 
2387 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
2388                                         QualType Ty,
2389                                         CodeGenFunction &CGF) {
2390   llvm::Value *overflow_arg_area_p =
2391     CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
2392   llvm::Value *overflow_arg_area =
2393     CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
2394 
2395   // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
2396   // byte boundary if alignment needed by type exceeds 8 byte boundary.
2397   // It isn't stated explicitly in the standard, but in practice we use
2398   // alignment greater than 16 where necessary.
2399   uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
2400   if (Align > 8) {
2401     // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
2402     llvm::Value *Offset =
2403       llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
2404     overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
2405     llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
2406                                                     CGF.Int64Ty);
2407     llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
2408     overflow_arg_area =
2409       CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
2410                                  overflow_arg_area->getType(),
2411                                  "overflow_arg_area.align");
2412   }
2413 
2414   // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
2415   llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
2416   llvm::Value *Res =
2417     CGF.Builder.CreateBitCast(overflow_arg_area,
2418                               llvm::PointerType::getUnqual(LTy));
2419 
2420   // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
2421   // l->overflow_arg_area + sizeof(type).
2422   // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
2423   // an 8 byte boundary.
2424 
2425   uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
2426   llvm::Value *Offset =
2427       llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7)  & ~7);
2428   overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
2429                                             "overflow_arg_area.next");
2430   CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
2431 
2432   // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
2433   return Res;
2434 }
2435 
2436 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2437                                       CodeGenFunction &CGF) const {
2438   // Assume that va_list type is correct; should be pointer to LLVM type:
2439   // struct {
2440   //   i32 gp_offset;
2441   //   i32 fp_offset;
2442   //   i8* overflow_arg_area;
2443   //   i8* reg_save_area;
2444   // };
2445   unsigned neededInt, neededSSE;
2446 
2447   Ty = CGF.getContext().getCanonicalType(Ty);
2448   ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
2449                                        /*isNamedArg*/false);
2450 
2451   // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
2452   // in the registers. If not go to step 7.
2453   if (!neededInt && !neededSSE)
2454     return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2455 
2456   // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
2457   // general purpose registers needed to pass type and num_fp to hold
2458   // the number of floating point registers needed.
2459 
2460   // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
2461   // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
2462   // l->fp_offset > 304 - num_fp * 16 go to step 7.
2463   //
2464   // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
2465   // register save space).
2466 
2467   llvm::Value *InRegs = 0;
2468   llvm::Value *gp_offset_p = 0, *gp_offset = 0;
2469   llvm::Value *fp_offset_p = 0, *fp_offset = 0;
2470   if (neededInt) {
2471     gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
2472     gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
2473     InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
2474     InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
2475   }
2476 
2477   if (neededSSE) {
2478     fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
2479     fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
2480     llvm::Value *FitsInFP =
2481       llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
2482     FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
2483     InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
2484   }
2485 
2486   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
2487   llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
2488   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
2489   CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
2490 
2491   // Emit code to load the value if it was passed in registers.
2492 
2493   CGF.EmitBlock(InRegBlock);
2494 
2495   // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
2496   // an offset of l->gp_offset and/or l->fp_offset. This may require
2497   // copying to a temporary location in case the parameter is passed
2498   // in different register classes or requires an alignment greater
2499   // than 8 for general purpose registers and 16 for XMM registers.
2500   //
2501   // FIXME: This really results in shameful code when we end up needing to
2502   // collect arguments from different places; often what should result in a
2503   // simple assembling of a structure from scattered addresses has many more
2504   // loads than necessary. Can we clean this up?
2505   llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
2506   llvm::Value *RegAddr =
2507     CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
2508                            "reg_save_area");
2509   if (neededInt && neededSSE) {
2510     // FIXME: Cleanup.
2511     assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
2512     llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
2513     llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
2514     Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
2515     assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
2516     llvm::Type *TyLo = ST->getElementType(0);
2517     llvm::Type *TyHi = ST->getElementType(1);
2518     assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
2519            "Unexpected ABI info for mixed regs");
2520     llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
2521     llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
2522     llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2523     llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2524     llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
2525     llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
2526     llvm::Value *V =
2527       CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
2528     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2529     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
2530     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2531 
2532     RegAddr = CGF.Builder.CreateBitCast(Tmp,
2533                                         llvm::PointerType::getUnqual(LTy));
2534   } else if (neededInt) {
2535     RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2536     RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2537                                         llvm::PointerType::getUnqual(LTy));
2538 
2539     // Copy to a temporary if necessary to ensure the appropriate alignment.
2540     std::pair<CharUnits, CharUnits> SizeAlign =
2541         CGF.getContext().getTypeInfoInChars(Ty);
2542     uint64_t TySize = SizeAlign.first.getQuantity();
2543     unsigned TyAlign = SizeAlign.second.getQuantity();
2544     if (TyAlign > 8) {
2545       llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
2546       CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false);
2547       RegAddr = Tmp;
2548     }
2549   } else if (neededSSE == 1) {
2550     RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2551     RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2552                                         llvm::PointerType::getUnqual(LTy));
2553   } else {
2554     assert(neededSSE == 2 && "Invalid number of needed registers!");
2555     // SSE registers are spaced 16 bytes apart in the register save
2556     // area, we need to collect the two eightbytes together.
2557     llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2558     llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
2559     llvm::Type *DoubleTy = CGF.DoubleTy;
2560     llvm::Type *DblPtrTy =
2561       llvm::PointerType::getUnqual(DoubleTy);
2562     llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, NULL);
2563     llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty);
2564     Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
2565     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
2566                                                          DblPtrTy));
2567     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2568     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
2569                                                          DblPtrTy));
2570     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2571     RegAddr = CGF.Builder.CreateBitCast(Tmp,
2572                                         llvm::PointerType::getUnqual(LTy));
2573   }
2574 
2575   // AMD64-ABI 3.5.7p5: Step 5. Set:
2576   // l->gp_offset = l->gp_offset + num_gp * 8
2577   // l->fp_offset = l->fp_offset + num_fp * 16.
2578   if (neededInt) {
2579     llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
2580     CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
2581                             gp_offset_p);
2582   }
2583   if (neededSSE) {
2584     llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
2585     CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
2586                             fp_offset_p);
2587   }
2588   CGF.EmitBranch(ContBlock);
2589 
2590   // Emit code to load the value if it was passed in memory.
2591 
2592   CGF.EmitBlock(InMemBlock);
2593   llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2594 
2595   // Return the appropriate result.
2596 
2597   CGF.EmitBlock(ContBlock);
2598   llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
2599                                                  "vaarg.addr");
2600   ResAddr->addIncoming(RegAddr, InRegBlock);
2601   ResAddr->addIncoming(MemAddr, InMemBlock);
2602   return ResAddr;
2603 }
2604 
2605 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, bool IsReturnType) const {
2606 
2607   if (Ty->isVoidType())
2608     return ABIArgInfo::getIgnore();
2609 
2610   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2611     Ty = EnumTy->getDecl()->getIntegerType();
2612 
2613   uint64_t Size = getContext().getTypeSize(Ty);
2614 
2615   if (const RecordType *RT = Ty->getAs<RecordType>()) {
2616     if (IsReturnType) {
2617       if (isRecordReturnIndirect(RT, CGT))
2618         return ABIArgInfo::getIndirect(0, false);
2619     } else {
2620       if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, CGT))
2621         return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
2622     }
2623 
2624     if (RT->getDecl()->hasFlexibleArrayMember())
2625       return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2626 
2627     // FIXME: mingw-w64-gcc emits 128-bit struct as i128
2628     if (Size == 128 && getTarget().getTriple().getOS() == llvm::Triple::MinGW32)
2629       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2630                                                           Size));
2631 
2632     // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
2633     // not 1, 2, 4, or 8 bytes, must be passed by reference."
2634     if (Size <= 64 &&
2635         (Size & (Size - 1)) == 0)
2636       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2637                                                           Size));
2638 
2639     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2640   }
2641 
2642   if (Ty->isPromotableIntegerType())
2643     return ABIArgInfo::getExtend();
2644 
2645   return ABIArgInfo::getDirect();
2646 }
2647 
2648 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2649 
2650   QualType RetTy = FI.getReturnType();
2651   FI.getReturnInfo() = classify(RetTy, true);
2652 
2653   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2654        it != ie; ++it)
2655     it->info = classify(it->type, false);
2656 }
2657 
2658 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2659                                       CodeGenFunction &CGF) const {
2660   llvm::Type *BPP = CGF.Int8PtrPtrTy;
2661 
2662   CGBuilderTy &Builder = CGF.Builder;
2663   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
2664                                                        "ap");
2665   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2666   llvm::Type *PTy =
2667     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2668   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
2669 
2670   uint64_t Offset =
2671     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
2672   llvm::Value *NextAddr =
2673     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
2674                       "ap.next");
2675   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2676 
2677   return AddrTyped;
2678 }
2679 
2680 namespace {
2681 
2682 class NaClX86_64ABIInfo : public ABIInfo {
2683  public:
2684   NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
2685       : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
2686   virtual void computeInfo(CGFunctionInfo &FI) const;
2687   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2688                                  CodeGenFunction &CGF) const;
2689  private:
2690   PNaClABIInfo PInfo;  // Used for generating calls with pnaclcall callingconv.
2691   X86_64ABIInfo NInfo; // Used for everything else.
2692 };
2693 
2694 class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo  {
2695  public:
2696   NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
2697       : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {}
2698 };
2699 
2700 }
2701 
2702 void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2703   if (FI.getASTCallingConvention() == CC_PnaclCall)
2704     PInfo.computeInfo(FI);
2705   else
2706     NInfo.computeInfo(FI);
2707 }
2708 
2709 llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2710                                           CodeGenFunction &CGF) const {
2711   // Always use the native convention; calling pnacl-style varargs functions
2712   // is unuspported.
2713   return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
2714 }
2715 
2716 
2717 // PowerPC-32
2718 
2719 namespace {
2720 class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2721 public:
2722   PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2723 
2724   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2725     // This is recovered from gcc output.
2726     return 1; // r1 is the dedicated stack pointer
2727   }
2728 
2729   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2730                                llvm::Value *Address) const;
2731 };
2732 
2733 }
2734 
2735 bool
2736 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2737                                                 llvm::Value *Address) const {
2738   // This is calculated from the LLVM and GCC tables and verified
2739   // against gcc output.  AFAIK all ABIs use the same encoding.
2740 
2741   CodeGen::CGBuilderTy &Builder = CGF.Builder;
2742 
2743   llvm::IntegerType *i8 = CGF.Int8Ty;
2744   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2745   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
2746   llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
2747 
2748   // 0-31: r0-31, the 4-byte general-purpose registers
2749   AssignToArrayRange(Builder, Address, Four8, 0, 31);
2750 
2751   // 32-63: fp0-31, the 8-byte floating-point registers
2752   AssignToArrayRange(Builder, Address, Eight8, 32, 63);
2753 
2754   // 64-76 are various 4-byte special-purpose registers:
2755   // 64: mq
2756   // 65: lr
2757   // 66: ctr
2758   // 67: ap
2759   // 68-75 cr0-7
2760   // 76: xer
2761   AssignToArrayRange(Builder, Address, Four8, 64, 76);
2762 
2763   // 77-108: v0-31, the 16-byte vector registers
2764   AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
2765 
2766   // 109: vrsave
2767   // 110: vscr
2768   // 111: spe_acc
2769   // 112: spefscr
2770   // 113: sfp
2771   AssignToArrayRange(Builder, Address, Four8, 109, 113);
2772 
2773   return false;
2774 }
2775 
2776 // PowerPC-64
2777 
2778 namespace {
2779 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
2780 class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
2781 
2782 public:
2783   PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
2784 
2785   bool isPromotableTypeForABI(QualType Ty) const;
2786 
2787   ABIArgInfo classifyReturnType(QualType RetTy) const;
2788   ABIArgInfo classifyArgumentType(QualType Ty) const;
2789 
2790   // TODO: We can add more logic to computeInfo to improve performance.
2791   // Example: For aggregate arguments that fit in a register, we could
2792   // use getDirectInReg (as is done below for structs containing a single
2793   // floating-point value) to avoid pushing them to memory on function
2794   // entry.  This would require changing the logic in PPCISelLowering
2795   // when lowering the parameters in the caller and args in the callee.
2796   virtual void computeInfo(CGFunctionInfo &FI) const {
2797     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2798     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2799          it != ie; ++it) {
2800       // We rely on the default argument classification for the most part.
2801       // One exception:  An aggregate containing a single floating-point
2802       // item must be passed in a register if one is available.
2803       const Type *T = isSingleElementStruct(it->type, getContext());
2804       if (T) {
2805         const BuiltinType *BT = T->getAs<BuiltinType>();
2806         if (BT && BT->isFloatingPoint()) {
2807           QualType QT(T, 0);
2808           it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
2809           continue;
2810         }
2811       }
2812       it->info = classifyArgumentType(it->type);
2813     }
2814   }
2815 
2816   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr,
2817                                  QualType Ty,
2818                                  CodeGenFunction &CGF) const;
2819 };
2820 
2821 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
2822 public:
2823   PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT)
2824     : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {}
2825 
2826   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2827     // This is recovered from gcc output.
2828     return 1; // r1 is the dedicated stack pointer
2829   }
2830 
2831   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2832                                llvm::Value *Address) const;
2833 };
2834 
2835 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2836 public:
2837   PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2838 
2839   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2840     // This is recovered from gcc output.
2841     return 1; // r1 is the dedicated stack pointer
2842   }
2843 
2844   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2845                                llvm::Value *Address) const;
2846 };
2847 
2848 }
2849 
2850 // Return true if the ABI requires Ty to be passed sign- or zero-
2851 // extended to 64 bits.
2852 bool
2853 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
2854   // Treat an enum type as its underlying type.
2855   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2856     Ty = EnumTy->getDecl()->getIntegerType();
2857 
2858   // Promotable integer types are required to be promoted by the ABI.
2859   if (Ty->isPromotableIntegerType())
2860     return true;
2861 
2862   // In addition to the usual promotable integer types, we also need to
2863   // extend all 32-bit types, since the ABI requires promotion to 64 bits.
2864   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
2865     switch (BT->getKind()) {
2866     case BuiltinType::Int:
2867     case BuiltinType::UInt:
2868       return true;
2869     default:
2870       break;
2871     }
2872 
2873   return false;
2874 }
2875 
2876 ABIArgInfo
2877 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
2878   if (Ty->isAnyComplexType())
2879     return ABIArgInfo::getDirect();
2880 
2881   if (isAggregateTypeForABI(Ty)) {
2882     if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
2883       return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
2884 
2885     return ABIArgInfo::getIndirect(0);
2886   }
2887 
2888   return (isPromotableTypeForABI(Ty) ?
2889           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2890 }
2891 
2892 ABIArgInfo
2893 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
2894   if (RetTy->isVoidType())
2895     return ABIArgInfo::getIgnore();
2896 
2897   if (RetTy->isAnyComplexType())
2898     return ABIArgInfo::getDirect();
2899 
2900   if (isAggregateTypeForABI(RetTy))
2901     return ABIArgInfo::getIndirect(0);
2902 
2903   return (isPromotableTypeForABI(RetTy) ?
2904           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2905 }
2906 
2907 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
2908 llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
2909                                            QualType Ty,
2910                                            CodeGenFunction &CGF) const {
2911   llvm::Type *BP = CGF.Int8PtrTy;
2912   llvm::Type *BPP = CGF.Int8PtrPtrTy;
2913 
2914   CGBuilderTy &Builder = CGF.Builder;
2915   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
2916   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2917 
2918   // Update the va_list pointer.  The pointer should be bumped by the
2919   // size of the object.  We can trust getTypeSize() except for a complex
2920   // type whose base type is smaller than a doubleword.  For these, the
2921   // size of the object is 16 bytes; see below for further explanation.
2922   unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
2923   QualType BaseTy;
2924   unsigned CplxBaseSize = 0;
2925 
2926   if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
2927     BaseTy = CTy->getElementType();
2928     CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8;
2929     if (CplxBaseSize < 8)
2930       SizeInBytes = 16;
2931   }
2932 
2933   unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
2934   llvm::Value *NextAddr =
2935     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
2936                       "ap.next");
2937   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2938 
2939   // If we have a complex type and the base type is smaller than 8 bytes,
2940   // the ABI calls for the real and imaginary parts to be right-adjusted
2941   // in separate doublewords.  However, Clang expects us to produce a
2942   // pointer to a structure with the two parts packed tightly.  So generate
2943   // loads of the real and imaginary parts relative to the va_list pointer,
2944   // and store them to a temporary structure.
2945   if (CplxBaseSize && CplxBaseSize < 8) {
2946     llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
2947     llvm::Value *ImagAddr = RealAddr;
2948     RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
2949     ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
2950     llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy));
2951     RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy);
2952     ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
2953     llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal");
2954     llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag");
2955     llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty),
2956                                             "vacplx");
2957     llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real");
2958     llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag");
2959     Builder.CreateStore(Real, RealPtr, false);
2960     Builder.CreateStore(Imag, ImagPtr, false);
2961     return Ptr;
2962   }
2963 
2964   // If the argument is smaller than 8 bytes, it is right-adjusted in
2965   // its doubleword slot.  Adjust the pointer to pick it up from the
2966   // correct offset.
2967   if (SizeInBytes < 8) {
2968     llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
2969     AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
2970     Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
2971   }
2972 
2973   llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2974   return Builder.CreateBitCast(Addr, PTy);
2975 }
2976 
2977 static bool
2978 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2979                               llvm::Value *Address) {
2980   // This is calculated from the LLVM and GCC tables and verified
2981   // against gcc output.  AFAIK all ABIs use the same encoding.
2982 
2983   CodeGen::CGBuilderTy &Builder = CGF.Builder;
2984 
2985   llvm::IntegerType *i8 = CGF.Int8Ty;
2986   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2987   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
2988   llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
2989 
2990   // 0-31: r0-31, the 8-byte general-purpose registers
2991   AssignToArrayRange(Builder, Address, Eight8, 0, 31);
2992 
2993   // 32-63: fp0-31, the 8-byte floating-point registers
2994   AssignToArrayRange(Builder, Address, Eight8, 32, 63);
2995 
2996   // 64-76 are various 4-byte special-purpose registers:
2997   // 64: mq
2998   // 65: lr
2999   // 66: ctr
3000   // 67: ap
3001   // 68-75 cr0-7
3002   // 76: xer
3003   AssignToArrayRange(Builder, Address, Four8, 64, 76);
3004 
3005   // 77-108: v0-31, the 16-byte vector registers
3006   AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
3007 
3008   // 109: vrsave
3009   // 110: vscr
3010   // 111: spe_acc
3011   // 112: spefscr
3012   // 113: sfp
3013   AssignToArrayRange(Builder, Address, Four8, 109, 113);
3014 
3015   return false;
3016 }
3017 
3018 bool
3019 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
3020   CodeGen::CodeGenFunction &CGF,
3021   llvm::Value *Address) const {
3022 
3023   return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3024 }
3025 
3026 bool
3027 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3028                                                 llvm::Value *Address) const {
3029 
3030   return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3031 }
3032 
3033 //===----------------------------------------------------------------------===//
3034 // ARM ABI Implementation
3035 //===----------------------------------------------------------------------===//
3036 
3037 namespace {
3038 
3039 class ARMABIInfo : public ABIInfo {
3040 public:
3041   enum ABIKind {
3042     APCS = 0,
3043     AAPCS = 1,
3044     AAPCS_VFP
3045   };
3046 
3047 private:
3048   ABIKind Kind;
3049 
3050 public:
3051   ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {
3052     setRuntimeCC();
3053   }
3054 
3055   bool isEABI() const {
3056     StringRef Env = getTarget().getTriple().getEnvironmentName();
3057     return (Env == "gnueabi" || Env == "eabi" ||
3058             Env == "android" || Env == "androideabi");
3059   }
3060 
3061 private:
3062   ABIKind getABIKind() const { return Kind; }
3063 
3064   ABIArgInfo classifyReturnType(QualType RetTy) const;
3065   ABIArgInfo classifyArgumentType(QualType RetTy, int *VFPRegs,
3066                                   unsigned &AllocatedVFP,
3067                                   bool &IsHA) const;
3068   bool isIllegalVectorType(QualType Ty) const;
3069 
3070   virtual void computeInfo(CGFunctionInfo &FI) const;
3071 
3072   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3073                                  CodeGenFunction &CGF) const;
3074 
3075   llvm::CallingConv::ID getLLVMDefaultCC() const;
3076   llvm::CallingConv::ID getABIDefaultCC() const;
3077   void setRuntimeCC();
3078 };
3079 
3080 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
3081 public:
3082   ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
3083     :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
3084 
3085   const ARMABIInfo &getABIInfo() const {
3086     return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
3087   }
3088 
3089   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
3090     return 13;
3091   }
3092 
3093   StringRef getARCRetainAutoreleasedReturnValueMarker() const {
3094     return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
3095   }
3096 
3097   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3098                                llvm::Value *Address) const {
3099     llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
3100 
3101     // 0-15 are the 16 integer registers.
3102     AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
3103     return false;
3104   }
3105 
3106   unsigned getSizeOfUnwindException() const {
3107     if (getABIInfo().isEABI()) return 88;
3108     return TargetCodeGenInfo::getSizeOfUnwindException();
3109   }
3110 };
3111 
3112 }
3113 
3114 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
3115   // To correctly handle Homogeneous Aggregate, we need to keep track of the
3116   // VFP registers allocated so far.
3117   // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
3118   // VFP registers of the appropriate type unallocated then the argument is
3119   // allocated to the lowest-numbered sequence of such registers.
3120   // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
3121   // unallocated are marked as unavailable.
3122   unsigned AllocatedVFP = 0;
3123   int VFPRegs[16] = { 0 };
3124   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3125   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3126        it != ie; ++it) {
3127     unsigned PreAllocation = AllocatedVFP;
3128     bool IsHA = false;
3129     // 6.1.2.3 There is one VFP co-processor register class using registers
3130     // s0-s15 (d0-d7) for passing arguments.
3131     const unsigned NumVFPs = 16;
3132     it->info = classifyArgumentType(it->type, VFPRegs, AllocatedVFP, IsHA);
3133     // If we do not have enough VFP registers for the HA, any VFP registers
3134     // that are unallocated are marked as unavailable. To achieve this, we add
3135     // padding of (NumVFPs - PreAllocation) floats.
3136     if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
3137       llvm::Type *PaddingTy = llvm::ArrayType::get(
3138           llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
3139       it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
3140     }
3141   }
3142 
3143   // Always honor user-specified calling convention.
3144   if (FI.getCallingConvention() != llvm::CallingConv::C)
3145     return;
3146 
3147   llvm::CallingConv::ID cc = getRuntimeCC();
3148   if (cc != llvm::CallingConv::C)
3149     FI.setEffectiveCallingConvention(cc);
3150 }
3151 
3152 /// Return the default calling convention that LLVM will use.
3153 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
3154   // The default calling convention that LLVM will infer.
3155   if (getTarget().getTriple().getEnvironmentName()=="gnueabihf")
3156     return llvm::CallingConv::ARM_AAPCS_VFP;
3157   else if (isEABI())
3158     return llvm::CallingConv::ARM_AAPCS;
3159   else
3160     return llvm::CallingConv::ARM_APCS;
3161 }
3162 
3163 /// Return the calling convention that our ABI would like us to use
3164 /// as the C calling convention.
3165 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
3166   switch (getABIKind()) {
3167   case APCS: return llvm::CallingConv::ARM_APCS;
3168   case AAPCS: return llvm::CallingConv::ARM_AAPCS;
3169   case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
3170   }
3171   llvm_unreachable("bad ABI kind");
3172 }
3173 
3174 void ARMABIInfo::setRuntimeCC() {
3175   assert(getRuntimeCC() == llvm::CallingConv::C);
3176 
3177   // Don't muddy up the IR with a ton of explicit annotations if
3178   // they'd just match what LLVM will infer from the triple.
3179   llvm::CallingConv::ID abiCC = getABIDefaultCC();
3180   if (abiCC != getLLVMDefaultCC())
3181     RuntimeCC = abiCC;
3182 }
3183 
3184 /// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous
3185 /// aggregate.  If HAMembers is non-null, the number of base elements
3186 /// contained in the type is returned through it; this is used for the
3187 /// recursive calls that check aggregate component types.
3188 static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
3189                                    ASTContext &Context,
3190                                    uint64_t *HAMembers = 0) {
3191   uint64_t Members = 0;
3192   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
3193     if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
3194       return false;
3195     Members *= AT->getSize().getZExtValue();
3196   } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
3197     const RecordDecl *RD = RT->getDecl();
3198     if (RD->hasFlexibleArrayMember())
3199       return false;
3200 
3201     Members = 0;
3202     for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3203          i != e; ++i) {
3204       const FieldDecl *FD = *i;
3205       uint64_t FldMembers;
3206       if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers))
3207         return false;
3208 
3209       Members = (RD->isUnion() ?
3210                  std::max(Members, FldMembers) : Members + FldMembers);
3211     }
3212   } else {
3213     Members = 1;
3214     if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
3215       Members = 2;
3216       Ty = CT->getElementType();
3217     }
3218 
3219     // Homogeneous aggregates for AAPCS-VFP must have base types of float,
3220     // double, or 64-bit or 128-bit vectors.
3221     if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3222       if (BT->getKind() != BuiltinType::Float &&
3223           BT->getKind() != BuiltinType::Double &&
3224           BT->getKind() != BuiltinType::LongDouble)
3225         return false;
3226     } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
3227       unsigned VecSize = Context.getTypeSize(VT);
3228       if (VecSize != 64 && VecSize != 128)
3229         return false;
3230     } else {
3231       return false;
3232     }
3233 
3234     // The base type must be the same for all members.  Vector types of the
3235     // same total size are treated as being equivalent here.
3236     const Type *TyPtr = Ty.getTypePtr();
3237     if (!Base)
3238       Base = TyPtr;
3239     if (Base != TyPtr &&
3240         (!Base->isVectorType() || !TyPtr->isVectorType() ||
3241          Context.getTypeSize(Base) != Context.getTypeSize(TyPtr)))
3242       return false;
3243   }
3244 
3245   // Homogeneous Aggregates can have at most 4 members of the base type.
3246   if (HAMembers)
3247     *HAMembers = Members;
3248 
3249   return (Members > 0 && Members <= 4);
3250 }
3251 
3252 /// markAllocatedVFPs - update VFPRegs according to the alignment and
3253 /// number of VFP registers (unit is S register) requested.
3254 static void markAllocatedVFPs(int *VFPRegs, unsigned &AllocatedVFP,
3255                               unsigned Alignment,
3256                               unsigned NumRequired) {
3257   // Early Exit.
3258   if (AllocatedVFP >= 16)
3259     return;
3260   // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
3261   // VFP registers of the appropriate type unallocated then the argument is
3262   // allocated to the lowest-numbered sequence of such registers.
3263   for (unsigned I = 0; I < 16; I += Alignment) {
3264     bool FoundSlot = true;
3265     for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
3266       if (J >= 16 || VFPRegs[J]) {
3267          FoundSlot = false;
3268          break;
3269       }
3270     if (FoundSlot) {
3271       for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
3272         VFPRegs[J] = 1;
3273       AllocatedVFP += NumRequired;
3274       return;
3275     }
3276   }
3277   // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
3278   // unallocated are marked as unavailable.
3279   for (unsigned I = 0; I < 16; I++)
3280     VFPRegs[I] = 1;
3281   AllocatedVFP = 17; // We do not have enough VFP registers.
3282 }
3283 
3284 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, int *VFPRegs,
3285                                             unsigned &AllocatedVFP,
3286                                             bool &IsHA) const {
3287   // We update number of allocated VFPs according to
3288   // 6.1.2.1 The following argument types are VFP CPRCs:
3289   //   A single-precision floating-point type (including promoted
3290   //   half-precision types); A double-precision floating-point type;
3291   //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
3292   //   with a Base Type of a single- or double-precision floating-point type,
3293   //   64-bit containerized vectors or 128-bit containerized vectors with one
3294   //   to four Elements.
3295 
3296   // Handle illegal vector types here.
3297   if (isIllegalVectorType(Ty)) {
3298     uint64_t Size = getContext().getTypeSize(Ty);
3299     if (Size <= 32) {
3300       llvm::Type *ResType =
3301           llvm::Type::getInt32Ty(getVMContext());
3302       return ABIArgInfo::getDirect(ResType);
3303     }
3304     if (Size == 64) {
3305       llvm::Type *ResType = llvm::VectorType::get(
3306           llvm::Type::getInt32Ty(getVMContext()), 2);
3307       markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2);
3308       return ABIArgInfo::getDirect(ResType);
3309     }
3310     if (Size == 128) {
3311       llvm::Type *ResType = llvm::VectorType::get(
3312           llvm::Type::getInt32Ty(getVMContext()), 4);
3313       markAllocatedVFPs(VFPRegs, AllocatedVFP, 4, 4);
3314       return ABIArgInfo::getDirect(ResType);
3315     }
3316     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3317   }
3318   // Update VFPRegs for legal vector types.
3319   if (const VectorType *VT = Ty->getAs<VectorType>()) {
3320     uint64_t Size = getContext().getTypeSize(VT);
3321     // Size of a legal vector should be power of 2 and above 64.
3322     markAllocatedVFPs(VFPRegs, AllocatedVFP, Size >= 128 ? 4 : 2, Size / 32);
3323   }
3324   // Update VFPRegs for floating point types.
3325   if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3326     if (BT->getKind() == BuiltinType::Half ||
3327         BT->getKind() == BuiltinType::Float)
3328       markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, 1);
3329     if (BT->getKind() == BuiltinType::Double ||
3330         BT->getKind() == BuiltinType::LongDouble)
3331       markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2);
3332   }
3333 
3334   if (!isAggregateTypeForABI(Ty)) {
3335     // Treat an enum type as its underlying type.
3336     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3337       Ty = EnumTy->getDecl()->getIntegerType();
3338 
3339     return (Ty->isPromotableIntegerType() ?
3340             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3341   }
3342 
3343   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
3344     return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
3345 
3346   // Ignore empty records.
3347   if (isEmptyRecord(getContext(), Ty, true))
3348     return ABIArgInfo::getIgnore();
3349 
3350   if (getABIKind() == ARMABIInfo::AAPCS_VFP) {
3351     // Homogeneous Aggregates need to be expanded when we can fit the aggregate
3352     // into VFP registers.
3353     const Type *Base = 0;
3354     uint64_t Members = 0;
3355     if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
3356       assert(Base && "Base class should be set for homogeneous aggregate");
3357       // Base can be a floating-point or a vector.
3358       if (Base->isVectorType()) {
3359         // ElementSize is in number of floats.
3360         unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4;
3361         markAllocatedVFPs(VFPRegs, AllocatedVFP, ElementSize,
3362                           Members * ElementSize);
3363       } else if (Base->isSpecificBuiltinType(BuiltinType::Float))
3364         markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, Members);
3365       else {
3366         assert(Base->isSpecificBuiltinType(BuiltinType::Double) ||
3367                Base->isSpecificBuiltinType(BuiltinType::LongDouble));
3368         markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, Members * 2);
3369       }
3370       IsHA = true;
3371       return ABIArgInfo::getExpand();
3372     }
3373   }
3374 
3375   // Support byval for ARM.
3376   // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
3377   // most 8-byte. We realign the indirect argument if type alignment is bigger
3378   // than ABI alignment.
3379   uint64_t ABIAlign = 4;
3380   uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
3381   if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
3382       getABIKind() == ARMABIInfo::AAPCS)
3383     ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
3384   if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
3385     return ABIArgInfo::getIndirect(0, /*ByVal=*/true,
3386            /*Realign=*/TyAlign > ABIAlign);
3387   }
3388 
3389   // Otherwise, pass by coercing to a structure of the appropriate size.
3390   llvm::Type* ElemTy;
3391   unsigned SizeRegs;
3392   // FIXME: Try to match the types of the arguments more accurately where
3393   // we can.
3394   if (getContext().getTypeAlign(Ty) <= 32) {
3395     ElemTy = llvm::Type::getInt32Ty(getVMContext());
3396     SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
3397   } else {
3398     ElemTy = llvm::Type::getInt64Ty(getVMContext());
3399     SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
3400   }
3401 
3402   llvm::Type *STy =
3403     llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL);
3404   return ABIArgInfo::getDirect(STy);
3405 }
3406 
3407 static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
3408                               llvm::LLVMContext &VMContext) {
3409   // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
3410   // is called integer-like if its size is less than or equal to one word, and
3411   // the offset of each of its addressable sub-fields is zero.
3412 
3413   uint64_t Size = Context.getTypeSize(Ty);
3414 
3415   // Check that the type fits in a word.
3416   if (Size > 32)
3417     return false;
3418 
3419   // FIXME: Handle vector types!
3420   if (Ty->isVectorType())
3421     return false;
3422 
3423   // Float types are never treated as "integer like".
3424   if (Ty->isRealFloatingType())
3425     return false;
3426 
3427   // If this is a builtin or pointer type then it is ok.
3428   if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
3429     return true;
3430 
3431   // Small complex integer types are "integer like".
3432   if (const ComplexType *CT = Ty->getAs<ComplexType>())
3433     return isIntegerLikeType(CT->getElementType(), Context, VMContext);
3434 
3435   // Single element and zero sized arrays should be allowed, by the definition
3436   // above, but they are not.
3437 
3438   // Otherwise, it must be a record type.
3439   const RecordType *RT = Ty->getAs<RecordType>();
3440   if (!RT) return false;
3441 
3442   // Ignore records with flexible arrays.
3443   const RecordDecl *RD = RT->getDecl();
3444   if (RD->hasFlexibleArrayMember())
3445     return false;
3446 
3447   // Check that all sub-fields are at offset 0, and are themselves "integer
3448   // like".
3449   const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
3450 
3451   bool HadField = false;
3452   unsigned idx = 0;
3453   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3454        i != e; ++i, ++idx) {
3455     const FieldDecl *FD = *i;
3456 
3457     // Bit-fields are not addressable, we only need to verify they are "integer
3458     // like". We still have to disallow a subsequent non-bitfield, for example:
3459     //   struct { int : 0; int x }
3460     // is non-integer like according to gcc.
3461     if (FD->isBitField()) {
3462       if (!RD->isUnion())
3463         HadField = true;
3464 
3465       if (!isIntegerLikeType(FD->getType(), Context, VMContext))
3466         return false;
3467 
3468       continue;
3469     }
3470 
3471     // Check if this field is at offset 0.
3472     if (Layout.getFieldOffset(idx) != 0)
3473       return false;
3474 
3475     if (!isIntegerLikeType(FD->getType(), Context, VMContext))
3476       return false;
3477 
3478     // Only allow at most one field in a structure. This doesn't match the
3479     // wording above, but follows gcc in situations with a field following an
3480     // empty structure.
3481     if (!RD->isUnion()) {
3482       if (HadField)
3483         return false;
3484 
3485       HadField = true;
3486     }
3487   }
3488 
3489   return true;
3490 }
3491 
3492 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
3493   if (RetTy->isVoidType())
3494     return ABIArgInfo::getIgnore();
3495 
3496   // Large vector types should be returned via memory.
3497   if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
3498     return ABIArgInfo::getIndirect(0);
3499 
3500   if (!isAggregateTypeForABI(RetTy)) {
3501     // Treat an enum type as its underlying type.
3502     if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3503       RetTy = EnumTy->getDecl()->getIntegerType();
3504 
3505     return (RetTy->isPromotableIntegerType() ?
3506             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3507   }
3508 
3509   // Structures with either a non-trivial destructor or a non-trivial
3510   // copy constructor are always indirect.
3511   if (isRecordReturnIndirect(RetTy, CGT))
3512     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3513 
3514   // Are we following APCS?
3515   if (getABIKind() == APCS) {
3516     if (isEmptyRecord(getContext(), RetTy, false))
3517       return ABIArgInfo::getIgnore();
3518 
3519     // Complex types are all returned as packed integers.
3520     //
3521     // FIXME: Consider using 2 x vector types if the back end handles them
3522     // correctly.
3523     if (RetTy->isAnyComplexType())
3524       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
3525                                               getContext().getTypeSize(RetTy)));
3526 
3527     // Integer like structures are returned in r0.
3528     if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
3529       // Return in the smallest viable integer type.
3530       uint64_t Size = getContext().getTypeSize(RetTy);
3531       if (Size <= 8)
3532         return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
3533       if (Size <= 16)
3534         return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
3535       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
3536     }
3537 
3538     // Otherwise return in memory.
3539     return ABIArgInfo::getIndirect(0);
3540   }
3541 
3542   // Otherwise this is an AAPCS variant.
3543 
3544   if (isEmptyRecord(getContext(), RetTy, true))
3545     return ABIArgInfo::getIgnore();
3546 
3547   // Check for homogeneous aggregates with AAPCS-VFP.
3548   if (getABIKind() == AAPCS_VFP) {
3549     const Type *Base = 0;
3550     if (isHomogeneousAggregate(RetTy, Base, getContext())) {
3551       assert(Base && "Base class should be set for homogeneous aggregate");
3552       // Homogeneous Aggregates are returned directly.
3553       return ABIArgInfo::getDirect();
3554     }
3555   }
3556 
3557   // Aggregates <= 4 bytes are returned in r0; other aggregates
3558   // are returned indirectly.
3559   uint64_t Size = getContext().getTypeSize(RetTy);
3560   if (Size <= 32) {
3561     // Return in the smallest viable integer type.
3562     if (Size <= 8)
3563       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
3564     if (Size <= 16)
3565       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
3566     return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
3567   }
3568 
3569   return ABIArgInfo::getIndirect(0);
3570 }
3571 
3572 /// isIllegalVector - check whether Ty is an illegal vector type.
3573 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
3574   if (const VectorType *VT = Ty->getAs<VectorType>()) {
3575     // Check whether VT is legal.
3576     unsigned NumElements = VT->getNumElements();
3577     uint64_t Size = getContext().getTypeSize(VT);
3578     // NumElements should be power of 2.
3579     if ((NumElements & (NumElements - 1)) != 0)
3580       return true;
3581     // Size should be greater than 32 bits.
3582     return Size <= 32;
3583   }
3584   return false;
3585 }
3586 
3587 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3588                                    CodeGenFunction &CGF) const {
3589   llvm::Type *BP = CGF.Int8PtrTy;
3590   llvm::Type *BPP = CGF.Int8PtrPtrTy;
3591 
3592   CGBuilderTy &Builder = CGF.Builder;
3593   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
3594   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3595 
3596   if (isEmptyRecord(getContext(), Ty, true)) {
3597     // These are ignored for parameter passing purposes.
3598     llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3599     return Builder.CreateBitCast(Addr, PTy);
3600   }
3601 
3602   uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
3603   uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
3604   bool IsIndirect = false;
3605 
3606   // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
3607   // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
3608   if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
3609       getABIKind() == ARMABIInfo::AAPCS)
3610     TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
3611   else
3612     TyAlign = 4;
3613   // Use indirect if size of the illegal vector is bigger than 16 bytes.
3614   if (isIllegalVectorType(Ty) && Size > 16) {
3615     IsIndirect = true;
3616     Size = 4;
3617     TyAlign = 4;
3618   }
3619 
3620   // Handle address alignment for ABI alignment > 4 bytes.
3621   if (TyAlign > 4) {
3622     assert((TyAlign & (TyAlign - 1)) == 0 &&
3623            "Alignment is not power of 2!");
3624     llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
3625     AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
3626     AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
3627     Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
3628   }
3629 
3630   uint64_t Offset =
3631     llvm::RoundUpToAlignment(Size, 4);
3632   llvm::Value *NextAddr =
3633     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
3634                       "ap.next");
3635   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3636 
3637   if (IsIndirect)
3638     Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
3639   else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
3640     // We can't directly cast ap.cur to pointer to a vector type, since ap.cur
3641     // may not be correctly aligned for the vector type. We create an aligned
3642     // temporary space and copy the content over from ap.cur to the temporary
3643     // space. This is necessary if the natural alignment of the type is greater
3644     // than the ABI alignment.
3645     llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
3646     CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
3647     llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
3648                                                     "var.align");
3649     llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
3650     llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
3651     Builder.CreateMemCpy(Dst, Src,
3652         llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
3653         TyAlign, false);
3654     Addr = AlignedTemp; //The content is in aligned location.
3655   }
3656   llvm::Type *PTy =
3657     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3658   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
3659 
3660   return AddrTyped;
3661 }
3662 
3663 namespace {
3664 
3665 class NaClARMABIInfo : public ABIInfo {
3666  public:
3667   NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
3668       : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
3669   virtual void computeInfo(CGFunctionInfo &FI) const;
3670   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3671                                  CodeGenFunction &CGF) const;
3672  private:
3673   PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
3674   ARMABIInfo NInfo; // Used for everything else.
3675 };
3676 
3677 class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo  {
3678  public:
3679   NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
3680       : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
3681 };
3682 
3683 }
3684 
3685 void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
3686   if (FI.getASTCallingConvention() == CC_PnaclCall)
3687     PInfo.computeInfo(FI);
3688   else
3689     static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
3690 }
3691 
3692 llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3693                                        CodeGenFunction &CGF) const {
3694   // Always use the native convention; calling pnacl-style varargs functions
3695   // is unsupported.
3696   return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
3697 }
3698 
3699 //===----------------------------------------------------------------------===//
3700 // AArch64 ABI Implementation
3701 //===----------------------------------------------------------------------===//
3702 
3703 namespace {
3704 
3705 class AArch64ABIInfo : public ABIInfo {
3706 public:
3707   AArch64ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
3708 
3709 private:
3710   // The AArch64 PCS is explicit about return types and argument types being
3711   // handled identically, so we don't need to draw a distinction between
3712   // Argument and Return classification.
3713   ABIArgInfo classifyGenericType(QualType Ty, int &FreeIntRegs,
3714                                  int &FreeVFPRegs) const;
3715 
3716   ABIArgInfo tryUseRegs(QualType Ty, int &FreeRegs, int RegsNeeded, bool IsInt,
3717                         llvm::Type *DirectTy = 0) const;
3718 
3719   virtual void computeInfo(CGFunctionInfo &FI) const;
3720 
3721   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3722                                  CodeGenFunction &CGF) const;
3723 };
3724 
3725 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
3726 public:
3727   AArch64TargetCodeGenInfo(CodeGenTypes &CGT)
3728     :TargetCodeGenInfo(new AArch64ABIInfo(CGT)) {}
3729 
3730   const AArch64ABIInfo &getABIInfo() const {
3731     return static_cast<const AArch64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
3732   }
3733 
3734   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
3735     return 31;
3736   }
3737 
3738   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3739                                llvm::Value *Address) const {
3740     // 0-31 are x0-x30 and sp: 8 bytes each
3741     llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
3742     AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 31);
3743 
3744     // 64-95 are v0-v31: 16 bytes each
3745     llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
3746     AssignToArrayRange(CGF.Builder, Address, Sixteen8, 64, 95);
3747 
3748     return false;
3749   }
3750 
3751 };
3752 
3753 }
3754 
3755 void AArch64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3756   int FreeIntRegs = 8, FreeVFPRegs = 8;
3757 
3758   FI.getReturnInfo() = classifyGenericType(FI.getReturnType(),
3759                                            FreeIntRegs, FreeVFPRegs);
3760 
3761   FreeIntRegs = FreeVFPRegs = 8;
3762   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3763        it != ie; ++it) {
3764     it->info = classifyGenericType(it->type, FreeIntRegs, FreeVFPRegs);
3765 
3766   }
3767 }
3768 
3769 ABIArgInfo
3770 AArch64ABIInfo::tryUseRegs(QualType Ty, int &FreeRegs, int RegsNeeded,
3771                            bool IsInt, llvm::Type *DirectTy) const {
3772   if (FreeRegs >= RegsNeeded) {
3773     FreeRegs -= RegsNeeded;
3774     return ABIArgInfo::getDirect(DirectTy);
3775   }
3776 
3777   llvm::Type *Padding = 0;
3778 
3779   // We need padding so that later arguments don't get filled in anyway. That
3780   // wouldn't happen if only ByVal arguments followed in the same category, but
3781   // a large structure will simply seem to be a pointer as far as LLVM is
3782   // concerned.
3783   if (FreeRegs > 0) {
3784     if (IsInt)
3785       Padding = llvm::Type::getInt64Ty(getVMContext());
3786     else
3787       Padding = llvm::Type::getFloatTy(getVMContext());
3788 
3789     // Either [N x i64] or [N x float].
3790     Padding = llvm::ArrayType::get(Padding, FreeRegs);
3791     FreeRegs = 0;
3792   }
3793 
3794   return ABIArgInfo::getIndirect(getContext().getTypeAlign(Ty) / 8,
3795                                  /*IsByVal=*/ true, /*Realign=*/ false,
3796                                  Padding);
3797 }
3798 
3799 
3800 ABIArgInfo AArch64ABIInfo::classifyGenericType(QualType Ty,
3801                                                int &FreeIntRegs,
3802                                                int &FreeVFPRegs) const {
3803   // Can only occurs for return, but harmless otherwise.
3804   if (Ty->isVoidType())
3805     return ABIArgInfo::getIgnore();
3806 
3807   // Large vector types should be returned via memory. There's no such concept
3808   // in the ABI, but they'd be over 16 bytes anyway so no matter how they're
3809   // classified they'd go into memory (see B.3).
3810   if (Ty->isVectorType() && getContext().getTypeSize(Ty) > 128) {
3811     if (FreeIntRegs > 0)
3812       --FreeIntRegs;
3813     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3814   }
3815 
3816   // All non-aggregate LLVM types have a concrete ABI representation so they can
3817   // be passed directly. After this block we're guaranteed to be in a
3818   // complicated case.
3819   if (!isAggregateTypeForABI(Ty)) {
3820     // Treat an enum type as its underlying type.
3821     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3822       Ty = EnumTy->getDecl()->getIntegerType();
3823 
3824     if (Ty->isFloatingType() || Ty->isVectorType())
3825       return tryUseRegs(Ty, FreeVFPRegs, /*RegsNeeded=*/ 1, /*IsInt=*/ false);
3826 
3827     assert(getContext().getTypeSize(Ty) <= 128 &&
3828            "unexpectedly large scalar type");
3829 
3830     int RegsNeeded = getContext().getTypeSize(Ty) > 64 ? 2 : 1;
3831 
3832     // If the type may need padding registers to ensure "alignment", we must be
3833     // careful when this is accounted for. Increasing the effective size covers
3834     // all cases.
3835     if (getContext().getTypeAlign(Ty) == 128)
3836       RegsNeeded += FreeIntRegs % 2 != 0;
3837 
3838     return tryUseRegs(Ty, FreeIntRegs, RegsNeeded, /*IsInt=*/ true);
3839   }
3840 
3841   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT)) {
3842     if (FreeIntRegs > 0 && RAA == CGCXXABI::RAA_Indirect)
3843       --FreeIntRegs;
3844     return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
3845   }
3846 
3847   if (isEmptyRecord(getContext(), Ty, true)) {
3848     if (!getContext().getLangOpts().CPlusPlus) {
3849       // Empty structs outside C++ mode are a GNU extension, so no ABI can
3850       // possibly tell us what to do. It turns out (I believe) that GCC ignores
3851       // the object for parameter-passsing purposes.
3852       return ABIArgInfo::getIgnore();
3853     }
3854 
3855     // The combination of C++98 9p5 (sizeof(struct) != 0) and the pseudocode
3856     // description of va_arg in the PCS require that an empty struct does
3857     // actually occupy space for parameter-passing. I'm hoping for a
3858     // clarification giving an explicit paragraph to point to in future.
3859     return tryUseRegs(Ty, FreeIntRegs, /*RegsNeeded=*/ 1, /*IsInt=*/ true,
3860                       llvm::Type::getInt8Ty(getVMContext()));
3861   }
3862 
3863   // Homogeneous vector aggregates get passed in registers or on the stack.
3864   const Type *Base = 0;
3865   uint64_t NumMembers = 0;
3866   if (isHomogeneousAggregate(Ty, Base, getContext(), &NumMembers)) {
3867     assert(Base && "Base class should be set for homogeneous aggregate");
3868     // Homogeneous aggregates are passed and returned directly.
3869     return tryUseRegs(Ty, FreeVFPRegs, /*RegsNeeded=*/ NumMembers,
3870                       /*IsInt=*/ false);
3871   }
3872 
3873   uint64_t Size = getContext().getTypeSize(Ty);
3874   if (Size <= 128) {
3875     // Small structs can use the same direct type whether they're in registers
3876     // or on the stack.
3877     llvm::Type *BaseTy;
3878     unsigned NumBases;
3879     int SizeInRegs = (Size + 63) / 64;
3880 
3881     if (getContext().getTypeAlign(Ty) == 128) {
3882       BaseTy = llvm::Type::getIntNTy(getVMContext(), 128);
3883       NumBases = 1;
3884 
3885       // If the type may need padding registers to ensure "alignment", we must
3886       // be careful when this is accounted for. Increasing the effective size
3887       // covers all cases.
3888       SizeInRegs += FreeIntRegs % 2 != 0;
3889     } else {
3890       BaseTy = llvm::Type::getInt64Ty(getVMContext());
3891       NumBases = SizeInRegs;
3892     }
3893     llvm::Type *DirectTy = llvm::ArrayType::get(BaseTy, NumBases);
3894 
3895     return tryUseRegs(Ty, FreeIntRegs, /*RegsNeeded=*/ SizeInRegs,
3896                       /*IsInt=*/ true, DirectTy);
3897   }
3898 
3899   // If the aggregate is > 16 bytes, it's passed and returned indirectly. In
3900   // LLVM terms the return uses an "sret" pointer, but that's handled elsewhere.
3901   --FreeIntRegs;
3902   return ABIArgInfo::getIndirect(0, /* byVal = */ false);
3903 }
3904 
3905 llvm::Value *AArch64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3906                                        CodeGenFunction &CGF) const {
3907   // The AArch64 va_list type and handling is specified in the Procedure Call
3908   // Standard, section B.4:
3909   //
3910   // struct {
3911   //   void *__stack;
3912   //   void *__gr_top;
3913   //   void *__vr_top;
3914   //   int __gr_offs;
3915   //   int __vr_offs;
3916   // };
3917 
3918   assert(!CGF.CGM.getDataLayout().isBigEndian()
3919          && "va_arg not implemented for big-endian AArch64");
3920 
3921   int FreeIntRegs = 8, FreeVFPRegs = 8;
3922   Ty = CGF.getContext().getCanonicalType(Ty);
3923   ABIArgInfo AI = classifyGenericType(Ty, FreeIntRegs, FreeVFPRegs);
3924 
3925   llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
3926   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3927   llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
3928   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3929 
3930   llvm::Value *reg_offs_p = 0, *reg_offs = 0;
3931   int reg_top_index;
3932   int RegSize;
3933   if (FreeIntRegs < 8) {
3934     assert(FreeVFPRegs == 8 && "Arguments never split between int & VFP regs");
3935     // 3 is the field number of __gr_offs
3936     reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
3937     reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
3938     reg_top_index = 1; // field number for __gr_top
3939     RegSize = 8 * (8 - FreeIntRegs);
3940   } else {
3941     assert(FreeVFPRegs < 8 && "Argument must go in VFP or int regs");
3942     // 4 is the field number of __vr_offs.
3943     reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
3944     reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
3945     reg_top_index = 2; // field number for __vr_top
3946     RegSize = 16 * (8 - FreeVFPRegs);
3947   }
3948 
3949   //=======================================
3950   // Find out where argument was passed
3951   //=======================================
3952 
3953   // If reg_offs >= 0 we're already using the stack for this type of
3954   // argument. We don't want to keep updating reg_offs (in case it overflows,
3955   // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
3956   // whatever they get).
3957   llvm::Value *UsingStack = 0;
3958   UsingStack = CGF.Builder.CreateICmpSGE(reg_offs,
3959                                          llvm::ConstantInt::get(CGF.Int32Ty, 0));
3960 
3961   CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
3962 
3963   // Otherwise, at least some kind of argument could go in these registers, the
3964   // quesiton is whether this particular type is too big.
3965   CGF.EmitBlock(MaybeRegBlock);
3966 
3967   // Integer arguments may need to correct register alignment (for example a
3968   // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
3969   // align __gr_offs to calculate the potential address.
3970   if (FreeIntRegs < 8 && AI.isDirect() && getContext().getTypeAlign(Ty) > 64) {
3971     int Align = getContext().getTypeAlign(Ty) / 8;
3972 
3973     reg_offs = CGF.Builder.CreateAdd(reg_offs,
3974                                  llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
3975                                  "align_regoffs");
3976     reg_offs = CGF.Builder.CreateAnd(reg_offs,
3977                                     llvm::ConstantInt::get(CGF.Int32Ty, -Align),
3978                                     "aligned_regoffs");
3979   }
3980 
3981   // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
3982   llvm::Value *NewOffset = 0;
3983   NewOffset = CGF.Builder.CreateAdd(reg_offs,
3984                                     llvm::ConstantInt::get(CGF.Int32Ty, RegSize),
3985                                     "new_reg_offs");
3986   CGF.Builder.CreateStore(NewOffset, reg_offs_p);
3987 
3988   // Now we're in a position to decide whether this argument really was in
3989   // registers or not.
3990   llvm::Value *InRegs = 0;
3991   InRegs = CGF.Builder.CreateICmpSLE(NewOffset,
3992                                      llvm::ConstantInt::get(CGF.Int32Ty, 0),
3993                                      "inreg");
3994 
3995   CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
3996 
3997   //=======================================
3998   // Argument was in registers
3999   //=======================================
4000 
4001   // Now we emit the code for if the argument was originally passed in
4002   // registers. First start the appropriate block:
4003   CGF.EmitBlock(InRegBlock);
4004 
4005   llvm::Value *reg_top_p = 0, *reg_top = 0;
4006   reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
4007   reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
4008   llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
4009   llvm::Value *RegAddr = 0;
4010   llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
4011 
4012   if (!AI.isDirect()) {
4013     // If it's been passed indirectly (actually a struct), whatever we find from
4014     // stored registers or on the stack will actually be a struct **.
4015     MemTy = llvm::PointerType::getUnqual(MemTy);
4016   }
4017 
4018   const Type *Base = 0;
4019   uint64_t NumMembers;
4020   if (isHomogeneousAggregate(Ty, Base, getContext(), &NumMembers)
4021       && NumMembers > 1) {
4022     // Homogeneous aggregates passed in registers will have their elements split
4023     // and stored 16-bytes apart regardless of size (they're notionally in qN,
4024     // qN+1, ...). We reload and store into a temporary local variable
4025     // contiguously.
4026     assert(AI.isDirect() && "Homogeneous aggregates should be passed directly");
4027     llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
4028     llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
4029     llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
4030 
4031     for (unsigned i = 0; i < NumMembers; ++i) {
4032       llvm::Value *BaseOffset = llvm::ConstantInt::get(CGF.Int32Ty, 16 * i);
4033       llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
4034       LoadAddr = CGF.Builder.CreateBitCast(LoadAddr,
4035                                            llvm::PointerType::getUnqual(BaseTy));
4036       llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);
4037 
4038       llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
4039       CGF.Builder.CreateStore(Elem, StoreAddr);
4040     }
4041 
4042     RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
4043   } else {
4044     // Otherwise the object is contiguous in memory
4045     RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
4046   }
4047 
4048   CGF.EmitBranch(ContBlock);
4049 
4050   //=======================================
4051   // Argument was on the stack
4052   //=======================================
4053   CGF.EmitBlock(OnStackBlock);
4054 
4055   llvm::Value *stack_p = 0, *OnStackAddr = 0;
4056   stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
4057   OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
4058 
4059   // Again, stack arguments may need realigmnent. In this case both integer and
4060   // floating-point ones might be affected.
4061   if (AI.isDirect() && getContext().getTypeAlign(Ty) > 64) {
4062     int Align = getContext().getTypeAlign(Ty) / 8;
4063 
4064     OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
4065 
4066     OnStackAddr = CGF.Builder.CreateAdd(OnStackAddr,
4067                                  llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
4068                                  "align_stack");
4069     OnStackAddr = CGF.Builder.CreateAnd(OnStackAddr,
4070                                     llvm::ConstantInt::get(CGF.Int64Ty, -Align),
4071                                     "align_stack");
4072 
4073     OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
4074   }
4075 
4076   uint64_t StackSize;
4077   if (AI.isDirect())
4078     StackSize = getContext().getTypeSize(Ty) / 8;
4079   else
4080     StackSize = 8;
4081 
4082   // All stack slots are 8 bytes
4083   StackSize = llvm::RoundUpToAlignment(StackSize, 8);
4084 
4085   llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
4086   llvm::Value *NewStack = CGF.Builder.CreateGEP(OnStackAddr, StackSizeC,
4087                                                 "new_stack");
4088 
4089   // Write the new value of __stack for the next call to va_arg
4090   CGF.Builder.CreateStore(NewStack, stack_p);
4091 
4092   OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);
4093 
4094   CGF.EmitBranch(ContBlock);
4095 
4096   //=======================================
4097   // Tidy up
4098   //=======================================
4099   CGF.EmitBlock(ContBlock);
4100 
4101   llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
4102   ResAddr->addIncoming(RegAddr, InRegBlock);
4103   ResAddr->addIncoming(OnStackAddr, OnStackBlock);
4104 
4105   if (AI.isDirect())
4106     return ResAddr;
4107 
4108   return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
4109 }
4110 
4111 //===----------------------------------------------------------------------===//
4112 // NVPTX ABI Implementation
4113 //===----------------------------------------------------------------------===//
4114 
4115 namespace {
4116 
4117 class NVPTXABIInfo : public ABIInfo {
4118 public:
4119   NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
4120 
4121   ABIArgInfo classifyReturnType(QualType RetTy) const;
4122   ABIArgInfo classifyArgumentType(QualType Ty) const;
4123 
4124   virtual void computeInfo(CGFunctionInfo &FI) const;
4125   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4126                                  CodeGenFunction &CFG) const;
4127 };
4128 
4129 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
4130 public:
4131   NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
4132     : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
4133 
4134   virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4135                                    CodeGen::CodeGenModule &M) const;
4136 private:
4137   static void addKernelMetadata(llvm::Function *F);
4138 };
4139 
4140 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
4141   if (RetTy->isVoidType())
4142     return ABIArgInfo::getIgnore();
4143   if (isAggregateTypeForABI(RetTy))
4144     return ABIArgInfo::getIndirect(0);
4145   return ABIArgInfo::getDirect();
4146 }
4147 
4148 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
4149   if (isAggregateTypeForABI(Ty))
4150     return ABIArgInfo::getIndirect(0);
4151 
4152   return ABIArgInfo::getDirect();
4153 }
4154 
4155 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
4156   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4157   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
4158        it != ie; ++it)
4159     it->info = classifyArgumentType(it->type);
4160 
4161   // Always honor user-specified calling convention.
4162   if (FI.getCallingConvention() != llvm::CallingConv::C)
4163     return;
4164 
4165   FI.setEffectiveCallingConvention(getRuntimeCC());
4166 }
4167 
4168 llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4169                                      CodeGenFunction &CFG) const {
4170   llvm_unreachable("NVPTX does not support varargs");
4171 }
4172 
4173 void NVPTXTargetCodeGenInfo::
4174 SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4175                     CodeGen::CodeGenModule &M) const{
4176   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
4177   if (!FD) return;
4178 
4179   llvm::Function *F = cast<llvm::Function>(GV);
4180 
4181   // Perform special handling in OpenCL mode
4182   if (M.getLangOpts().OpenCL) {
4183     // Use OpenCL function attributes to check for kernel functions
4184     // By default, all functions are device functions
4185     if (FD->hasAttr<OpenCLKernelAttr>()) {
4186       // OpenCL __kernel functions get kernel metadata
4187       addKernelMetadata(F);
4188       // And kernel functions are not subject to inlining
4189       F->addFnAttr(llvm::Attribute::NoInline);
4190     }
4191   }
4192 
4193   // Perform special handling in CUDA mode.
4194   if (M.getLangOpts().CUDA) {
4195     // CUDA __global__ functions get a kernel metadata entry.  Since
4196     // __global__ functions cannot be called from the device, we do not
4197     // need to set the noinline attribute.
4198     if (FD->getAttr<CUDAGlobalAttr>())
4199       addKernelMetadata(F);
4200   }
4201 }
4202 
4203 void NVPTXTargetCodeGenInfo::addKernelMetadata(llvm::Function *F) {
4204   llvm::Module *M = F->getParent();
4205   llvm::LLVMContext &Ctx = M->getContext();
4206 
4207   // Get "nvvm.annotations" metadata node
4208   llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
4209 
4210   // Create !{<func-ref>, metadata !"kernel", i32 1} node
4211   llvm::SmallVector<llvm::Value *, 3> MDVals;
4212   MDVals.push_back(F);
4213   MDVals.push_back(llvm::MDString::get(Ctx, "kernel"));
4214   MDVals.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1));
4215 
4216   // Append metadata to nvvm.annotations
4217   MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
4218 }
4219 
4220 }
4221 
4222 //===----------------------------------------------------------------------===//
4223 // SystemZ ABI Implementation
4224 //===----------------------------------------------------------------------===//
4225 
4226 namespace {
4227 
4228 class SystemZABIInfo : public ABIInfo {
4229 public:
4230   SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
4231 
4232   bool isPromotableIntegerType(QualType Ty) const;
4233   bool isCompoundType(QualType Ty) const;
4234   bool isFPArgumentType(QualType Ty) const;
4235 
4236   ABIArgInfo classifyReturnType(QualType RetTy) const;
4237   ABIArgInfo classifyArgumentType(QualType ArgTy) const;
4238 
4239   virtual void computeInfo(CGFunctionInfo &FI) const {
4240     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4241     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
4242          it != ie; ++it)
4243       it->info = classifyArgumentType(it->type);
4244   }
4245 
4246   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4247                                  CodeGenFunction &CGF) const;
4248 };
4249 
4250 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
4251 public:
4252   SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
4253     : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
4254 };
4255 
4256 }
4257 
4258 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
4259   // Treat an enum type as its underlying type.
4260   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4261     Ty = EnumTy->getDecl()->getIntegerType();
4262 
4263   // Promotable integer types are required to be promoted by the ABI.
4264   if (Ty->isPromotableIntegerType())
4265     return true;
4266 
4267   // 32-bit values must also be promoted.
4268   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4269     switch (BT->getKind()) {
4270     case BuiltinType::Int:
4271     case BuiltinType::UInt:
4272       return true;
4273     default:
4274       return false;
4275     }
4276   return false;
4277 }
4278 
4279 bool SystemZABIInfo::isCompoundType(QualType Ty) const {
4280   return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty);
4281 }
4282 
4283 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
4284   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4285     switch (BT->getKind()) {
4286     case BuiltinType::Float:
4287     case BuiltinType::Double:
4288       return true;
4289     default:
4290       return false;
4291     }
4292 
4293   if (const RecordType *RT = Ty->getAsStructureType()) {
4294     const RecordDecl *RD = RT->getDecl();
4295     bool Found = false;
4296 
4297     // If this is a C++ record, check the bases first.
4298     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
4299       for (CXXRecordDecl::base_class_const_iterator I = CXXRD->bases_begin(),
4300              E = CXXRD->bases_end(); I != E; ++I) {
4301         QualType Base = I->getType();
4302 
4303         // Empty bases don't affect things either way.
4304         if (isEmptyRecord(getContext(), Base, true))
4305           continue;
4306 
4307         if (Found)
4308           return false;
4309         Found = isFPArgumentType(Base);
4310         if (!Found)
4311           return false;
4312       }
4313 
4314     // Check the fields.
4315     for (RecordDecl::field_iterator I = RD->field_begin(),
4316            E = RD->field_end(); I != E; ++I) {
4317       const FieldDecl *FD = *I;
4318 
4319       // Empty bitfields don't affect things either way.
4320       // Unlike isSingleElementStruct(), empty structure and array fields
4321       // do count.  So do anonymous bitfields that aren't zero-sized.
4322       if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
4323         return true;
4324 
4325       // Unlike isSingleElementStruct(), arrays do not count.
4326       // Nested isFPArgumentType structures still do though.
4327       if (Found)
4328         return false;
4329       Found = isFPArgumentType(FD->getType());
4330       if (!Found)
4331         return false;
4332     }
4333 
4334     // Unlike isSingleElementStruct(), trailing padding is allowed.
4335     // An 8-byte aligned struct s { float f; } is passed as a double.
4336     return Found;
4337   }
4338 
4339   return false;
4340 }
4341 
4342 llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4343                                        CodeGenFunction &CGF) const {
4344   // Assume that va_list type is correct; should be pointer to LLVM type:
4345   // struct {
4346   //   i64 __gpr;
4347   //   i64 __fpr;
4348   //   i8 *__overflow_arg_area;
4349   //   i8 *__reg_save_area;
4350   // };
4351 
4352   // Every argument occupies 8 bytes and is passed by preference in either
4353   // GPRs or FPRs.
4354   Ty = CGF.getContext().getCanonicalType(Ty);
4355   ABIArgInfo AI = classifyArgumentType(Ty);
4356   bool InFPRs = isFPArgumentType(Ty);
4357 
4358   llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
4359   bool IsIndirect = AI.isIndirect();
4360   unsigned UnpaddedBitSize;
4361   if (IsIndirect) {
4362     APTy = llvm::PointerType::getUnqual(APTy);
4363     UnpaddedBitSize = 64;
4364   } else
4365     UnpaddedBitSize = getContext().getTypeSize(Ty);
4366   unsigned PaddedBitSize = 64;
4367   assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");
4368 
4369   unsigned PaddedSize = PaddedBitSize / 8;
4370   unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;
4371 
4372   unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
4373   if (InFPRs) {
4374     MaxRegs = 4; // Maximum of 4 FPR arguments
4375     RegCountField = 1; // __fpr
4376     RegSaveIndex = 16; // save offset for f0
4377     RegPadding = 0; // floats are passed in the high bits of an FPR
4378   } else {
4379     MaxRegs = 5; // Maximum of 5 GPR arguments
4380     RegCountField = 0; // __gpr
4381     RegSaveIndex = 2; // save offset for r2
4382     RegPadding = Padding; // values are passed in the low bits of a GPR
4383   }
4384 
4385   llvm::Value *RegCountPtr =
4386     CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
4387   llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
4388   llvm::Type *IndexTy = RegCount->getType();
4389   llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
4390   llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
4391 						  "fits_in_regs");
4392 
4393   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
4394   llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
4395   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
4396   CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
4397 
4398   // Emit code to load the value if it was passed in registers.
4399   CGF.EmitBlock(InRegBlock);
4400 
4401   // Work out the address of an argument register.
4402   llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
4403   llvm::Value *ScaledRegCount =
4404     CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
4405   llvm::Value *RegBase =
4406     llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding);
4407   llvm::Value *RegOffset =
4408     CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
4409   llvm::Value *RegSaveAreaPtr =
4410     CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
4411   llvm::Value *RegSaveArea =
4412     CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
4413   llvm::Value *RawRegAddr =
4414     CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr");
4415   llvm::Value *RegAddr =
4416     CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr");
4417 
4418   // Update the register count
4419   llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
4420   llvm::Value *NewRegCount =
4421     CGF.Builder.CreateAdd(RegCount, One, "reg_count");
4422   CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
4423   CGF.EmitBranch(ContBlock);
4424 
4425   // Emit code to load the value if it was passed in memory.
4426   CGF.EmitBlock(InMemBlock);
4427 
4428   // Work out the address of a stack argument.
4429   llvm::Value *OverflowArgAreaPtr =
4430     CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
4431   llvm::Value *OverflowArgArea =
4432     CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
4433   llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
4434   llvm::Value *RawMemAddr =
4435     CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr");
4436   llvm::Value *MemAddr =
4437     CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr");
4438 
4439   // Update overflow_arg_area_ptr pointer
4440   llvm::Value *NewOverflowArgArea =
4441     CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
4442   CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
4443   CGF.EmitBranch(ContBlock);
4444 
4445   // Return the appropriate result.
4446   CGF.EmitBlock(ContBlock);
4447   llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr");
4448   ResAddr->addIncoming(RegAddr, InRegBlock);
4449   ResAddr->addIncoming(MemAddr, InMemBlock);
4450 
4451   if (IsIndirect)
4452     return CGF.Builder.CreateLoad(ResAddr, "indirect_arg");
4453 
4454   return ResAddr;
4455 }
4456 
4457 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
4458     const llvm::Triple &Triple, const CodeGenOptions &Opts) {
4459   assert(Triple.getArch() == llvm::Triple::x86);
4460 
4461   switch (Opts.getStructReturnConvention()) {
4462   case CodeGenOptions::SRCK_Default:
4463     break;
4464   case CodeGenOptions::SRCK_OnStack:  // -fpcc-struct-return
4465     return false;
4466   case CodeGenOptions::SRCK_InRegs:  // -freg-struct-return
4467     return true;
4468   }
4469 
4470   if (Triple.isOSDarwin())
4471     return true;
4472 
4473   switch (Triple.getOS()) {
4474   case llvm::Triple::Cygwin:
4475   case llvm::Triple::MinGW32:
4476   case llvm::Triple::AuroraUX:
4477   case llvm::Triple::DragonFly:
4478   case llvm::Triple::FreeBSD:
4479   case llvm::Triple::OpenBSD:
4480   case llvm::Triple::Bitrig:
4481   case llvm::Triple::Win32:
4482     return true;
4483   default:
4484     return false;
4485   }
4486 }
4487 
4488 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
4489   if (RetTy->isVoidType())
4490     return ABIArgInfo::getIgnore();
4491   if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
4492     return ABIArgInfo::getIndirect(0);
4493   return (isPromotableIntegerType(RetTy) ?
4494           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4495 }
4496 
4497 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
4498   // Handle the generic C++ ABI.
4499   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
4500     return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
4501 
4502   // Integers and enums are extended to full register width.
4503   if (isPromotableIntegerType(Ty))
4504     return ABIArgInfo::getExtend();
4505 
4506   // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
4507   uint64_t Size = getContext().getTypeSize(Ty);
4508   if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
4509     return ABIArgInfo::getIndirect(0);
4510 
4511   // Handle small structures.
4512   if (const RecordType *RT = Ty->getAs<RecordType>()) {
4513     // Structures with flexible arrays have variable length, so really
4514     // fail the size test above.
4515     const RecordDecl *RD = RT->getDecl();
4516     if (RD->hasFlexibleArrayMember())
4517       return ABIArgInfo::getIndirect(0);
4518 
4519     // The structure is passed as an unextended integer, a float, or a double.
4520     llvm::Type *PassTy;
4521     if (isFPArgumentType(Ty)) {
4522       assert(Size == 32 || Size == 64);
4523       if (Size == 32)
4524         PassTy = llvm::Type::getFloatTy(getVMContext());
4525       else
4526         PassTy = llvm::Type::getDoubleTy(getVMContext());
4527     } else
4528       PassTy = llvm::IntegerType::get(getVMContext(), Size);
4529     return ABIArgInfo::getDirect(PassTy);
4530   }
4531 
4532   // Non-structure compounds are passed indirectly.
4533   if (isCompoundType(Ty))
4534     return ABIArgInfo::getIndirect(0);
4535 
4536   return ABIArgInfo::getDirect(0);
4537 }
4538 
4539 //===----------------------------------------------------------------------===//
4540 // MBlaze ABI Implementation
4541 //===----------------------------------------------------------------------===//
4542 
4543 namespace {
4544 
4545 class MBlazeABIInfo : public ABIInfo {
4546 public:
4547   MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
4548 
4549   bool isPromotableIntegerType(QualType Ty) const;
4550 
4551   ABIArgInfo classifyReturnType(QualType RetTy) const;
4552   ABIArgInfo classifyArgumentType(QualType RetTy) const;
4553 
4554   virtual void computeInfo(CGFunctionInfo &FI) const {
4555     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4556     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
4557          it != ie; ++it)
4558       it->info = classifyArgumentType(it->type);
4559   }
4560 
4561   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4562                                  CodeGenFunction &CGF) const;
4563 };
4564 
4565 class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo {
4566 public:
4567   MBlazeTargetCodeGenInfo(CodeGenTypes &CGT)
4568     : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {}
4569   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4570                            CodeGen::CodeGenModule &M) const;
4571 };
4572 
4573 }
4574 
4575 bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const {
4576   // MBlaze ABI requires all 8 and 16 bit quantities to be extended.
4577   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4578     switch (BT->getKind()) {
4579     case BuiltinType::Bool:
4580     case BuiltinType::Char_S:
4581     case BuiltinType::Char_U:
4582     case BuiltinType::SChar:
4583     case BuiltinType::UChar:
4584     case BuiltinType::Short:
4585     case BuiltinType::UShort:
4586       return true;
4587     default:
4588       return false;
4589     }
4590   return false;
4591 }
4592 
4593 llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4594                                       CodeGenFunction &CGF) const {
4595   // FIXME: Implement
4596   return 0;
4597 }
4598 
4599 
4600 ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const {
4601   if (RetTy->isVoidType())
4602     return ABIArgInfo::getIgnore();
4603   if (isAggregateTypeForABI(RetTy))
4604     return ABIArgInfo::getIndirect(0);
4605 
4606   return (isPromotableIntegerType(RetTy) ?
4607           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4608 }
4609 
4610 ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const {
4611   if (isAggregateTypeForABI(Ty))
4612     return ABIArgInfo::getIndirect(0);
4613 
4614   return (isPromotableIntegerType(Ty) ?
4615           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4616 }
4617 
4618 void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
4619                                                   llvm::GlobalValue *GV,
4620                                                   CodeGen::CodeGenModule &M)
4621                                                   const {
4622   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
4623   if (!FD) return;
4624 
4625   llvm::CallingConv::ID CC = llvm::CallingConv::C;
4626   if (FD->hasAttr<MBlazeInterruptHandlerAttr>())
4627     CC = llvm::CallingConv::MBLAZE_INTR;
4628   else if (FD->hasAttr<MBlazeSaveVolatilesAttr>())
4629     CC = llvm::CallingConv::MBLAZE_SVOL;
4630 
4631   if (CC != llvm::CallingConv::C) {
4632       // Handle 'interrupt_handler' attribute:
4633       llvm::Function *F = cast<llvm::Function>(GV);
4634 
4635       // Step 1: Set ISR calling convention.
4636       F->setCallingConv(CC);
4637 
4638       // Step 2: Add attributes goodness.
4639       F->addFnAttr(llvm::Attribute::NoInline);
4640   }
4641 
4642   // Step 3: Emit _interrupt_handler alias.
4643   if (CC == llvm::CallingConv::MBLAZE_INTR)
4644     new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
4645                           "_interrupt_handler", GV, &M.getModule());
4646 }
4647 
4648 
4649 //===----------------------------------------------------------------------===//
4650 // MSP430 ABI Implementation
4651 //===----------------------------------------------------------------------===//
4652 
4653 namespace {
4654 
4655 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
4656 public:
4657   MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
4658     : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
4659   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4660                            CodeGen::CodeGenModule &M) const;
4661 };
4662 
4663 }
4664 
4665 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
4666                                                   llvm::GlobalValue *GV,
4667                                              CodeGen::CodeGenModule &M) const {
4668   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
4669     if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
4670       // Handle 'interrupt' attribute:
4671       llvm::Function *F = cast<llvm::Function>(GV);
4672 
4673       // Step 1: Set ISR calling convention.
4674       F->setCallingConv(llvm::CallingConv::MSP430_INTR);
4675 
4676       // Step 2: Add attributes goodness.
4677       F->addFnAttr(llvm::Attribute::NoInline);
4678 
4679       // Step 3: Emit ISR vector alias.
4680       unsigned Num = attr->getNumber() / 2;
4681       new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
4682                             "__isr_" + Twine(Num),
4683                             GV, &M.getModule());
4684     }
4685   }
4686 }
4687 
4688 //===----------------------------------------------------------------------===//
4689 // MIPS ABI Implementation.  This works for both little-endian and
4690 // big-endian variants.
4691 //===----------------------------------------------------------------------===//
4692 
4693 namespace {
4694 class MipsABIInfo : public ABIInfo {
4695   bool IsO32;
4696   unsigned MinABIStackAlignInBytes, StackAlignInBytes;
4697   void CoerceToIntArgs(uint64_t TySize,
4698                        SmallVector<llvm::Type*, 8> &ArgList) const;
4699   llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
4700   llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
4701   llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
4702 public:
4703   MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
4704     ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
4705     StackAlignInBytes(IsO32 ? 8 : 16) {}
4706 
4707   ABIArgInfo classifyReturnType(QualType RetTy) const;
4708   ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
4709   virtual void computeInfo(CGFunctionInfo &FI) const;
4710   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4711                                  CodeGenFunction &CGF) const;
4712 };
4713 
4714 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
4715   unsigned SizeOfUnwindException;
4716 public:
4717   MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
4718     : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
4719       SizeOfUnwindException(IsO32 ? 24 : 32) {}
4720 
4721   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
4722     return 29;
4723   }
4724 
4725   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4726                            CodeGen::CodeGenModule &CGM) const {
4727     const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
4728     if (!FD) return;
4729     llvm::Function *Fn = cast<llvm::Function>(GV);
4730     if (FD->hasAttr<Mips16Attr>()) {
4731       Fn->addFnAttr("mips16");
4732     }
4733     else if (FD->hasAttr<NoMips16Attr>()) {
4734       Fn->addFnAttr("nomips16");
4735     }
4736   }
4737 
4738   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4739                                llvm::Value *Address) const;
4740 
4741   unsigned getSizeOfUnwindException() const {
4742     return SizeOfUnwindException;
4743   }
4744 };
4745 }
4746 
4747 void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
4748                                   SmallVector<llvm::Type*, 8> &ArgList) const {
4749   llvm::IntegerType *IntTy =
4750     llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
4751 
4752   // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
4753   for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
4754     ArgList.push_back(IntTy);
4755 
4756   // If necessary, add one more integer type to ArgList.
4757   unsigned R = TySize % (MinABIStackAlignInBytes * 8);
4758 
4759   if (R)
4760     ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
4761 }
4762 
4763 // In N32/64, an aligned double precision floating point field is passed in
4764 // a register.
4765 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
4766   SmallVector<llvm::Type*, 8> ArgList, IntArgList;
4767 
4768   if (IsO32) {
4769     CoerceToIntArgs(TySize, ArgList);
4770     return llvm::StructType::get(getVMContext(), ArgList);
4771   }
4772 
4773   if (Ty->isComplexType())
4774     return CGT.ConvertType(Ty);
4775 
4776   const RecordType *RT = Ty->getAs<RecordType>();
4777 
4778   // Unions/vectors are passed in integer registers.
4779   if (!RT || !RT->isStructureOrClassType()) {
4780     CoerceToIntArgs(TySize, ArgList);
4781     return llvm::StructType::get(getVMContext(), ArgList);
4782   }
4783 
4784   const RecordDecl *RD = RT->getDecl();
4785   const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
4786   assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
4787 
4788   uint64_t LastOffset = 0;
4789   unsigned idx = 0;
4790   llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
4791 
4792   // Iterate over fields in the struct/class and check if there are any aligned
4793   // double fields.
4794   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
4795        i != e; ++i, ++idx) {
4796     const QualType Ty = i->getType();
4797     const BuiltinType *BT = Ty->getAs<BuiltinType>();
4798 
4799     if (!BT || BT->getKind() != BuiltinType::Double)
4800       continue;
4801 
4802     uint64_t Offset = Layout.getFieldOffset(idx);
4803     if (Offset % 64) // Ignore doubles that are not aligned.
4804       continue;
4805 
4806     // Add ((Offset - LastOffset) / 64) args of type i64.
4807     for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
4808       ArgList.push_back(I64);
4809 
4810     // Add double type.
4811     ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
4812     LastOffset = Offset + 64;
4813   }
4814 
4815   CoerceToIntArgs(TySize - LastOffset, IntArgList);
4816   ArgList.append(IntArgList.begin(), IntArgList.end());
4817 
4818   return llvm::StructType::get(getVMContext(), ArgList);
4819 }
4820 
4821 llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const {
4822   assert((Offset % MinABIStackAlignInBytes) == 0);
4823 
4824   if ((Align - 1) & Offset)
4825     return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
4826 
4827   return 0;
4828 }
4829 
4830 ABIArgInfo
4831 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
4832   uint64_t OrigOffset = Offset;
4833   uint64_t TySize = getContext().getTypeSize(Ty);
4834   uint64_t Align = getContext().getTypeAlign(Ty) / 8;
4835 
4836   Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
4837                    (uint64_t)StackAlignInBytes);
4838   Offset = llvm::RoundUpToAlignment(Offset, Align);
4839   Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
4840 
4841   if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
4842     // Ignore empty aggregates.
4843     if (TySize == 0)
4844       return ABIArgInfo::getIgnore();
4845 
4846     if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT)) {
4847       Offset = OrigOffset + MinABIStackAlignInBytes;
4848       return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
4849     }
4850 
4851     // If we have reached here, aggregates are passed directly by coercing to
4852     // another structure type. Padding is inserted if the offset of the
4853     // aggregate is unaligned.
4854     return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
4855                                  getPaddingType(Align, OrigOffset));
4856   }
4857 
4858   // Treat an enum type as its underlying type.
4859   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4860     Ty = EnumTy->getDecl()->getIntegerType();
4861 
4862   if (Ty->isPromotableIntegerType())
4863     return ABIArgInfo::getExtend();
4864 
4865   return ABIArgInfo::getDirect(0, 0,
4866                                IsO32 ? 0 : getPaddingType(Align, OrigOffset));
4867 }
4868 
4869 llvm::Type*
4870 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
4871   const RecordType *RT = RetTy->getAs<RecordType>();
4872   SmallVector<llvm::Type*, 8> RTList;
4873 
4874   if (RT && RT->isStructureOrClassType()) {
4875     const RecordDecl *RD = RT->getDecl();
4876     const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
4877     unsigned FieldCnt = Layout.getFieldCount();
4878 
4879     // N32/64 returns struct/classes in floating point registers if the
4880     // following conditions are met:
4881     // 1. The size of the struct/class is no larger than 128-bit.
4882     // 2. The struct/class has one or two fields all of which are floating
4883     //    point types.
4884     // 3. The offset of the first field is zero (this follows what gcc does).
4885     //
4886     // Any other composite results are returned in integer registers.
4887     //
4888     if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
4889       RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
4890       for (; b != e; ++b) {
4891         const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
4892 
4893         if (!BT || !BT->isFloatingPoint())
4894           break;
4895 
4896         RTList.push_back(CGT.ConvertType(b->getType()));
4897       }
4898 
4899       if (b == e)
4900         return llvm::StructType::get(getVMContext(), RTList,
4901                                      RD->hasAttr<PackedAttr>());
4902 
4903       RTList.clear();
4904     }
4905   }
4906 
4907   CoerceToIntArgs(Size, RTList);
4908   return llvm::StructType::get(getVMContext(), RTList);
4909 }
4910 
4911 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
4912   uint64_t Size = getContext().getTypeSize(RetTy);
4913 
4914   if (RetTy->isVoidType() || Size == 0)
4915     return ABIArgInfo::getIgnore();
4916 
4917   if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
4918     if (isRecordReturnIndirect(RetTy, CGT))
4919       return ABIArgInfo::getIndirect(0);
4920 
4921     if (Size <= 128) {
4922       if (RetTy->isAnyComplexType())
4923         return ABIArgInfo::getDirect();
4924 
4925       // O32 returns integer vectors in registers.
4926       if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())
4927         return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
4928 
4929       if (!IsO32)
4930         return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
4931     }
4932 
4933     return ABIArgInfo::getIndirect(0);
4934   }
4935 
4936   // Treat an enum type as its underlying type.
4937   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
4938     RetTy = EnumTy->getDecl()->getIntegerType();
4939 
4940   return (RetTy->isPromotableIntegerType() ?
4941           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4942 }
4943 
4944 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
4945   ABIArgInfo &RetInfo = FI.getReturnInfo();
4946   RetInfo = classifyReturnType(FI.getReturnType());
4947 
4948   // Check if a pointer to an aggregate is passed as a hidden argument.
4949   uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
4950 
4951   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
4952        it != ie; ++it)
4953     it->info = classifyArgumentType(it->type, Offset);
4954 }
4955 
4956 llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4957                                     CodeGenFunction &CGF) const {
4958   llvm::Type *BP = CGF.Int8PtrTy;
4959   llvm::Type *BPP = CGF.Int8PtrPtrTy;
4960 
4961   CGBuilderTy &Builder = CGF.Builder;
4962   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
4963   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
4964   int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
4965   llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
4966   llvm::Value *AddrTyped;
4967   unsigned PtrWidth = getTarget().getPointerWidth(0);
4968   llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
4969 
4970   if (TypeAlign > MinABIStackAlignInBytes) {
4971     llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
4972     llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
4973     llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
4974     llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
4975     llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
4976     AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
4977   }
4978   else
4979     AddrTyped = Builder.CreateBitCast(Addr, PTy);
4980 
4981   llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
4982   TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
4983   uint64_t Offset =
4984     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
4985   llvm::Value *NextAddr =
4986     Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
4987                       "ap.next");
4988   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
4989 
4990   return AddrTyped;
4991 }
4992 
4993 bool
4994 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4995                                                llvm::Value *Address) const {
4996   // This information comes from gcc's implementation, which seems to
4997   // as canonical as it gets.
4998 
4999   // Everything on MIPS is 4 bytes.  Double-precision FP registers
5000   // are aliased to pairs of single-precision FP registers.
5001   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
5002 
5003   // 0-31 are the general purpose registers, $0 - $31.
5004   // 32-63 are the floating-point registers, $f0 - $f31.
5005   // 64 and 65 are the multiply/divide registers, $hi and $lo.
5006   // 66 is the (notional, I think) register for signal-handler return.
5007   AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
5008 
5009   // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
5010   // They are one bit wide and ignored here.
5011 
5012   // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
5013   // (coprocessor 1 is the FP unit)
5014   // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
5015   // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
5016   // 176-181 are the DSP accumulator registers.
5017   AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
5018   return false;
5019 }
5020 
5021 //===----------------------------------------------------------------------===//
5022 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
5023 // Currently subclassed only to implement custom OpenCL C function attribute
5024 // handling.
5025 //===----------------------------------------------------------------------===//
5026 
5027 namespace {
5028 
5029 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
5030 public:
5031   TCETargetCodeGenInfo(CodeGenTypes &CGT)
5032     : DefaultTargetCodeGenInfo(CGT) {}
5033 
5034   virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5035                                    CodeGen::CodeGenModule &M) const;
5036 };
5037 
5038 void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
5039                                                llvm::GlobalValue *GV,
5040                                                CodeGen::CodeGenModule &M) const {
5041   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
5042   if (!FD) return;
5043 
5044   llvm::Function *F = cast<llvm::Function>(GV);
5045 
5046   if (M.getLangOpts().OpenCL) {
5047     if (FD->hasAttr<OpenCLKernelAttr>()) {
5048       // OpenCL C Kernel functions are not subject to inlining
5049       F->addFnAttr(llvm::Attribute::NoInline);
5050 
5051       if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
5052 
5053         // Convert the reqd_work_group_size() attributes to metadata.
5054         llvm::LLVMContext &Context = F->getContext();
5055         llvm::NamedMDNode *OpenCLMetadata =
5056             M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");
5057 
5058         SmallVector<llvm::Value*, 5> Operands;
5059         Operands.push_back(F);
5060 
5061         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
5062                              llvm::APInt(32,
5063                              FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
5064         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
5065                              llvm::APInt(32,
5066                                FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim())));
5067         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
5068                              llvm::APInt(32,
5069                                FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim())));
5070 
5071         // Add a boolean constant operand for "required" (true) or "hint" (false)
5072         // for implementing the work_group_size_hint attr later. Currently
5073         // always true as the hint is not yet implemented.
5074         Operands.push_back(llvm::ConstantInt::getTrue(Context));
5075         OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
5076       }
5077     }
5078   }
5079 }
5080 
5081 }
5082 
5083 //===----------------------------------------------------------------------===//
5084 // Hexagon ABI Implementation
5085 //===----------------------------------------------------------------------===//
5086 
5087 namespace {
5088 
5089 class HexagonABIInfo : public ABIInfo {
5090 
5091 
5092 public:
5093   HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5094 
5095 private:
5096 
5097   ABIArgInfo classifyReturnType(QualType RetTy) const;
5098   ABIArgInfo classifyArgumentType(QualType RetTy) const;
5099 
5100   virtual void computeInfo(CGFunctionInfo &FI) const;
5101 
5102   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5103                                  CodeGenFunction &CGF) const;
5104 };
5105 
5106 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
5107 public:
5108   HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
5109     :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
5110 
5111   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
5112     return 29;
5113   }
5114 };
5115 
5116 }
5117 
5118 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
5119   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5120   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
5121        it != ie; ++it)
5122     it->info = classifyArgumentType(it->type);
5123 }
5124 
5125 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
5126   if (!isAggregateTypeForABI(Ty)) {
5127     // Treat an enum type as its underlying type.
5128     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5129       Ty = EnumTy->getDecl()->getIntegerType();
5130 
5131     return (Ty->isPromotableIntegerType() ?
5132             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5133   }
5134 
5135   // Ignore empty records.
5136   if (isEmptyRecord(getContext(), Ty, true))
5137     return ABIArgInfo::getIgnore();
5138 
5139   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
5140     return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
5141 
5142   uint64_t Size = getContext().getTypeSize(Ty);
5143   if (Size > 64)
5144     return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
5145     // Pass in the smallest viable integer type.
5146   else if (Size > 32)
5147       return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
5148   else if (Size > 16)
5149       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5150   else if (Size > 8)
5151       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5152   else
5153       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5154 }
5155 
5156 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
5157   if (RetTy->isVoidType())
5158     return ABIArgInfo::getIgnore();
5159 
5160   // Large vector types should be returned via memory.
5161   if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
5162     return ABIArgInfo::getIndirect(0);
5163 
5164   if (!isAggregateTypeForABI(RetTy)) {
5165     // Treat an enum type as its underlying type.
5166     if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5167       RetTy = EnumTy->getDecl()->getIntegerType();
5168 
5169     return (RetTy->isPromotableIntegerType() ?
5170             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5171   }
5172 
5173   // Structures with either a non-trivial destructor or a non-trivial
5174   // copy constructor are always indirect.
5175   if (isRecordReturnIndirect(RetTy, CGT))
5176     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5177 
5178   if (isEmptyRecord(getContext(), RetTy, true))
5179     return ABIArgInfo::getIgnore();
5180 
5181   // Aggregates <= 8 bytes are returned in r0; other aggregates
5182   // are returned indirectly.
5183   uint64_t Size = getContext().getTypeSize(RetTy);
5184   if (Size <= 64) {
5185     // Return in the smallest viable integer type.
5186     if (Size <= 8)
5187       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5188     if (Size <= 16)
5189       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5190     if (Size <= 32)
5191       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5192     return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
5193   }
5194 
5195   return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
5196 }
5197 
5198 llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5199                                        CodeGenFunction &CGF) const {
5200   // FIXME: Need to handle alignment
5201   llvm::Type *BPP = CGF.Int8PtrPtrTy;
5202 
5203   CGBuilderTy &Builder = CGF.Builder;
5204   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
5205                                                        "ap");
5206   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
5207   llvm::Type *PTy =
5208     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
5209   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
5210 
5211   uint64_t Offset =
5212     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
5213   llvm::Value *NextAddr =
5214     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
5215                       "ap.next");
5216   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
5217 
5218   return AddrTyped;
5219 }
5220 
5221 
5222 //===----------------------------------------------------------------------===//
5223 // SPARC v9 ABI Implementation.
5224 // Based on the SPARC Compliance Definition version 2.4.1.
5225 //
5226 // Function arguments a mapped to a nominal "parameter array" and promoted to
5227 // registers depending on their type. Each argument occupies 8 or 16 bytes in
5228 // the array, structs larger than 16 bytes are passed indirectly.
5229 //
5230 // One case requires special care:
5231 //
5232 //   struct mixed {
5233 //     int i;
5234 //     float f;
5235 //   };
5236 //
5237 // When a struct mixed is passed by value, it only occupies 8 bytes in the
5238 // parameter array, but the int is passed in an integer register, and the float
5239 // is passed in a floating point register. This is represented as two arguments
5240 // with the LLVM IR inreg attribute:
5241 //
5242 //   declare void f(i32 inreg %i, float inreg %f)
5243 //
5244 // The code generator will only allocate 4 bytes from the parameter array for
5245 // the inreg arguments. All other arguments are allocated a multiple of 8
5246 // bytes.
5247 //
5248 namespace {
5249 class SparcV9ABIInfo : public ABIInfo {
5250 public:
5251   SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5252 
5253 private:
5254   ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
5255   virtual void computeInfo(CGFunctionInfo &FI) const;
5256   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5257                                  CodeGenFunction &CGF) const;
5258 
5259   // Coercion type builder for structs passed in registers. The coercion type
5260   // serves two purposes:
5261   //
5262   // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
5263   //    in registers.
5264   // 2. Expose aligned floating point elements as first-level elements, so the
5265   //    code generator knows to pass them in floating point registers.
5266   //
5267   // We also compute the InReg flag which indicates that the struct contains
5268   // aligned 32-bit floats.
5269   //
5270   struct CoerceBuilder {
5271     llvm::LLVMContext &Context;
5272     const llvm::DataLayout &DL;
5273     SmallVector<llvm::Type*, 8> Elems;
5274     uint64_t Size;
5275     bool InReg;
5276 
5277     CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
5278       : Context(c), DL(dl), Size(0), InReg(false) {}
5279 
5280     // Pad Elems with integers until Size is ToSize.
5281     void pad(uint64_t ToSize) {
5282       assert(ToSize >= Size && "Cannot remove elements");
5283       if (ToSize == Size)
5284         return;
5285 
5286       // Finish the current 64-bit word.
5287       uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
5288       if (Aligned > Size && Aligned <= ToSize) {
5289         Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
5290         Size = Aligned;
5291       }
5292 
5293       // Add whole 64-bit words.
5294       while (Size + 64 <= ToSize) {
5295         Elems.push_back(llvm::Type::getInt64Ty(Context));
5296         Size += 64;
5297       }
5298 
5299       // Final in-word padding.
5300       if (Size < ToSize) {
5301         Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
5302         Size = ToSize;
5303       }
5304     }
5305 
5306     // Add a floating point element at Offset.
5307     void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
5308       // Unaligned floats are treated as integers.
5309       if (Offset % Bits)
5310         return;
5311       // The InReg flag is only required if there are any floats < 64 bits.
5312       if (Bits < 64)
5313         InReg = true;
5314       pad(Offset);
5315       Elems.push_back(Ty);
5316       Size = Offset + Bits;
5317     }
5318 
5319     // Add a struct type to the coercion type, starting at Offset (in bits).
5320     void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
5321       const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
5322       for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
5323         llvm::Type *ElemTy = StrTy->getElementType(i);
5324         uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
5325         switch (ElemTy->getTypeID()) {
5326         case llvm::Type::StructTyID:
5327           addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
5328           break;
5329         case llvm::Type::FloatTyID:
5330           addFloat(ElemOffset, ElemTy, 32);
5331           break;
5332         case llvm::Type::DoubleTyID:
5333           addFloat(ElemOffset, ElemTy, 64);
5334           break;
5335         case llvm::Type::FP128TyID:
5336           addFloat(ElemOffset, ElemTy, 128);
5337           break;
5338         case llvm::Type::PointerTyID:
5339           if (ElemOffset % 64 == 0) {
5340             pad(ElemOffset);
5341             Elems.push_back(ElemTy);
5342             Size += 64;
5343           }
5344           break;
5345         default:
5346           break;
5347         }
5348       }
5349     }
5350 
5351     // Check if Ty is a usable substitute for the coercion type.
5352     bool isUsableType(llvm::StructType *Ty) const {
5353       if (Ty->getNumElements() != Elems.size())
5354         return false;
5355       for (unsigned i = 0, e = Elems.size(); i != e; ++i)
5356         if (Elems[i] != Ty->getElementType(i))
5357           return false;
5358       return true;
5359     }
5360 
5361     // Get the coercion type as a literal struct type.
5362     llvm::Type *getType() const {
5363       if (Elems.size() == 1)
5364         return Elems.front();
5365       else
5366         return llvm::StructType::get(Context, Elems);
5367     }
5368   };
5369 };
5370 } // end anonymous namespace
5371 
5372 ABIArgInfo
5373 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
5374   if (Ty->isVoidType())
5375     return ABIArgInfo::getIgnore();
5376 
5377   uint64_t Size = getContext().getTypeSize(Ty);
5378 
5379   // Anything too big to fit in registers is passed with an explicit indirect
5380   // pointer / sret pointer.
5381   if (Size > SizeLimit)
5382     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5383 
5384   // Treat an enum type as its underlying type.
5385   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5386     Ty = EnumTy->getDecl()->getIntegerType();
5387 
5388   // Integer types smaller than a register are extended.
5389   if (Size < 64 && Ty->isIntegerType())
5390     return ABIArgInfo::getExtend();
5391 
5392   // Other non-aggregates go in registers.
5393   if (!isAggregateTypeForABI(Ty))
5394     return ABIArgInfo::getDirect();
5395 
5396   // This is a small aggregate type that should be passed in registers.
5397   // Build a coercion type from the LLVM struct type.
5398   llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
5399   if (!StrTy)
5400     return ABIArgInfo::getDirect();
5401 
5402   CoerceBuilder CB(getVMContext(), getDataLayout());
5403   CB.addStruct(0, StrTy);
5404   CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));
5405 
5406   // Try to use the original type for coercion.
5407   llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
5408 
5409   if (CB.InReg)
5410     return ABIArgInfo::getDirectInReg(CoerceTy);
5411   else
5412     return ABIArgInfo::getDirect(CoerceTy);
5413 }
5414 
5415 llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5416                                        CodeGenFunction &CGF) const {
5417   ABIArgInfo AI = classifyType(Ty, 16 * 8);
5418   llvm::Type *ArgTy = CGT.ConvertType(Ty);
5419   if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
5420     AI.setCoerceToType(ArgTy);
5421 
5422   llvm::Type *BPP = CGF.Int8PtrPtrTy;
5423   CGBuilderTy &Builder = CGF.Builder;
5424   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
5425   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
5426   llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
5427   llvm::Value *ArgAddr;
5428   unsigned Stride;
5429 
5430   switch (AI.getKind()) {
5431   case ABIArgInfo::Expand:
5432     llvm_unreachable("Unsupported ABI kind for va_arg");
5433 
5434   case ABIArgInfo::Extend:
5435     Stride = 8;
5436     ArgAddr = Builder
5437       .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
5438                           "extend");
5439     break;
5440 
5441   case ABIArgInfo::Direct:
5442     Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
5443     ArgAddr = Addr;
5444     break;
5445 
5446   case ABIArgInfo::Indirect:
5447     Stride = 8;
5448     ArgAddr = Builder.CreateBitCast(Addr,
5449                                     llvm::PointerType::getUnqual(ArgPtrTy),
5450                                     "indirect");
5451     ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
5452     break;
5453 
5454   case ABIArgInfo::Ignore:
5455     return llvm::UndefValue::get(ArgPtrTy);
5456   }
5457 
5458   // Update VAList.
5459   Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
5460   Builder.CreateStore(Addr, VAListAddrAsBPP);
5461 
5462   return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
5463 }
5464 
5465 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
5466   FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
5467   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
5468        it != ie; ++it)
5469     it->info = classifyType(it->type, 16 * 8);
5470 }
5471 
5472 namespace {
5473 class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
5474 public:
5475   SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
5476     : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
5477 };
5478 } // end anonymous namespace
5479 
5480 
5481 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
5482   if (TheTargetCodeGenInfo)
5483     return *TheTargetCodeGenInfo;
5484 
5485   const llvm::Triple &Triple = getTarget().getTriple();
5486   switch (Triple.getArch()) {
5487   default:
5488     return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
5489 
5490   case llvm::Triple::le32:
5491     return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
5492   case llvm::Triple::mips:
5493   case llvm::Triple::mipsel:
5494     return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
5495 
5496   case llvm::Triple::mips64:
5497   case llvm::Triple::mips64el:
5498     return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
5499 
5500   case llvm::Triple::aarch64:
5501     return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types));
5502 
5503   case llvm::Triple::arm:
5504   case llvm::Triple::thumb:
5505     {
5506       ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
5507       if (strcmp(getTarget().getABI(), "apcs-gnu") == 0)
5508         Kind = ARMABIInfo::APCS;
5509       else if (CodeGenOpts.FloatABI == "hard" ||
5510                (CodeGenOpts.FloatABI != "soft" &&
5511                 Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
5512         Kind = ARMABIInfo::AAPCS_VFP;
5513 
5514       switch (Triple.getOS()) {
5515         case llvm::Triple::NaCl:
5516           return *(TheTargetCodeGenInfo =
5517                    new NaClARMTargetCodeGenInfo(Types, Kind));
5518         default:
5519           return *(TheTargetCodeGenInfo =
5520                    new ARMTargetCodeGenInfo(Types, Kind));
5521       }
5522     }
5523 
5524   case llvm::Triple::ppc:
5525     return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
5526   case llvm::Triple::ppc64:
5527     if (Triple.isOSBinFormatELF())
5528       return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
5529     else
5530       return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
5531 
5532   case llvm::Triple::nvptx:
5533   case llvm::Triple::nvptx64:
5534     return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));
5535 
5536   case llvm::Triple::mblaze:
5537     return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));
5538 
5539   case llvm::Triple::msp430:
5540     return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
5541 
5542   case llvm::Triple::systemz:
5543     return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
5544 
5545   case llvm::Triple::tce:
5546     return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));
5547 
5548   case llvm::Triple::x86: {
5549     bool IsDarwinVectorABI = Triple.isOSDarwin();
5550     bool IsSmallStructInRegABI =
5551         X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
5552     bool IsWin32FloatStructABI = (Triple.getOS() == llvm::Triple::Win32);
5553 
5554     if (Triple.getOS() == llvm::Triple::Win32) {
5555       return *(TheTargetCodeGenInfo =
5556                new WinX86_32TargetCodeGenInfo(Types,
5557                                               IsDarwinVectorABI, IsSmallStructInRegABI,
5558                                               IsWin32FloatStructABI,
5559                                               CodeGenOpts.NumRegisterParameters));
5560     } else {
5561       return *(TheTargetCodeGenInfo =
5562                new X86_32TargetCodeGenInfo(Types,
5563                                            IsDarwinVectorABI, IsSmallStructInRegABI,
5564                                            IsWin32FloatStructABI,
5565                                            CodeGenOpts.NumRegisterParameters));
5566     }
5567   }
5568 
5569   case llvm::Triple::x86_64: {
5570     bool HasAVX = strcmp(getTarget().getABI(), "avx") == 0;
5571 
5572     switch (Triple.getOS()) {
5573     case llvm::Triple::Win32:
5574     case llvm::Triple::MinGW32:
5575     case llvm::Triple::Cygwin:
5576       return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
5577     case llvm::Triple::NaCl:
5578       return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types,
5579                                                                       HasAVX));
5580     default:
5581       return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
5582                                                                   HasAVX));
5583     }
5584   }
5585   case llvm::Triple::hexagon:
5586     return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
5587   case llvm::Triple::sparcv9:
5588     return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
5589   }
5590 }
5591