1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliancy.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "TargetInfo.h"
16 #include "ABIInfo.h"
17 #include "CGCXXABI.h"
18 #include "CodeGenFunction.h"
19 #include "clang/AST/RecordLayout.h"
20 #include "clang/Frontend/CodeGenOptions.h"
21 #include "llvm/ADT/Triple.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Type.h"
24 #include "llvm/Support/raw_ostream.h"
25 using namespace clang;
26 using namespace CodeGen;
27 
28 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
29                                llvm::Value *Array,
30                                llvm::Value *Value,
31                                unsigned FirstIndex,
32                                unsigned LastIndex) {
33   // Alternatively, we could emit this as a loop in the source.
34   for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
35     llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
36     Builder.CreateStore(Value, Cell);
37   }
38 }
39 
40 static bool isAggregateTypeForABI(QualType T) {
41   return !CodeGenFunction::hasScalarEvaluationKind(T) ||
42          T->isMemberFunctionPointerType();
43 }
44 
45 ABIInfo::~ABIInfo() {}
46 
47 static bool isRecordReturnIndirect(const RecordType *RT, CodeGen::CodeGenTypes &CGT) {
48   const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
49   if (!RD)
50     return false;
51   return CGT.CGM.getCXXABI().isReturnTypeIndirect(RD);
52 }
53 
54 
55 static bool isRecordReturnIndirect(QualType T, CodeGen::CodeGenTypes &CGT) {
56   const RecordType *RT = T->getAs<RecordType>();
57   if (!RT)
58     return false;
59   return isRecordReturnIndirect(RT, CGT);
60 }
61 
62 static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
63                                               CodeGen::CodeGenTypes &CGT) {
64   const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
65   if (!RD)
66     return CGCXXABI::RAA_Default;
67   return CGT.CGM.getCXXABI().getRecordArgABI(RD);
68 }
69 
70 static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
71                                               CodeGen::CodeGenTypes &CGT) {
72   const RecordType *RT = T->getAs<RecordType>();
73   if (!RT)
74     return CGCXXABI::RAA_Default;
75   return getRecordArgABI(RT, CGT);
76 }
77 
78 ASTContext &ABIInfo::getContext() const {
79   return CGT.getContext();
80 }
81 
82 llvm::LLVMContext &ABIInfo::getVMContext() const {
83   return CGT.getLLVMContext();
84 }
85 
86 const llvm::DataLayout &ABIInfo::getDataLayout() const {
87   return CGT.getDataLayout();
88 }
89 
90 const TargetInfo &ABIInfo::getTarget() const {
91   return CGT.getTarget();
92 }
93 
94 void ABIArgInfo::dump() const {
95   raw_ostream &OS = llvm::errs();
96   OS << "(ABIArgInfo Kind=";
97   switch (TheKind) {
98   case Direct:
99     OS << "Direct Type=";
100     if (llvm::Type *Ty = getCoerceToType())
101       Ty->print(OS);
102     else
103       OS << "null";
104     break;
105   case Extend:
106     OS << "Extend";
107     break;
108   case Ignore:
109     OS << "Ignore";
110     break;
111   case Indirect:
112     OS << "Indirect Align=" << getIndirectAlign()
113        << " ByVal=" << getIndirectByVal()
114        << " Realign=" << getIndirectRealign();
115     break;
116   case Expand:
117     OS << "Expand";
118     break;
119   }
120   OS << ")\n";
121 }
122 
123 TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
124 
125 // If someone can figure out a general rule for this, that would be great.
126 // It's probably just doomed to be platform-dependent, though.
127 unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
128   // Verified for:
129   //   x86-64     FreeBSD, Linux, Darwin
130   //   x86-32     FreeBSD, Linux, Darwin
131   //   PowerPC    Linux, Darwin
132   //   ARM        Darwin (*not* EABI)
133   //   AArch64    Linux
134   return 32;
135 }
136 
137 bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
138                                      const FunctionNoProtoType *fnType) const {
139   // The following conventions are known to require this to be false:
140   //   x86_stdcall
141   //   MIPS
142   // For everything else, we just prefer false unless we opt out.
143   return false;
144 }
145 
146 void
147 TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
148                                              llvm::SmallString<24> &Opt) const {
149   // This assumes the user is passing a library name like "rt" instead of a
150   // filename like "librt.a/so", and that they don't care whether it's static or
151   // dynamic.
152   Opt = "-l";
153   Opt += Lib;
154 }
155 
156 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
157 
158 /// isEmptyField - Return true iff a the field is "empty", that is it
159 /// is an unnamed bit-field or an (array of) empty record(s).
160 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
161                          bool AllowArrays) {
162   if (FD->isUnnamedBitfield())
163     return true;
164 
165   QualType FT = FD->getType();
166 
167   // Constant arrays of empty records count as empty, strip them off.
168   // Constant arrays of zero length always count as empty.
169   if (AllowArrays)
170     while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
171       if (AT->getSize() == 0)
172         return true;
173       FT = AT->getElementType();
174     }
175 
176   const RecordType *RT = FT->getAs<RecordType>();
177   if (!RT)
178     return false;
179 
180   // C++ record fields are never empty, at least in the Itanium ABI.
181   //
182   // FIXME: We should use a predicate for whether this behavior is true in the
183   // current ABI.
184   if (isa<CXXRecordDecl>(RT->getDecl()))
185     return false;
186 
187   return isEmptyRecord(Context, FT, AllowArrays);
188 }
189 
190 /// isEmptyRecord - Return true iff a structure contains only empty
191 /// fields. Note that a structure with a flexible array member is not
192 /// considered empty.
193 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
194   const RecordType *RT = T->getAs<RecordType>();
195   if (!RT)
196     return 0;
197   const RecordDecl *RD = RT->getDecl();
198   if (RD->hasFlexibleArrayMember())
199     return false;
200 
201   // If this is a C++ record, check the bases first.
202   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
203     for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
204            e = CXXRD->bases_end(); i != e; ++i)
205       if (!isEmptyRecord(Context, i->getType(), true))
206         return false;
207 
208   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
209          i != e; ++i)
210     if (!isEmptyField(Context, *i, AllowArrays))
211       return false;
212   return true;
213 }
214 
215 /// isSingleElementStruct - Determine if a structure is a "single
216 /// element struct", i.e. it has exactly one non-empty field or
217 /// exactly one field which is itself a single element
218 /// struct. Structures with flexible array members are never
219 /// considered single element structs.
220 ///
221 /// \return The field declaration for the single non-empty field, if
222 /// it exists.
223 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
224   const RecordType *RT = T->getAsStructureType();
225   if (!RT)
226     return 0;
227 
228   const RecordDecl *RD = RT->getDecl();
229   if (RD->hasFlexibleArrayMember())
230     return 0;
231 
232   const Type *Found = 0;
233 
234   // If this is a C++ record, check the bases first.
235   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
236     for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
237            e = CXXRD->bases_end(); i != e; ++i) {
238       // Ignore empty records.
239       if (isEmptyRecord(Context, i->getType(), true))
240         continue;
241 
242       // If we already found an element then this isn't a single-element struct.
243       if (Found)
244         return 0;
245 
246       // If this is non-empty and not a single element struct, the composite
247       // cannot be a single element struct.
248       Found = isSingleElementStruct(i->getType(), Context);
249       if (!Found)
250         return 0;
251     }
252   }
253 
254   // Check for single element.
255   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
256          i != e; ++i) {
257     const FieldDecl *FD = *i;
258     QualType FT = FD->getType();
259 
260     // Ignore empty fields.
261     if (isEmptyField(Context, FD, true))
262       continue;
263 
264     // If we already found an element then this isn't a single-element
265     // struct.
266     if (Found)
267       return 0;
268 
269     // Treat single element arrays as the element.
270     while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
271       if (AT->getSize().getZExtValue() != 1)
272         break;
273       FT = AT->getElementType();
274     }
275 
276     if (!isAggregateTypeForABI(FT)) {
277       Found = FT.getTypePtr();
278     } else {
279       Found = isSingleElementStruct(FT, Context);
280       if (!Found)
281         return 0;
282     }
283   }
284 
285   // We don't consider a struct a single-element struct if it has
286   // padding beyond the element type.
287   if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
288     return 0;
289 
290   return Found;
291 }
292 
293 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
294   // Treat complex types as the element type.
295   if (const ComplexType *CTy = Ty->getAs<ComplexType>())
296     Ty = CTy->getElementType();
297 
298   // Check for a type which we know has a simple scalar argument-passing
299   // convention without any padding.  (We're specifically looking for 32
300   // and 64-bit integer and integer-equivalents, float, and double.)
301   if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
302       !Ty->isEnumeralType() && !Ty->isBlockPointerType())
303     return false;
304 
305   uint64_t Size = Context.getTypeSize(Ty);
306   return Size == 32 || Size == 64;
307 }
308 
309 /// canExpandIndirectArgument - Test whether an argument type which is to be
310 /// passed indirectly (on the stack) would have the equivalent layout if it was
311 /// expanded into separate arguments. If so, we prefer to do the latter to avoid
312 /// inhibiting optimizations.
313 ///
314 // FIXME: This predicate is missing many cases, currently it just follows
315 // llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
316 // should probably make this smarter, or better yet make the LLVM backend
317 // capable of handling it.
318 static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
319   // We can only expand structure types.
320   const RecordType *RT = Ty->getAs<RecordType>();
321   if (!RT)
322     return false;
323 
324   // We can only expand (C) structures.
325   //
326   // FIXME: This needs to be generalized to handle classes as well.
327   const RecordDecl *RD = RT->getDecl();
328   if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
329     return false;
330 
331   uint64_t Size = 0;
332 
333   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
334          i != e; ++i) {
335     const FieldDecl *FD = *i;
336 
337     if (!is32Or64BitBasicType(FD->getType(), Context))
338       return false;
339 
340     // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
341     // how to expand them yet, and the predicate for telling if a bitfield still
342     // counts as "basic" is more complicated than what we were doing previously.
343     if (FD->isBitField())
344       return false;
345 
346     Size += Context.getTypeSize(FD->getType());
347   }
348 
349   // Make sure there are not any holes in the struct.
350   if (Size != Context.getTypeSize(Ty))
351     return false;
352 
353   return true;
354 }
355 
356 namespace {
357 /// DefaultABIInfo - The default implementation for ABI specific
358 /// details. This implementation provides information which results in
359 /// self-consistent and sensible LLVM IR generation, but does not
360 /// conform to any particular ABI.
361 class DefaultABIInfo : public ABIInfo {
362 public:
363   DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
364 
365   ABIArgInfo classifyReturnType(QualType RetTy) const;
366   ABIArgInfo classifyArgumentType(QualType RetTy) const;
367 
368   virtual void computeInfo(CGFunctionInfo &FI) const {
369     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
370     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
371          it != ie; ++it)
372       it->info = classifyArgumentType(it->type);
373   }
374 
375   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
376                                  CodeGenFunction &CGF) const;
377 };
378 
379 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
380 public:
381   DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
382     : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
383 };
384 
385 llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
386                                        CodeGenFunction &CGF) const {
387   return 0;
388 }
389 
390 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
391   if (isAggregateTypeForABI(Ty)) {
392     // Records with non trivial destructors/constructors should not be passed
393     // by value.
394     if (isRecordReturnIndirect(Ty, CGT))
395       return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
396 
397     return ABIArgInfo::getIndirect(0);
398   }
399 
400   // Treat an enum type as its underlying type.
401   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
402     Ty = EnumTy->getDecl()->getIntegerType();
403 
404   return (Ty->isPromotableIntegerType() ?
405           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
406 }
407 
408 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
409   if (RetTy->isVoidType())
410     return ABIArgInfo::getIgnore();
411 
412   if (isAggregateTypeForABI(RetTy))
413     return ABIArgInfo::getIndirect(0);
414 
415   // Treat an enum type as its underlying type.
416   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
417     RetTy = EnumTy->getDecl()->getIntegerType();
418 
419   return (RetTy->isPromotableIntegerType() ?
420           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
421 }
422 
423 //===----------------------------------------------------------------------===//
424 // le32/PNaCl bitcode ABI Implementation
425 //
426 // This is a simplified version of the x86_32 ABI.  Arguments and return values
427 // are always passed on the stack.
428 //===----------------------------------------------------------------------===//
429 
430 class PNaClABIInfo : public ABIInfo {
431  public:
432   PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
433 
434   ABIArgInfo classifyReturnType(QualType RetTy) const;
435   ABIArgInfo classifyArgumentType(QualType RetTy) const;
436 
437   virtual void computeInfo(CGFunctionInfo &FI) const;
438   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
439                                  CodeGenFunction &CGF) const;
440 };
441 
442 class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
443  public:
444   PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
445     : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
446 
447   /// For PNaCl we don't want llvm.pow.* intrinsics to be emitted instead
448   /// of library function calls.
449   bool emitIntrinsicForPow() const { return false; }
450 };
451 
452 void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
453     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
454 
455     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
456          it != ie; ++it)
457       it->info = classifyArgumentType(it->type);
458   }
459 
460 llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
461                                        CodeGenFunction &CGF) const {
462   return 0;
463 }
464 
465 /// \brief Classify argument of given type \p Ty.
466 ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
467   if (isAggregateTypeForABI(Ty)) {
468     if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
469       return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
470     return ABIArgInfo::getIndirect(0);
471   } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
472     // Treat an enum type as its underlying type.
473     Ty = EnumTy->getDecl()->getIntegerType();
474   } else if (Ty->isFloatingType()) {
475     // Floating-point types don't go inreg.
476     return ABIArgInfo::getDirect();
477   }
478 
479   return (Ty->isPromotableIntegerType() ?
480           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
481 }
482 
483 ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
484   if (RetTy->isVoidType())
485     return ABIArgInfo::getIgnore();
486 
487   // In the PNaCl ABI we always return records/structures on the stack.
488   if (isAggregateTypeForABI(RetTy))
489     return ABIArgInfo::getIndirect(0);
490 
491   // Treat an enum type as its underlying type.
492   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
493     RetTy = EnumTy->getDecl()->getIntegerType();
494 
495   return (RetTy->isPromotableIntegerType() ?
496           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
497 }
498 
499 /// IsX86_MMXType - Return true if this is an MMX type.
500 bool IsX86_MMXType(llvm::Type *IRType) {
501   // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
502   return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
503     cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
504     IRType->getScalarSizeInBits() != 64;
505 }
506 
507 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
508                                           StringRef Constraint,
509                                           llvm::Type* Ty) {
510   if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
511     if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
512       // Invalid MMX constraint
513       return 0;
514     }
515 
516     return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
517   }
518 
519   // No operation needed
520   return Ty;
521 }
522 
523 //===----------------------------------------------------------------------===//
524 // X86-32 ABI Implementation
525 //===----------------------------------------------------------------------===//
526 
527 /// X86_32ABIInfo - The X86-32 ABI information.
528 class X86_32ABIInfo : public ABIInfo {
529   enum Class {
530     Integer,
531     Float
532   };
533 
534   static const unsigned MinABIStackAlignInBytes = 4;
535 
536   bool IsDarwinVectorABI;
537   bool IsSmallStructInRegABI;
538   bool IsWin32StructABI;
539   unsigned DefaultNumRegisterParameters;
540 
541   static bool isRegisterSize(unsigned Size) {
542     return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
543   }
544 
545   static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
546                                           unsigned callingConvention);
547 
548   /// getIndirectResult - Give a source type \arg Ty, return a suitable result
549   /// such that the argument will be passed in memory.
550   ABIArgInfo getIndirectResult(QualType Ty, bool ByVal,
551                                unsigned &FreeRegs) const;
552 
553   /// \brief Return the alignment to use for the given type on the stack.
554   unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
555 
556   Class classify(QualType Ty) const;
557   ABIArgInfo classifyReturnType(QualType RetTy,
558                                 unsigned callingConvention) const;
559   ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs,
560                                   bool IsFastCall) const;
561   bool shouldUseInReg(QualType Ty, unsigned &FreeRegs,
562                       bool IsFastCall, bool &NeedsPadding) const;
563 
564 public:
565 
566   virtual void computeInfo(CGFunctionInfo &FI) const;
567   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
568                                  CodeGenFunction &CGF) const;
569 
570   X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
571                 unsigned r)
572     : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
573       IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
574 };
575 
576 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
577 public:
578   X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
579       bool d, bool p, bool w, unsigned r)
580     :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}
581 
582   static bool isStructReturnInRegABI(
583       const llvm::Triple &Triple, const CodeGenOptions &Opts);
584 
585   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
586                            CodeGen::CodeGenModule &CGM) const;
587 
588   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
589     // Darwin uses different dwarf register numbers for EH.
590     if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
591     return 4;
592   }
593 
594   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
595                                llvm::Value *Address) const;
596 
597   llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
598                                   StringRef Constraint,
599                                   llvm::Type* Ty) const {
600     return X86AdjustInlineAsmType(CGF, Constraint, Ty);
601   }
602 
603 };
604 
605 }
606 
607 /// shouldReturnTypeInRegister - Determine if the given type should be
608 /// passed in a register (for the Darwin ABI).
609 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
610                                                ASTContext &Context,
611                                                unsigned callingConvention) {
612   uint64_t Size = Context.getTypeSize(Ty);
613 
614   // Type must be register sized.
615   if (!isRegisterSize(Size))
616     return false;
617 
618   if (Ty->isVectorType()) {
619     // 64- and 128- bit vectors inside structures are not returned in
620     // registers.
621     if (Size == 64 || Size == 128)
622       return false;
623 
624     return true;
625   }
626 
627   // If this is a builtin, pointer, enum, complex type, member pointer, or
628   // member function pointer it is ok.
629   if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
630       Ty->isAnyComplexType() || Ty->isEnumeralType() ||
631       Ty->isBlockPointerType() || Ty->isMemberPointerType())
632     return true;
633 
634   // Arrays are treated like records.
635   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
636     return shouldReturnTypeInRegister(AT->getElementType(), Context,
637                                       callingConvention);
638 
639   // Otherwise, it must be a record type.
640   const RecordType *RT = Ty->getAs<RecordType>();
641   if (!RT) return false;
642 
643   // FIXME: Traverse bases here too.
644 
645   // For thiscall conventions, structures will never be returned in
646   // a register.  This is for compatibility with the MSVC ABI
647   if (callingConvention == llvm::CallingConv::X86_ThisCall &&
648       RT->isStructureType()) {
649     return false;
650   }
651 
652   // Structure types are passed in register if all fields would be
653   // passed in a register.
654   for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
655          e = RT->getDecl()->field_end(); i != e; ++i) {
656     const FieldDecl *FD = *i;
657 
658     // Empty fields are ignored.
659     if (isEmptyField(Context, FD, true))
660       continue;
661 
662     // Check fields recursively.
663     if (!shouldReturnTypeInRegister(FD->getType(), Context,
664                                     callingConvention))
665       return false;
666   }
667   return true;
668 }
669 
670 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
671                                             unsigned callingConvention) const {
672   if (RetTy->isVoidType())
673     return ABIArgInfo::getIgnore();
674 
675   if (const VectorType *VT = RetTy->getAs<VectorType>()) {
676     // On Darwin, some vectors are returned in registers.
677     if (IsDarwinVectorABI) {
678       uint64_t Size = getContext().getTypeSize(RetTy);
679 
680       // 128-bit vectors are a special case; they are returned in
681       // registers and we need to make sure to pick a type the LLVM
682       // backend will like.
683       if (Size == 128)
684         return ABIArgInfo::getDirect(llvm::VectorType::get(
685                   llvm::Type::getInt64Ty(getVMContext()), 2));
686 
687       // Always return in register if it fits in a general purpose
688       // register, or if it is 64 bits and has a single element.
689       if ((Size == 8 || Size == 16 || Size == 32) ||
690           (Size == 64 && VT->getNumElements() == 1))
691         return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
692                                                             Size));
693 
694       return ABIArgInfo::getIndirect(0);
695     }
696 
697     return ABIArgInfo::getDirect();
698   }
699 
700   if (isAggregateTypeForABI(RetTy)) {
701     if (const RecordType *RT = RetTy->getAs<RecordType>()) {
702       if (isRecordReturnIndirect(RT, CGT))
703         return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
704 
705       // Structures with flexible arrays are always indirect.
706       if (RT->getDecl()->hasFlexibleArrayMember())
707         return ABIArgInfo::getIndirect(0);
708     }
709 
710     // If specified, structs and unions are always indirect.
711     if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
712       return ABIArgInfo::getIndirect(0);
713 
714     // Small structures which are register sized are generally returned
715     // in a register.
716     if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(),
717                                                   callingConvention)) {
718       uint64_t Size = getContext().getTypeSize(RetTy);
719 
720       // As a special-case, if the struct is a "single-element" struct, and
721       // the field is of type "float" or "double", return it in a
722       // floating-point register. (MSVC does not apply this special case.)
723       // We apply a similar transformation for pointer types to improve the
724       // quality of the generated IR.
725       if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
726         if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
727             || SeltTy->hasPointerRepresentation())
728           return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
729 
730       // FIXME: We should be able to narrow this integer in cases with dead
731       // padding.
732       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
733     }
734 
735     return ABIArgInfo::getIndirect(0);
736   }
737 
738   // Treat an enum type as its underlying type.
739   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
740     RetTy = EnumTy->getDecl()->getIntegerType();
741 
742   return (RetTy->isPromotableIntegerType() ?
743           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
744 }
745 
746 static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
747   return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
748 }
749 
750 static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
751   const RecordType *RT = Ty->getAs<RecordType>();
752   if (!RT)
753     return 0;
754   const RecordDecl *RD = RT->getDecl();
755 
756   // If this is a C++ record, check the bases first.
757   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
758     for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
759            e = CXXRD->bases_end(); i != e; ++i)
760       if (!isRecordWithSSEVectorType(Context, i->getType()))
761         return false;
762 
763   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
764        i != e; ++i) {
765     QualType FT = i->getType();
766 
767     if (isSSEVectorType(Context, FT))
768       return true;
769 
770     if (isRecordWithSSEVectorType(Context, FT))
771       return true;
772   }
773 
774   return false;
775 }
776 
777 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
778                                                  unsigned Align) const {
779   // Otherwise, if the alignment is less than or equal to the minimum ABI
780   // alignment, just use the default; the backend will handle this.
781   if (Align <= MinABIStackAlignInBytes)
782     return 0; // Use default alignment.
783 
784   // On non-Darwin, the stack type alignment is always 4.
785   if (!IsDarwinVectorABI) {
786     // Set explicit alignment, since we may need to realign the top.
787     return MinABIStackAlignInBytes;
788   }
789 
790   // Otherwise, if the type contains an SSE vector type, the alignment is 16.
791   if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
792                       isRecordWithSSEVectorType(getContext(), Ty)))
793     return 16;
794 
795   return MinABIStackAlignInBytes;
796 }
797 
798 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
799                                             unsigned &FreeRegs) const {
800   if (!ByVal) {
801     if (FreeRegs) {
802       --FreeRegs; // Non byval indirects just use one pointer.
803       return ABIArgInfo::getIndirectInReg(0, false);
804     }
805     return ABIArgInfo::getIndirect(0, false);
806   }
807 
808   // Compute the byval alignment.
809   unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
810   unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
811   if (StackAlign == 0)
812     return ABIArgInfo::getIndirect(4);
813 
814   // If the stack alignment is less than the type alignment, realign the
815   // argument.
816   if (StackAlign < TypeAlign)
817     return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
818                                    /*Realign=*/true);
819 
820   return ABIArgInfo::getIndirect(StackAlign);
821 }
822 
823 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
824   const Type *T = isSingleElementStruct(Ty, getContext());
825   if (!T)
826     T = Ty.getTypePtr();
827 
828   if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
829     BuiltinType::Kind K = BT->getKind();
830     if (K == BuiltinType::Float || K == BuiltinType::Double)
831       return Float;
832   }
833   return Integer;
834 }
835 
836 bool X86_32ABIInfo::shouldUseInReg(QualType Ty, unsigned &FreeRegs,
837                                    bool IsFastCall, bool &NeedsPadding) const {
838   NeedsPadding = false;
839   Class C = classify(Ty);
840   if (C == Float)
841     return false;
842 
843   unsigned Size = getContext().getTypeSize(Ty);
844   unsigned SizeInRegs = (Size + 31) / 32;
845 
846   if (SizeInRegs == 0)
847     return false;
848 
849   if (SizeInRegs > FreeRegs) {
850     FreeRegs = 0;
851     return false;
852   }
853 
854   FreeRegs -= SizeInRegs;
855 
856   if (IsFastCall) {
857     if (Size > 32)
858       return false;
859 
860     if (Ty->isIntegralOrEnumerationType())
861       return true;
862 
863     if (Ty->isPointerType())
864       return true;
865 
866     if (Ty->isReferenceType())
867       return true;
868 
869     if (FreeRegs)
870       NeedsPadding = true;
871 
872     return false;
873   }
874 
875   return true;
876 }
877 
878 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
879                                                unsigned &FreeRegs,
880                                                bool IsFastCall) const {
881   // FIXME: Set alignment on indirect arguments.
882   if (isAggregateTypeForABI(Ty)) {
883     if (const RecordType *RT = Ty->getAs<RecordType>()) {
884       if (IsWin32StructABI)
885         return getIndirectResult(Ty, true, FreeRegs);
886 
887       if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, CGT))
888         return getIndirectResult(Ty, RAA == CGCXXABI::RAA_DirectInMemory, FreeRegs);
889 
890       // Structures with flexible arrays are always indirect.
891       if (RT->getDecl()->hasFlexibleArrayMember())
892         return getIndirectResult(Ty, true, FreeRegs);
893     }
894 
895     // Ignore empty structs/unions.
896     if (isEmptyRecord(getContext(), Ty, true))
897       return ABIArgInfo::getIgnore();
898 
899     llvm::LLVMContext &LLVMContext = getVMContext();
900     llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
901     bool NeedsPadding;
902     if (shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding)) {
903       unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
904       SmallVector<llvm::Type*, 3> Elements;
905       for (unsigned I = 0; I < SizeInRegs; ++I)
906         Elements.push_back(Int32);
907       llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
908       return ABIArgInfo::getDirectInReg(Result);
909     }
910     llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : 0;
911 
912     // Expand small (<= 128-bit) record types when we know that the stack layout
913     // of those arguments will match the struct. This is important because the
914     // LLVM backend isn't smart enough to remove byval, which inhibits many
915     // optimizations.
916     if (getContext().getTypeSize(Ty) <= 4*32 &&
917         canExpandIndirectArgument(Ty, getContext()))
918       return ABIArgInfo::getExpandWithPadding(IsFastCall, PaddingType);
919 
920     return getIndirectResult(Ty, true, FreeRegs);
921   }
922 
923   if (const VectorType *VT = Ty->getAs<VectorType>()) {
924     // On Darwin, some vectors are passed in memory, we handle this by passing
925     // it as an i8/i16/i32/i64.
926     if (IsDarwinVectorABI) {
927       uint64_t Size = getContext().getTypeSize(Ty);
928       if ((Size == 8 || Size == 16 || Size == 32) ||
929           (Size == 64 && VT->getNumElements() == 1))
930         return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
931                                                             Size));
932     }
933 
934     if (IsX86_MMXType(CGT.ConvertType(Ty)))
935       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
936 
937     return ABIArgInfo::getDirect();
938   }
939 
940 
941   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
942     Ty = EnumTy->getDecl()->getIntegerType();
943 
944   bool NeedsPadding;
945   bool InReg = shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding);
946 
947   if (Ty->isPromotableIntegerType()) {
948     if (InReg)
949       return ABIArgInfo::getExtendInReg();
950     return ABIArgInfo::getExtend();
951   }
952   if (InReg)
953     return ABIArgInfo::getDirectInReg();
954   return ABIArgInfo::getDirect();
955 }
956 
957 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
958   FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
959                                           FI.getCallingConvention());
960 
961   unsigned CC = FI.getCallingConvention();
962   bool IsFastCall = CC == llvm::CallingConv::X86_FastCall;
963   unsigned FreeRegs;
964   if (IsFastCall)
965     FreeRegs = 2;
966   else if (FI.getHasRegParm())
967     FreeRegs = FI.getRegParm();
968   else
969     FreeRegs = DefaultNumRegisterParameters;
970 
971   // If the return value is indirect, then the hidden argument is consuming one
972   // integer register.
973   if (FI.getReturnInfo().isIndirect() && FreeRegs) {
974     --FreeRegs;
975     ABIArgInfo &Old = FI.getReturnInfo();
976     Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(),
977                                        Old.getIndirectByVal(),
978                                        Old.getIndirectRealign());
979   }
980 
981   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
982        it != ie; ++it)
983     it->info = classifyArgumentType(it->type, FreeRegs, IsFastCall);
984 }
985 
986 llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
987                                       CodeGenFunction &CGF) const {
988   llvm::Type *BPP = CGF.Int8PtrPtrTy;
989 
990   CGBuilderTy &Builder = CGF.Builder;
991   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
992                                                        "ap");
993   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
994 
995   // Compute if the address needs to be aligned
996   unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
997   Align = getTypeStackAlignInBytes(Ty, Align);
998   Align = std::max(Align, 4U);
999   if (Align > 4) {
1000     // addr = (addr + align - 1) & -align;
1001     llvm::Value *Offset =
1002       llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
1003     Addr = CGF.Builder.CreateGEP(Addr, Offset);
1004     llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
1005                                                     CGF.Int32Ty);
1006     llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
1007     Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
1008                                       Addr->getType(),
1009                                       "ap.cur.aligned");
1010   }
1011 
1012   llvm::Type *PTy =
1013     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
1014   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
1015 
1016   uint64_t Offset =
1017     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
1018   llvm::Value *NextAddr =
1019     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
1020                       "ap.next");
1021   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
1022 
1023   return AddrTyped;
1024 }
1025 
1026 void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
1027                                                   llvm::GlobalValue *GV,
1028                                             CodeGen::CodeGenModule &CGM) const {
1029   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
1030     if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1031       // Get the LLVM function.
1032       llvm::Function *Fn = cast<llvm::Function>(GV);
1033 
1034       // Now add the 'alignstack' attribute with a value of 16.
1035       llvm::AttrBuilder B;
1036       B.addStackAlignmentAttr(16);
1037       Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
1038                       llvm::AttributeSet::get(CGM.getLLVMContext(),
1039                                               llvm::AttributeSet::FunctionIndex,
1040                                               B));
1041     }
1042   }
1043 }
1044 
1045 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1046                                                CodeGen::CodeGenFunction &CGF,
1047                                                llvm::Value *Address) const {
1048   CodeGen::CGBuilderTy &Builder = CGF.Builder;
1049 
1050   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
1051 
1052   // 0-7 are the eight integer registers;  the order is different
1053   //   on Darwin (for EH), but the range is the same.
1054   // 8 is %eip.
1055   AssignToArrayRange(Builder, Address, Four8, 0, 8);
1056 
1057   if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
1058     // 12-16 are st(0..4).  Not sure why we stop at 4.
1059     // These have size 16, which is sizeof(long double) on
1060     // platforms with 8-byte alignment for that type.
1061     llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
1062     AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
1063 
1064   } else {
1065     // 9 is %eflags, which doesn't get a size on Darwin for some
1066     // reason.
1067     Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));
1068 
1069     // 11-16 are st(0..5).  Not sure why we stop at 5.
1070     // These have size 12, which is sizeof(long double) on
1071     // platforms with 4-byte alignment for that type.
1072     llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
1073     AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
1074   }
1075 
1076   return false;
1077 }
1078 
1079 //===----------------------------------------------------------------------===//
1080 // X86-64 ABI Implementation
1081 //===----------------------------------------------------------------------===//
1082 
1083 
1084 namespace {
1085 /// X86_64ABIInfo - The X86_64 ABI information.
1086 class X86_64ABIInfo : public ABIInfo {
1087   enum Class {
1088     Integer = 0,
1089     SSE,
1090     SSEUp,
1091     X87,
1092     X87Up,
1093     ComplexX87,
1094     NoClass,
1095     Memory
1096   };
1097 
1098   /// merge - Implement the X86_64 ABI merging algorithm.
1099   ///
1100   /// Merge an accumulating classification \arg Accum with a field
1101   /// classification \arg Field.
1102   ///
1103   /// \param Accum - The accumulating classification. This should
1104   /// always be either NoClass or the result of a previous merge
1105   /// call. In addition, this should never be Memory (the caller
1106   /// should just return Memory for the aggregate).
1107   static Class merge(Class Accum, Class Field);
1108 
1109   /// postMerge - Implement the X86_64 ABI post merging algorithm.
1110   ///
1111   /// Post merger cleanup, reduces a malformed Hi and Lo pair to
1112   /// final MEMORY or SSE classes when necessary.
1113   ///
1114   /// \param AggregateSize - The size of the current aggregate in
1115   /// the classification process.
1116   ///
1117   /// \param Lo - The classification for the parts of the type
1118   /// residing in the low word of the containing object.
1119   ///
1120   /// \param Hi - The classification for the parts of the type
1121   /// residing in the higher words of the containing object.
1122   ///
1123   void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
1124 
1125   /// classify - Determine the x86_64 register classes in which the
1126   /// given type T should be passed.
1127   ///
1128   /// \param Lo - The classification for the parts of the type
1129   /// residing in the low word of the containing object.
1130   ///
1131   /// \param Hi - The classification for the parts of the type
1132   /// residing in the high word of the containing object.
1133   ///
1134   /// \param OffsetBase - The bit offset of this type in the
1135   /// containing object.  Some parameters are classified different
1136   /// depending on whether they straddle an eightbyte boundary.
1137   ///
1138   /// \param isNamedArg - Whether the argument in question is a "named"
1139   /// argument, as used in AMD64-ABI 3.5.7.
1140   ///
1141   /// If a word is unused its result will be NoClass; if a type should
1142   /// be passed in Memory then at least the classification of \arg Lo
1143   /// will be Memory.
1144   ///
1145   /// The \arg Lo class will be NoClass iff the argument is ignored.
1146   ///
1147   /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
1148   /// also be ComplexX87.
1149   void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
1150                 bool isNamedArg) const;
1151 
1152   llvm::Type *GetByteVectorType(QualType Ty) const;
1153   llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
1154                                  unsigned IROffset, QualType SourceTy,
1155                                  unsigned SourceOffset) const;
1156   llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
1157                                      unsigned IROffset, QualType SourceTy,
1158                                      unsigned SourceOffset) const;
1159 
1160   /// getIndirectResult - Give a source type \arg Ty, return a suitable result
1161   /// such that the argument will be returned in memory.
1162   ABIArgInfo getIndirectReturnResult(QualType Ty) const;
1163 
1164   /// getIndirectResult - Give a source type \arg Ty, return a suitable result
1165   /// such that the argument will be passed in memory.
1166   ///
1167   /// \param freeIntRegs - The number of free integer registers remaining
1168   /// available.
1169   ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
1170 
1171   ABIArgInfo classifyReturnType(QualType RetTy) const;
1172 
1173   ABIArgInfo classifyArgumentType(QualType Ty,
1174                                   unsigned freeIntRegs,
1175                                   unsigned &neededInt,
1176                                   unsigned &neededSSE,
1177                                   bool isNamedArg) const;
1178 
1179   bool IsIllegalVectorType(QualType Ty) const;
1180 
1181   /// The 0.98 ABI revision clarified a lot of ambiguities,
1182   /// unfortunately in ways that were not always consistent with
1183   /// certain previous compilers.  In particular, platforms which
1184   /// required strict binary compatibility with older versions of GCC
1185   /// may need to exempt themselves.
1186   bool honorsRevision0_98() const {
1187     return !getTarget().getTriple().isOSDarwin();
1188   }
1189 
1190   bool HasAVX;
1191   // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
1192   // 64-bit hardware.
1193   bool Has64BitPointers;
1194 
1195 public:
1196   X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
1197       ABIInfo(CGT), HasAVX(hasavx),
1198       Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
1199   }
1200 
1201   bool isPassedUsingAVXType(QualType type) const {
1202     unsigned neededInt, neededSSE;
1203     // The freeIntRegs argument doesn't matter here.
1204     ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
1205                                            /*isNamedArg*/true);
1206     if (info.isDirect()) {
1207       llvm::Type *ty = info.getCoerceToType();
1208       if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
1209         return (vectorTy->getBitWidth() > 128);
1210     }
1211     return false;
1212   }
1213 
1214   virtual void computeInfo(CGFunctionInfo &FI) const;
1215 
1216   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1217                                  CodeGenFunction &CGF) const;
1218 };
1219 
1220 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
1221 class WinX86_64ABIInfo : public ABIInfo {
1222 
1223   ABIArgInfo classify(QualType Ty, bool IsReturnType) const;
1224 
1225 public:
1226   WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
1227 
1228   virtual void computeInfo(CGFunctionInfo &FI) const;
1229 
1230   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1231                                  CodeGenFunction &CGF) const;
1232 };
1233 
1234 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
1235 public:
1236   X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
1237       : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
1238 
1239   const X86_64ABIInfo &getABIInfo() const {
1240     return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
1241   }
1242 
1243   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
1244     return 7;
1245   }
1246 
1247   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1248                                llvm::Value *Address) const {
1249     llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
1250 
1251     // 0-15 are the 16 integer registers.
1252     // 16 is %rip.
1253     AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
1254     return false;
1255   }
1256 
1257   llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1258                                   StringRef Constraint,
1259                                   llvm::Type* Ty) const {
1260     return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1261   }
1262 
1263   bool isNoProtoCallVariadic(const CallArgList &args,
1264                              const FunctionNoProtoType *fnType) const {
1265     // The default CC on x86-64 sets %al to the number of SSA
1266     // registers used, and GCC sets this when calling an unprototyped
1267     // function, so we override the default behavior.  However, don't do
1268     // that when AVX types are involved: the ABI explicitly states it is
1269     // undefined, and it doesn't work in practice because of how the ABI
1270     // defines varargs anyway.
1271     if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) {
1272       bool HasAVXType = false;
1273       for (CallArgList::const_iterator
1274              it = args.begin(), ie = args.end(); it != ie; ++it) {
1275         if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
1276           HasAVXType = true;
1277           break;
1278         }
1279       }
1280 
1281       if (!HasAVXType)
1282         return true;
1283     }
1284 
1285     return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
1286   }
1287 
1288 };
1289 
1290 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
1291   // If the argument does not end in .lib, automatically add the suffix. This
1292   // matches the behavior of MSVC.
1293   std::string ArgStr = Lib;
1294   if (Lib.size() <= 4 ||
1295       Lib.substr(Lib.size() - 4).compare_lower(".lib") != 0) {
1296     ArgStr += ".lib";
1297   }
1298   return ArgStr;
1299 }
1300 
1301 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
1302 public:
1303   WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
1304         bool d, bool p, bool w, unsigned RegParms)
1305     : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}
1306 
1307   void getDependentLibraryOption(llvm::StringRef Lib,
1308                                  llvm::SmallString<24> &Opt) const {
1309     Opt = "/DEFAULTLIB:";
1310     Opt += qualifyWindowsLibrary(Lib);
1311   }
1312 
1313   void getDetectMismatchOption(llvm::StringRef Name,
1314                                llvm::StringRef Value,
1315                                llvm::SmallString<32> &Opt) const {
1316     Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
1317   }
1318 };
1319 
1320 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
1321 public:
1322   WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
1323     : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
1324 
1325   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
1326     return 7;
1327   }
1328 
1329   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1330                                llvm::Value *Address) const {
1331     llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
1332 
1333     // 0-15 are the 16 integer registers.
1334     // 16 is %rip.
1335     AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
1336     return false;
1337   }
1338 
1339   void getDependentLibraryOption(llvm::StringRef Lib,
1340                                  llvm::SmallString<24> &Opt) const {
1341     Opt = "/DEFAULTLIB:";
1342     Opt += qualifyWindowsLibrary(Lib);
1343   }
1344 
1345   void getDetectMismatchOption(llvm::StringRef Name,
1346                                llvm::StringRef Value,
1347                                llvm::SmallString<32> &Opt) const {
1348     Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
1349   }
1350 };
1351 
1352 }
1353 
1354 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
1355                               Class &Hi) const {
1356   // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
1357   //
1358   // (a) If one of the classes is Memory, the whole argument is passed in
1359   //     memory.
1360   //
1361   // (b) If X87UP is not preceded by X87, the whole argument is passed in
1362   //     memory.
1363   //
1364   // (c) If the size of the aggregate exceeds two eightbytes and the first
1365   //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
1366   //     argument is passed in memory. NOTE: This is necessary to keep the
1367   //     ABI working for processors that don't support the __m256 type.
1368   //
1369   // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
1370   //
1371   // Some of these are enforced by the merging logic.  Others can arise
1372   // only with unions; for example:
1373   //   union { _Complex double; unsigned; }
1374   //
1375   // Note that clauses (b) and (c) were added in 0.98.
1376   //
1377   if (Hi == Memory)
1378     Lo = Memory;
1379   if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
1380     Lo = Memory;
1381   if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
1382     Lo = Memory;
1383   if (Hi == SSEUp && Lo != SSE)
1384     Hi = SSE;
1385 }
1386 
1387 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
1388   // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
1389   // classified recursively so that always two fields are
1390   // considered. The resulting class is calculated according to
1391   // the classes of the fields in the eightbyte:
1392   //
1393   // (a) If both classes are equal, this is the resulting class.
1394   //
1395   // (b) If one of the classes is NO_CLASS, the resulting class is
1396   // the other class.
1397   //
1398   // (c) If one of the classes is MEMORY, the result is the MEMORY
1399   // class.
1400   //
1401   // (d) If one of the classes is INTEGER, the result is the
1402   // INTEGER.
1403   //
1404   // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
1405   // MEMORY is used as class.
1406   //
1407   // (f) Otherwise class SSE is used.
1408 
1409   // Accum should never be memory (we should have returned) or
1410   // ComplexX87 (because this cannot be passed in a structure).
1411   assert((Accum != Memory && Accum != ComplexX87) &&
1412          "Invalid accumulated classification during merge.");
1413   if (Accum == Field || Field == NoClass)
1414     return Accum;
1415   if (Field == Memory)
1416     return Memory;
1417   if (Accum == NoClass)
1418     return Field;
1419   if (Accum == Integer || Field == Integer)
1420     return Integer;
1421   if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
1422       Accum == X87 || Accum == X87Up)
1423     return Memory;
1424   return SSE;
1425 }
1426 
1427 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
1428                              Class &Lo, Class &Hi, bool isNamedArg) const {
1429   // FIXME: This code can be simplified by introducing a simple value class for
1430   // Class pairs with appropriate constructor methods for the various
1431   // situations.
1432 
1433   // FIXME: Some of the split computations are wrong; unaligned vectors
1434   // shouldn't be passed in registers for example, so there is no chance they
1435   // can straddle an eightbyte. Verify & simplify.
1436 
1437   Lo = Hi = NoClass;
1438 
1439   Class &Current = OffsetBase < 64 ? Lo : Hi;
1440   Current = Memory;
1441 
1442   if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
1443     BuiltinType::Kind k = BT->getKind();
1444 
1445     if (k == BuiltinType::Void) {
1446       Current = NoClass;
1447     } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
1448       Lo = Integer;
1449       Hi = Integer;
1450     } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
1451       Current = Integer;
1452     } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
1453                (k == BuiltinType::LongDouble &&
1454                 getTarget().getTriple().getOS() == llvm::Triple::NaCl)) {
1455       Current = SSE;
1456     } else if (k == BuiltinType::LongDouble) {
1457       Lo = X87;
1458       Hi = X87Up;
1459     }
1460     // FIXME: _Decimal32 and _Decimal64 are SSE.
1461     // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
1462     return;
1463   }
1464 
1465   if (const EnumType *ET = Ty->getAs<EnumType>()) {
1466     // Classify the underlying integer type.
1467     classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
1468     return;
1469   }
1470 
1471   if (Ty->hasPointerRepresentation()) {
1472     Current = Integer;
1473     return;
1474   }
1475 
1476   if (Ty->isMemberPointerType()) {
1477     if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
1478       Lo = Hi = Integer;
1479     else
1480       Current = Integer;
1481     return;
1482   }
1483 
1484   if (const VectorType *VT = Ty->getAs<VectorType>()) {
1485     uint64_t Size = getContext().getTypeSize(VT);
1486     if (Size == 32) {
1487       // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
1488       // float> as integer.
1489       Current = Integer;
1490 
1491       // If this type crosses an eightbyte boundary, it should be
1492       // split.
1493       uint64_t EB_Real = (OffsetBase) / 64;
1494       uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
1495       if (EB_Real != EB_Imag)
1496         Hi = Lo;
1497     } else if (Size == 64) {
1498       // gcc passes <1 x double> in memory. :(
1499       if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
1500         return;
1501 
1502       // gcc passes <1 x long long> as INTEGER.
1503       if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
1504           VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
1505           VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
1506           VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
1507         Current = Integer;
1508       else
1509         Current = SSE;
1510 
1511       // If this type crosses an eightbyte boundary, it should be
1512       // split.
1513       if (OffsetBase && OffsetBase != 64)
1514         Hi = Lo;
1515     } else if (Size == 128 || (HasAVX && isNamedArg && Size == 256)) {
1516       // Arguments of 256-bits are split into four eightbyte chunks. The
1517       // least significant one belongs to class SSE and all the others to class
1518       // SSEUP. The original Lo and Hi design considers that types can't be
1519       // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
1520       // This design isn't correct for 256-bits, but since there're no cases
1521       // where the upper parts would need to be inspected, avoid adding
1522       // complexity and just consider Hi to match the 64-256 part.
1523       //
1524       // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
1525       // registers if they are "named", i.e. not part of the "..." of a
1526       // variadic function.
1527       Lo = SSE;
1528       Hi = SSEUp;
1529     }
1530     return;
1531   }
1532 
1533   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
1534     QualType ET = getContext().getCanonicalType(CT->getElementType());
1535 
1536     uint64_t Size = getContext().getTypeSize(Ty);
1537     if (ET->isIntegralOrEnumerationType()) {
1538       if (Size <= 64)
1539         Current = Integer;
1540       else if (Size <= 128)
1541         Lo = Hi = Integer;
1542     } else if (ET == getContext().FloatTy)
1543       Current = SSE;
1544     else if (ET == getContext().DoubleTy ||
1545              (ET == getContext().LongDoubleTy &&
1546               getTarget().getTriple().getOS() == llvm::Triple::NaCl))
1547       Lo = Hi = SSE;
1548     else if (ET == getContext().LongDoubleTy)
1549       Current = ComplexX87;
1550 
1551     // If this complex type crosses an eightbyte boundary then it
1552     // should be split.
1553     uint64_t EB_Real = (OffsetBase) / 64;
1554     uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
1555     if (Hi == NoClass && EB_Real != EB_Imag)
1556       Hi = Lo;
1557 
1558     return;
1559   }
1560 
1561   if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
1562     // Arrays are treated like structures.
1563 
1564     uint64_t Size = getContext().getTypeSize(Ty);
1565 
1566     // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1567     // than four eightbytes, ..., it has class MEMORY.
1568     if (Size > 256)
1569       return;
1570 
1571     // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
1572     // fields, it has class MEMORY.
1573     //
1574     // Only need to check alignment of array base.
1575     if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
1576       return;
1577 
1578     // Otherwise implement simplified merge. We could be smarter about
1579     // this, but it isn't worth it and would be harder to verify.
1580     Current = NoClass;
1581     uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
1582     uint64_t ArraySize = AT->getSize().getZExtValue();
1583 
1584     // The only case a 256-bit wide vector could be used is when the array
1585     // contains a single 256-bit element. Since Lo and Hi logic isn't extended
1586     // to work for sizes wider than 128, early check and fallback to memory.
1587     if (Size > 128 && EltSize != 256)
1588       return;
1589 
1590     for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
1591       Class FieldLo, FieldHi;
1592       classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
1593       Lo = merge(Lo, FieldLo);
1594       Hi = merge(Hi, FieldHi);
1595       if (Lo == Memory || Hi == Memory)
1596         break;
1597     }
1598 
1599     postMerge(Size, Lo, Hi);
1600     assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
1601     return;
1602   }
1603 
1604   if (const RecordType *RT = Ty->getAs<RecordType>()) {
1605     uint64_t Size = getContext().getTypeSize(Ty);
1606 
1607     // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1608     // than four eightbytes, ..., it has class MEMORY.
1609     if (Size > 256)
1610       return;
1611 
1612     // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
1613     // copy constructor or a non-trivial destructor, it is passed by invisible
1614     // reference.
1615     if (getRecordArgABI(RT, CGT))
1616       return;
1617 
1618     const RecordDecl *RD = RT->getDecl();
1619 
1620     // Assume variable sized types are passed in memory.
1621     if (RD->hasFlexibleArrayMember())
1622       return;
1623 
1624     const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
1625 
1626     // Reset Lo class, this will be recomputed.
1627     Current = NoClass;
1628 
1629     // If this is a C++ record, classify the bases first.
1630     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1631       for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
1632              e = CXXRD->bases_end(); i != e; ++i) {
1633         assert(!i->isVirtual() && !i->getType()->isDependentType() &&
1634                "Unexpected base class!");
1635         const CXXRecordDecl *Base =
1636           cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
1637 
1638         // Classify this field.
1639         //
1640         // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
1641         // single eightbyte, each is classified separately. Each eightbyte gets
1642         // initialized to class NO_CLASS.
1643         Class FieldLo, FieldHi;
1644         uint64_t Offset =
1645           OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
1646         classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
1647         Lo = merge(Lo, FieldLo);
1648         Hi = merge(Hi, FieldHi);
1649         if (Lo == Memory || Hi == Memory)
1650           break;
1651       }
1652     }
1653 
1654     // Classify the fields one at a time, merging the results.
1655     unsigned idx = 0;
1656     for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1657            i != e; ++i, ++idx) {
1658       uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1659       bool BitField = i->isBitField();
1660 
1661       // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
1662       // four eightbytes, or it contains unaligned fields, it has class MEMORY.
1663       //
1664       // The only case a 256-bit wide vector could be used is when the struct
1665       // contains a single 256-bit element. Since Lo and Hi logic isn't extended
1666       // to work for sizes wider than 128, early check and fallback to memory.
1667       //
1668       if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
1669         Lo = Memory;
1670         return;
1671       }
1672       // Note, skip this test for bit-fields, see below.
1673       if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
1674         Lo = Memory;
1675         return;
1676       }
1677 
1678       // Classify this field.
1679       //
1680       // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
1681       // exceeds a single eightbyte, each is classified
1682       // separately. Each eightbyte gets initialized to class
1683       // NO_CLASS.
1684       Class FieldLo, FieldHi;
1685 
1686       // Bit-fields require special handling, they do not force the
1687       // structure to be passed in memory even if unaligned, and
1688       // therefore they can straddle an eightbyte.
1689       if (BitField) {
1690         // Ignore padding bit-fields.
1691         if (i->isUnnamedBitfield())
1692           continue;
1693 
1694         uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1695         uint64_t Size = i->getBitWidthValue(getContext());
1696 
1697         uint64_t EB_Lo = Offset / 64;
1698         uint64_t EB_Hi = (Offset + Size - 1) / 64;
1699         FieldLo = FieldHi = NoClass;
1700         if (EB_Lo) {
1701           assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
1702           FieldLo = NoClass;
1703           FieldHi = Integer;
1704         } else {
1705           FieldLo = Integer;
1706           FieldHi = EB_Hi ? Integer : NoClass;
1707         }
1708       } else
1709         classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
1710       Lo = merge(Lo, FieldLo);
1711       Hi = merge(Hi, FieldHi);
1712       if (Lo == Memory || Hi == Memory)
1713         break;
1714     }
1715 
1716     postMerge(Size, Lo, Hi);
1717   }
1718 }
1719 
1720 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
1721   // If this is a scalar LLVM value then assume LLVM will pass it in the right
1722   // place naturally.
1723   if (!isAggregateTypeForABI(Ty)) {
1724     // Treat an enum type as its underlying type.
1725     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1726       Ty = EnumTy->getDecl()->getIntegerType();
1727 
1728     return (Ty->isPromotableIntegerType() ?
1729             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1730   }
1731 
1732   return ABIArgInfo::getIndirect(0);
1733 }
1734 
1735 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
1736   if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
1737     uint64_t Size = getContext().getTypeSize(VecTy);
1738     unsigned LargestVector = HasAVX ? 256 : 128;
1739     if (Size <= 64 || Size > LargestVector)
1740       return true;
1741   }
1742 
1743   return false;
1744 }
1745 
1746 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
1747                                             unsigned freeIntRegs) const {
1748   // If this is a scalar LLVM value then assume LLVM will pass it in the right
1749   // place naturally.
1750   //
1751   // This assumption is optimistic, as there could be free registers available
1752   // when we need to pass this argument in memory, and LLVM could try to pass
1753   // the argument in the free register. This does not seem to happen currently,
1754   // but this code would be much safer if we could mark the argument with
1755   // 'onstack'. See PR12193.
1756   if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
1757     // Treat an enum type as its underlying type.
1758     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1759       Ty = EnumTy->getDecl()->getIntegerType();
1760 
1761     return (Ty->isPromotableIntegerType() ?
1762             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1763   }
1764 
1765   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
1766     return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
1767 
1768   // Compute the byval alignment. We specify the alignment of the byval in all
1769   // cases so that the mid-level optimizer knows the alignment of the byval.
1770   unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
1771 
1772   // Attempt to avoid passing indirect results using byval when possible. This
1773   // is important for good codegen.
1774   //
1775   // We do this by coercing the value into a scalar type which the backend can
1776   // handle naturally (i.e., without using byval).
1777   //
1778   // For simplicity, we currently only do this when we have exhausted all of the
1779   // free integer registers. Doing this when there are free integer registers
1780   // would require more care, as we would have to ensure that the coerced value
1781   // did not claim the unused register. That would require either reording the
1782   // arguments to the function (so that any subsequent inreg values came first),
1783   // or only doing this optimization when there were no following arguments that
1784   // might be inreg.
1785   //
1786   // We currently expect it to be rare (particularly in well written code) for
1787   // arguments to be passed on the stack when there are still free integer
1788   // registers available (this would typically imply large structs being passed
1789   // by value), so this seems like a fair tradeoff for now.
1790   //
1791   // We can revisit this if the backend grows support for 'onstack' parameter
1792   // attributes. See PR12193.
1793   if (freeIntRegs == 0) {
1794     uint64_t Size = getContext().getTypeSize(Ty);
1795 
1796     // If this type fits in an eightbyte, coerce it into the matching integral
1797     // type, which will end up on the stack (with alignment 8).
1798     if (Align == 8 && Size <= 64)
1799       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1800                                                           Size));
1801   }
1802 
1803   return ABIArgInfo::getIndirect(Align);
1804 }
1805 
1806 /// GetByteVectorType - The ABI specifies that a value should be passed in an
1807 /// full vector XMM/YMM register.  Pick an LLVM IR type that will be passed as a
1808 /// vector register.
1809 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
1810   llvm::Type *IRType = CGT.ConvertType(Ty);
1811 
1812   // Wrapper structs that just contain vectors are passed just like vectors,
1813   // strip them off if present.
1814   llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
1815   while (STy && STy->getNumElements() == 1) {
1816     IRType = STy->getElementType(0);
1817     STy = dyn_cast<llvm::StructType>(IRType);
1818   }
1819 
1820   // If the preferred type is a 16-byte vector, prefer to pass it.
1821   if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
1822     llvm::Type *EltTy = VT->getElementType();
1823     unsigned BitWidth = VT->getBitWidth();
1824     if ((BitWidth >= 128 && BitWidth <= 256) &&
1825         (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
1826          EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
1827          EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
1828          EltTy->isIntegerTy(128)))
1829       return VT;
1830   }
1831 
1832   return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
1833 }
1834 
1835 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
1836 /// is known to either be off the end of the specified type or being in
1837 /// alignment padding.  The user type specified is known to be at most 128 bits
1838 /// in size, and have passed through X86_64ABIInfo::classify with a successful
1839 /// classification that put one of the two halves in the INTEGER class.
1840 ///
1841 /// It is conservatively correct to return false.
1842 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
1843                                   unsigned EndBit, ASTContext &Context) {
1844   // If the bytes being queried are off the end of the type, there is no user
1845   // data hiding here.  This handles analysis of builtins, vectors and other
1846   // types that don't contain interesting padding.
1847   unsigned TySize = (unsigned)Context.getTypeSize(Ty);
1848   if (TySize <= StartBit)
1849     return true;
1850 
1851   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
1852     unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
1853     unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
1854 
1855     // Check each element to see if the element overlaps with the queried range.
1856     for (unsigned i = 0; i != NumElts; ++i) {
1857       // If the element is after the span we care about, then we're done..
1858       unsigned EltOffset = i*EltSize;
1859       if (EltOffset >= EndBit) break;
1860 
1861       unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
1862       if (!BitsContainNoUserData(AT->getElementType(), EltStart,
1863                                  EndBit-EltOffset, Context))
1864         return false;
1865     }
1866     // If it overlaps no elements, then it is safe to process as padding.
1867     return true;
1868   }
1869 
1870   if (const RecordType *RT = Ty->getAs<RecordType>()) {
1871     const RecordDecl *RD = RT->getDecl();
1872     const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
1873 
1874     // If this is a C++ record, check the bases first.
1875     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1876       for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
1877            e = CXXRD->bases_end(); i != e; ++i) {
1878         assert(!i->isVirtual() && !i->getType()->isDependentType() &&
1879                "Unexpected base class!");
1880         const CXXRecordDecl *Base =
1881           cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
1882 
1883         // If the base is after the span we care about, ignore it.
1884         unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
1885         if (BaseOffset >= EndBit) continue;
1886 
1887         unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
1888         if (!BitsContainNoUserData(i->getType(), BaseStart,
1889                                    EndBit-BaseOffset, Context))
1890           return false;
1891       }
1892     }
1893 
1894     // Verify that no field has data that overlaps the region of interest.  Yes
1895     // this could be sped up a lot by being smarter about queried fields,
1896     // however we're only looking at structs up to 16 bytes, so we don't care
1897     // much.
1898     unsigned idx = 0;
1899     for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1900          i != e; ++i, ++idx) {
1901       unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
1902 
1903       // If we found a field after the region we care about, then we're done.
1904       if (FieldOffset >= EndBit) break;
1905 
1906       unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
1907       if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
1908                                  Context))
1909         return false;
1910     }
1911 
1912     // If nothing in this record overlapped the area of interest, then we're
1913     // clean.
1914     return true;
1915   }
1916 
1917   return false;
1918 }
1919 
1920 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
1921 /// float member at the specified offset.  For example, {int,{float}} has a
1922 /// float at offset 4.  It is conservatively correct for this routine to return
1923 /// false.
1924 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
1925                                   const llvm::DataLayout &TD) {
1926   // Base case if we find a float.
1927   if (IROffset == 0 && IRType->isFloatTy())
1928     return true;
1929 
1930   // If this is a struct, recurse into the field at the specified offset.
1931   if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
1932     const llvm::StructLayout *SL = TD.getStructLayout(STy);
1933     unsigned Elt = SL->getElementContainingOffset(IROffset);
1934     IROffset -= SL->getElementOffset(Elt);
1935     return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
1936   }
1937 
1938   // If this is an array, recurse into the field at the specified offset.
1939   if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
1940     llvm::Type *EltTy = ATy->getElementType();
1941     unsigned EltSize = TD.getTypeAllocSize(EltTy);
1942     IROffset -= IROffset/EltSize*EltSize;
1943     return ContainsFloatAtOffset(EltTy, IROffset, TD);
1944   }
1945 
1946   return false;
1947 }
1948 
1949 
1950 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
1951 /// low 8 bytes of an XMM register, corresponding to the SSE class.
1952 llvm::Type *X86_64ABIInfo::
1953 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
1954                    QualType SourceTy, unsigned SourceOffset) const {
1955   // The only three choices we have are either double, <2 x float>, or float. We
1956   // pass as float if the last 4 bytes is just padding.  This happens for
1957   // structs that contain 3 floats.
1958   if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
1959                             SourceOffset*8+64, getContext()))
1960     return llvm::Type::getFloatTy(getVMContext());
1961 
1962   // We want to pass as <2 x float> if the LLVM IR type contains a float at
1963   // offset+0 and offset+4.  Walk the LLVM IR type to find out if this is the
1964   // case.
1965   if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
1966       ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
1967     return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
1968 
1969   return llvm::Type::getDoubleTy(getVMContext());
1970 }
1971 
1972 
1973 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
1974 /// an 8-byte GPR.  This means that we either have a scalar or we are talking
1975 /// about the high or low part of an up-to-16-byte struct.  This routine picks
1976 /// the best LLVM IR type to represent this, which may be i64 or may be anything
1977 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
1978 /// etc).
1979 ///
1980 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
1981 /// the source type.  IROffset is an offset in bytes into the LLVM IR type that
1982 /// the 8-byte value references.  PrefType may be null.
1983 ///
1984 /// SourceTy is the source level type for the entire argument.  SourceOffset is
1985 /// an offset into this that we're processing (which is always either 0 or 8).
1986 ///
1987 llvm::Type *X86_64ABIInfo::
1988 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
1989                        QualType SourceTy, unsigned SourceOffset) const {
1990   // If we're dealing with an un-offset LLVM IR type, then it means that we're
1991   // returning an 8-byte unit starting with it.  See if we can safely use it.
1992   if (IROffset == 0) {
1993     // Pointers and int64's always fill the 8-byte unit.
1994     if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
1995         IRType->isIntegerTy(64))
1996       return IRType;
1997 
1998     // If we have a 1/2/4-byte integer, we can use it only if the rest of the
1999     // goodness in the source type is just tail padding.  This is allowed to
2000     // kick in for struct {double,int} on the int, but not on
2001     // struct{double,int,int} because we wouldn't return the second int.  We
2002     // have to do this analysis on the source type because we can't depend on
2003     // unions being lowered a specific way etc.
2004     if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
2005         IRType->isIntegerTy(32) ||
2006         (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2007       unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2008           cast<llvm::IntegerType>(IRType)->getBitWidth();
2009 
2010       if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
2011                                 SourceOffset*8+64, getContext()))
2012         return IRType;
2013     }
2014   }
2015 
2016   if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2017     // If this is a struct, recurse into the field at the specified offset.
2018     const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2019     if (IROffset < SL->getSizeInBytes()) {
2020       unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2021       IROffset -= SL->getElementOffset(FieldIdx);
2022 
2023       return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2024                                     SourceTy, SourceOffset);
2025     }
2026   }
2027 
2028   if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2029     llvm::Type *EltTy = ATy->getElementType();
2030     unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2031     unsigned EltOffset = IROffset/EltSize*EltSize;
2032     return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2033                                   SourceOffset);
2034   }
2035 
2036   // Okay, we don't have any better idea of what to pass, so we pass this in an
2037   // integer register that isn't too big to fit the rest of the struct.
2038   unsigned TySizeInBytes =
2039     (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2040 
2041   assert(TySizeInBytes != SourceOffset && "Empty field?");
2042 
2043   // It is always safe to classify this as an integer type up to i64 that
2044   // isn't larger than the structure.
2045   return llvm::IntegerType::get(getVMContext(),
2046                                 std::min(TySizeInBytes-SourceOffset, 8U)*8);
2047 }
2048 
2049 
2050 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
2051 /// be used as elements of a two register pair to pass or return, return a
2052 /// first class aggregate to represent them.  For example, if the low part of
2053 /// a by-value argument should be passed as i32* and the high part as float,
2054 /// return {i32*, float}.
2055 static llvm::Type *
2056 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
2057                            const llvm::DataLayout &TD) {
2058   // In order to correctly satisfy the ABI, we need to the high part to start
2059   // at offset 8.  If the high and low parts we inferred are both 4-byte types
2060   // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
2061   // the second element at offset 8.  Check for this:
2062   unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
2063   unsigned HiAlign = TD.getABITypeAlignment(Hi);
2064   unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
2065   assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
2066 
2067   // To handle this, we have to increase the size of the low part so that the
2068   // second element will start at an 8 byte offset.  We can't increase the size
2069   // of the second element because it might make us access off the end of the
2070   // struct.
2071   if (HiStart != 8) {
2072     // There are only two sorts of types the ABI generation code can produce for
2073     // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
2074     // Promote these to a larger type.
2075     if (Lo->isFloatTy())
2076       Lo = llvm::Type::getDoubleTy(Lo->getContext());
2077     else {
2078       assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
2079       Lo = llvm::Type::getInt64Ty(Lo->getContext());
2080     }
2081   }
2082 
2083   llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);
2084 
2085 
2086   // Verify that the second element is at an 8-byte offset.
2087   assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
2088          "Invalid x86-64 argument pair!");
2089   return Result;
2090 }
2091 
2092 ABIArgInfo X86_64ABIInfo::
2093 classifyReturnType(QualType RetTy) const {
2094   // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
2095   // classification algorithm.
2096   X86_64ABIInfo::Class Lo, Hi;
2097   classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
2098 
2099   // Check some invariants.
2100   assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2101   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2102 
2103   llvm::Type *ResType = 0;
2104   switch (Lo) {
2105   case NoClass:
2106     if (Hi == NoClass)
2107       return ABIArgInfo::getIgnore();
2108     // If the low part is just padding, it takes no register, leave ResType
2109     // null.
2110     assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2111            "Unknown missing lo part");
2112     break;
2113 
2114   case SSEUp:
2115   case X87Up:
2116     llvm_unreachable("Invalid classification for lo word.");
2117 
2118     // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
2119     // hidden argument.
2120   case Memory:
2121     return getIndirectReturnResult(RetTy);
2122 
2123     // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
2124     // available register of the sequence %rax, %rdx is used.
2125   case Integer:
2126     ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2127 
2128     // If we have a sign or zero extended integer, make sure to return Extend
2129     // so that the parameter gets the right LLVM IR attributes.
2130     if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2131       // Treat an enum type as its underlying type.
2132       if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
2133         RetTy = EnumTy->getDecl()->getIntegerType();
2134 
2135       if (RetTy->isIntegralOrEnumerationType() &&
2136           RetTy->isPromotableIntegerType())
2137         return ABIArgInfo::getExtend();
2138     }
2139     break;
2140 
2141     // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
2142     // available SSE register of the sequence %xmm0, %xmm1 is used.
2143   case SSE:
2144     ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2145     break;
2146 
2147     // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
2148     // returned on the X87 stack in %st0 as 80-bit x87 number.
2149   case X87:
2150     ResType = llvm::Type::getX86_FP80Ty(getVMContext());
2151     break;
2152 
2153     // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
2154     // part of the value is returned in %st0 and the imaginary part in
2155     // %st1.
2156   case ComplexX87:
2157     assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
2158     ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
2159                                     llvm::Type::getX86_FP80Ty(getVMContext()),
2160                                     NULL);
2161     break;
2162   }
2163 
2164   llvm::Type *HighPart = 0;
2165   switch (Hi) {
2166     // Memory was handled previously and X87 should
2167     // never occur as a hi class.
2168   case Memory:
2169   case X87:
2170     llvm_unreachable("Invalid classification for hi word.");
2171 
2172   case ComplexX87: // Previously handled.
2173   case NoClass:
2174     break;
2175 
2176   case Integer:
2177     HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2178     if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
2179       return ABIArgInfo::getDirect(HighPart, 8);
2180     break;
2181   case SSE:
2182     HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2183     if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
2184       return ABIArgInfo::getDirect(HighPart, 8);
2185     break;
2186 
2187     // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
2188     // is passed in the next available eightbyte chunk if the last used
2189     // vector register.
2190     //
2191     // SSEUP should always be preceded by SSE, just widen.
2192   case SSEUp:
2193     assert(Lo == SSE && "Unexpected SSEUp classification.");
2194     ResType = GetByteVectorType(RetTy);
2195     break;
2196 
2197     // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
2198     // returned together with the previous X87 value in %st0.
2199   case X87Up:
2200     // If X87Up is preceded by X87, we don't need to do
2201     // anything. However, in some cases with unions it may not be
2202     // preceded by X87. In such situations we follow gcc and pass the
2203     // extra bits in an SSE reg.
2204     if (Lo != X87) {
2205       HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2206       if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
2207         return ABIArgInfo::getDirect(HighPart, 8);
2208     }
2209     break;
2210   }
2211 
2212   // If a high part was specified, merge it together with the low part.  It is
2213   // known to pass in the high eightbyte of the result.  We do this by forming a
2214   // first class struct aggregate with the high and low part: {low, high}
2215   if (HighPart)
2216     ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
2217 
2218   return ABIArgInfo::getDirect(ResType);
2219 }
2220 
2221 ABIArgInfo X86_64ABIInfo::classifyArgumentType(
2222   QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
2223   bool isNamedArg)
2224   const
2225 {
2226   X86_64ABIInfo::Class Lo, Hi;
2227   classify(Ty, 0, Lo, Hi, isNamedArg);
2228 
2229   // Check some invariants.
2230   // FIXME: Enforce these by construction.
2231   assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2232   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2233 
2234   neededInt = 0;
2235   neededSSE = 0;
2236   llvm::Type *ResType = 0;
2237   switch (Lo) {
2238   case NoClass:
2239     if (Hi == NoClass)
2240       return ABIArgInfo::getIgnore();
2241     // If the low part is just padding, it takes no register, leave ResType
2242     // null.
2243     assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2244            "Unknown missing lo part");
2245     break;
2246 
2247     // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
2248     // on the stack.
2249   case Memory:
2250 
2251     // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
2252     // COMPLEX_X87, it is passed in memory.
2253   case X87:
2254   case ComplexX87:
2255     if (getRecordArgABI(Ty, CGT) == CGCXXABI::RAA_Indirect)
2256       ++neededInt;
2257     return getIndirectResult(Ty, freeIntRegs);
2258 
2259   case SSEUp:
2260   case X87Up:
2261     llvm_unreachable("Invalid classification for lo word.");
2262 
2263     // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
2264     // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
2265     // and %r9 is used.
2266   case Integer:
2267     ++neededInt;
2268 
2269     // Pick an 8-byte type based on the preferred type.
2270     ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
2271 
2272     // If we have a sign or zero extended integer, make sure to return Extend
2273     // so that the parameter gets the right LLVM IR attributes.
2274     if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2275       // Treat an enum type as its underlying type.
2276       if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2277         Ty = EnumTy->getDecl()->getIntegerType();
2278 
2279       if (Ty->isIntegralOrEnumerationType() &&
2280           Ty->isPromotableIntegerType())
2281         return ABIArgInfo::getExtend();
2282     }
2283 
2284     break;
2285 
2286     // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
2287     // available SSE register is used, the registers are taken in the
2288     // order from %xmm0 to %xmm7.
2289   case SSE: {
2290     llvm::Type *IRType = CGT.ConvertType(Ty);
2291     ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
2292     ++neededSSE;
2293     break;
2294   }
2295   }
2296 
2297   llvm::Type *HighPart = 0;
2298   switch (Hi) {
2299     // Memory was handled previously, ComplexX87 and X87 should
2300     // never occur as hi classes, and X87Up must be preceded by X87,
2301     // which is passed in memory.
2302   case Memory:
2303   case X87:
2304   case ComplexX87:
2305     llvm_unreachable("Invalid classification for hi word.");
2306 
2307   case NoClass: break;
2308 
2309   case Integer:
2310     ++neededInt;
2311     // Pick an 8-byte type based on the preferred type.
2312     HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2313 
2314     if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
2315       return ABIArgInfo::getDirect(HighPart, 8);
2316     break;
2317 
2318     // X87Up generally doesn't occur here (long double is passed in
2319     // memory), except in situations involving unions.
2320   case X87Up:
2321   case SSE:
2322     HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2323 
2324     if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
2325       return ABIArgInfo::getDirect(HighPart, 8);
2326 
2327     ++neededSSE;
2328     break;
2329 
2330     // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
2331     // eightbyte is passed in the upper half of the last used SSE
2332     // register.  This only happens when 128-bit vectors are passed.
2333   case SSEUp:
2334     assert(Lo == SSE && "Unexpected SSEUp classification");
2335     ResType = GetByteVectorType(Ty);
2336     break;
2337   }
2338 
2339   // If a high part was specified, merge it together with the low part.  It is
2340   // known to pass in the high eightbyte of the result.  We do this by forming a
2341   // first class struct aggregate with the high and low part: {low, high}
2342   if (HighPart)
2343     ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
2344 
2345   return ABIArgInfo::getDirect(ResType);
2346 }
2347 
2348 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2349 
2350   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2351 
2352   // Keep track of the number of assigned registers.
2353   unsigned freeIntRegs = 6, freeSSERegs = 8;
2354 
2355   // If the return value is indirect, then the hidden argument is consuming one
2356   // integer register.
2357   if (FI.getReturnInfo().isIndirect())
2358     --freeIntRegs;
2359 
2360   bool isVariadic = FI.isVariadic();
2361   unsigned numRequiredArgs = 0;
2362   if (isVariadic)
2363     numRequiredArgs = FI.getRequiredArgs().getNumRequiredArgs();
2364 
2365   // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
2366   // get assigned (in left-to-right order) for passing as follows...
2367   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2368        it != ie; ++it) {
2369     bool isNamedArg = true;
2370     if (isVariadic)
2371       isNamedArg = (it - FI.arg_begin()) <
2372                     static_cast<signed>(numRequiredArgs);
2373 
2374     unsigned neededInt, neededSSE;
2375     it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
2376                                     neededSSE, isNamedArg);
2377 
2378     // AMD64-ABI 3.2.3p3: If there are no registers available for any
2379     // eightbyte of an argument, the whole argument is passed on the
2380     // stack. If registers have already been assigned for some
2381     // eightbytes of such an argument, the assignments get reverted.
2382     if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
2383       freeIntRegs -= neededInt;
2384       freeSSERegs -= neededSSE;
2385     } else {
2386       it->info = getIndirectResult(it->type, freeIntRegs);
2387     }
2388   }
2389 }
2390 
2391 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
2392                                         QualType Ty,
2393                                         CodeGenFunction &CGF) {
2394   llvm::Value *overflow_arg_area_p =
2395     CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
2396   llvm::Value *overflow_arg_area =
2397     CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
2398 
2399   // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
2400   // byte boundary if alignment needed by type exceeds 8 byte boundary.
2401   // It isn't stated explicitly in the standard, but in practice we use
2402   // alignment greater than 16 where necessary.
2403   uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
2404   if (Align > 8) {
2405     // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
2406     llvm::Value *Offset =
2407       llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
2408     overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
2409     llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
2410                                                     CGF.Int64Ty);
2411     llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
2412     overflow_arg_area =
2413       CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
2414                                  overflow_arg_area->getType(),
2415                                  "overflow_arg_area.align");
2416   }
2417 
2418   // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
2419   llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
2420   llvm::Value *Res =
2421     CGF.Builder.CreateBitCast(overflow_arg_area,
2422                               llvm::PointerType::getUnqual(LTy));
2423 
2424   // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
2425   // l->overflow_arg_area + sizeof(type).
2426   // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
2427   // an 8 byte boundary.
2428 
2429   uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
2430   llvm::Value *Offset =
2431       llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7)  & ~7);
2432   overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
2433                                             "overflow_arg_area.next");
2434   CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
2435 
2436   // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
2437   return Res;
2438 }
2439 
2440 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2441                                       CodeGenFunction &CGF) const {
2442   // Assume that va_list type is correct; should be pointer to LLVM type:
2443   // struct {
2444   //   i32 gp_offset;
2445   //   i32 fp_offset;
2446   //   i8* overflow_arg_area;
2447   //   i8* reg_save_area;
2448   // };
2449   unsigned neededInt, neededSSE;
2450 
2451   Ty = CGF.getContext().getCanonicalType(Ty);
2452   ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
2453                                        /*isNamedArg*/false);
2454 
2455   // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
2456   // in the registers. If not go to step 7.
2457   if (!neededInt && !neededSSE)
2458     return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2459 
2460   // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
2461   // general purpose registers needed to pass type and num_fp to hold
2462   // the number of floating point registers needed.
2463 
2464   // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
2465   // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
2466   // l->fp_offset > 304 - num_fp * 16 go to step 7.
2467   //
2468   // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
2469   // register save space).
2470 
2471   llvm::Value *InRegs = 0;
2472   llvm::Value *gp_offset_p = 0, *gp_offset = 0;
2473   llvm::Value *fp_offset_p = 0, *fp_offset = 0;
2474   if (neededInt) {
2475     gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
2476     gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
2477     InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
2478     InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
2479   }
2480 
2481   if (neededSSE) {
2482     fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
2483     fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
2484     llvm::Value *FitsInFP =
2485       llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
2486     FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
2487     InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
2488   }
2489 
2490   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
2491   llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
2492   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
2493   CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
2494 
2495   // Emit code to load the value if it was passed in registers.
2496 
2497   CGF.EmitBlock(InRegBlock);
2498 
2499   // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
2500   // an offset of l->gp_offset and/or l->fp_offset. This may require
2501   // copying to a temporary location in case the parameter is passed
2502   // in different register classes or requires an alignment greater
2503   // than 8 for general purpose registers and 16 for XMM registers.
2504   //
2505   // FIXME: This really results in shameful code when we end up needing to
2506   // collect arguments from different places; often what should result in a
2507   // simple assembling of a structure from scattered addresses has many more
2508   // loads than necessary. Can we clean this up?
2509   llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
2510   llvm::Value *RegAddr =
2511     CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
2512                            "reg_save_area");
2513   if (neededInt && neededSSE) {
2514     // FIXME: Cleanup.
2515     assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
2516     llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
2517     llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
2518     Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
2519     assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
2520     llvm::Type *TyLo = ST->getElementType(0);
2521     llvm::Type *TyHi = ST->getElementType(1);
2522     assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
2523            "Unexpected ABI info for mixed regs");
2524     llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
2525     llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
2526     llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2527     llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2528     llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
2529     llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
2530     llvm::Value *V =
2531       CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
2532     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2533     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
2534     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2535 
2536     RegAddr = CGF.Builder.CreateBitCast(Tmp,
2537                                         llvm::PointerType::getUnqual(LTy));
2538   } else if (neededInt) {
2539     RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2540     RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2541                                         llvm::PointerType::getUnqual(LTy));
2542 
2543     // Copy to a temporary if necessary to ensure the appropriate alignment.
2544     std::pair<CharUnits, CharUnits> SizeAlign =
2545         CGF.getContext().getTypeInfoInChars(Ty);
2546     uint64_t TySize = SizeAlign.first.getQuantity();
2547     unsigned TyAlign = SizeAlign.second.getQuantity();
2548     if (TyAlign > 8) {
2549       llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
2550       CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false);
2551       RegAddr = Tmp;
2552     }
2553   } else if (neededSSE == 1) {
2554     RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2555     RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2556                                         llvm::PointerType::getUnqual(LTy));
2557   } else {
2558     assert(neededSSE == 2 && "Invalid number of needed registers!");
2559     // SSE registers are spaced 16 bytes apart in the register save
2560     // area, we need to collect the two eightbytes together.
2561     llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2562     llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
2563     llvm::Type *DoubleTy = CGF.DoubleTy;
2564     llvm::Type *DblPtrTy =
2565       llvm::PointerType::getUnqual(DoubleTy);
2566     llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, NULL);
2567     llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty);
2568     Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
2569     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
2570                                                          DblPtrTy));
2571     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2572     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
2573                                                          DblPtrTy));
2574     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2575     RegAddr = CGF.Builder.CreateBitCast(Tmp,
2576                                         llvm::PointerType::getUnqual(LTy));
2577   }
2578 
2579   // AMD64-ABI 3.5.7p5: Step 5. Set:
2580   // l->gp_offset = l->gp_offset + num_gp * 8
2581   // l->fp_offset = l->fp_offset + num_fp * 16.
2582   if (neededInt) {
2583     llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
2584     CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
2585                             gp_offset_p);
2586   }
2587   if (neededSSE) {
2588     llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
2589     CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
2590                             fp_offset_p);
2591   }
2592   CGF.EmitBranch(ContBlock);
2593 
2594   // Emit code to load the value if it was passed in memory.
2595 
2596   CGF.EmitBlock(InMemBlock);
2597   llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2598 
2599   // Return the appropriate result.
2600 
2601   CGF.EmitBlock(ContBlock);
2602   llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
2603                                                  "vaarg.addr");
2604   ResAddr->addIncoming(RegAddr, InRegBlock);
2605   ResAddr->addIncoming(MemAddr, InMemBlock);
2606   return ResAddr;
2607 }
2608 
2609 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, bool IsReturnType) const {
2610 
2611   if (Ty->isVoidType())
2612     return ABIArgInfo::getIgnore();
2613 
2614   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2615     Ty = EnumTy->getDecl()->getIntegerType();
2616 
2617   uint64_t Size = getContext().getTypeSize(Ty);
2618 
2619   if (const RecordType *RT = Ty->getAs<RecordType>()) {
2620     if (IsReturnType) {
2621       if (isRecordReturnIndirect(RT, CGT))
2622         return ABIArgInfo::getIndirect(0, false);
2623     } else {
2624       if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, CGT))
2625         return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
2626     }
2627 
2628     if (RT->getDecl()->hasFlexibleArrayMember())
2629       return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2630 
2631     // FIXME: mingw-w64-gcc emits 128-bit struct as i128
2632     if (Size == 128 && getTarget().getTriple().getOS() == llvm::Triple::MinGW32)
2633       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2634                                                           Size));
2635 
2636     // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
2637     // not 1, 2, 4, or 8 bytes, must be passed by reference."
2638     if (Size <= 64 &&
2639         (Size & (Size - 1)) == 0)
2640       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2641                                                           Size));
2642 
2643     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2644   }
2645 
2646   if (Ty->isPromotableIntegerType())
2647     return ABIArgInfo::getExtend();
2648 
2649   return ABIArgInfo::getDirect();
2650 }
2651 
2652 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2653 
2654   QualType RetTy = FI.getReturnType();
2655   FI.getReturnInfo() = classify(RetTy, true);
2656 
2657   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2658        it != ie; ++it)
2659     it->info = classify(it->type, false);
2660 }
2661 
2662 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2663                                       CodeGenFunction &CGF) const {
2664   llvm::Type *BPP = CGF.Int8PtrPtrTy;
2665 
2666   CGBuilderTy &Builder = CGF.Builder;
2667   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
2668                                                        "ap");
2669   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2670   llvm::Type *PTy =
2671     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2672   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
2673 
2674   uint64_t Offset =
2675     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
2676   llvm::Value *NextAddr =
2677     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
2678                       "ap.next");
2679   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2680 
2681   return AddrTyped;
2682 }
2683 
2684 namespace {
2685 
2686 class NaClX86_64ABIInfo : public ABIInfo {
2687  public:
2688   NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
2689       : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
2690   virtual void computeInfo(CGFunctionInfo &FI) const;
2691   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2692                                  CodeGenFunction &CGF) const;
2693  private:
2694   PNaClABIInfo PInfo;  // Used for generating calls with pnaclcall callingconv.
2695   X86_64ABIInfo NInfo; // Used for everything else.
2696 };
2697 
2698 class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo  {
2699  public:
2700   NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
2701       : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {}
2702 };
2703 
2704 }
2705 
2706 void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2707   if (FI.getASTCallingConvention() == CC_PnaclCall)
2708     PInfo.computeInfo(FI);
2709   else
2710     NInfo.computeInfo(FI);
2711 }
2712 
2713 llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2714                                           CodeGenFunction &CGF) const {
2715   // Always use the native convention; calling pnacl-style varargs functions
2716   // is unuspported.
2717   return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
2718 }
2719 
2720 
2721 // PowerPC-32
2722 
2723 namespace {
2724 class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2725 public:
2726   PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2727 
2728   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2729     // This is recovered from gcc output.
2730     return 1; // r1 is the dedicated stack pointer
2731   }
2732 
2733   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2734                                llvm::Value *Address) const;
2735 };
2736 
2737 }
2738 
2739 bool
2740 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2741                                                 llvm::Value *Address) const {
2742   // This is calculated from the LLVM and GCC tables and verified
2743   // against gcc output.  AFAIK all ABIs use the same encoding.
2744 
2745   CodeGen::CGBuilderTy &Builder = CGF.Builder;
2746 
2747   llvm::IntegerType *i8 = CGF.Int8Ty;
2748   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2749   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
2750   llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
2751 
2752   // 0-31: r0-31, the 4-byte general-purpose registers
2753   AssignToArrayRange(Builder, Address, Four8, 0, 31);
2754 
2755   // 32-63: fp0-31, the 8-byte floating-point registers
2756   AssignToArrayRange(Builder, Address, Eight8, 32, 63);
2757 
2758   // 64-76 are various 4-byte special-purpose registers:
2759   // 64: mq
2760   // 65: lr
2761   // 66: ctr
2762   // 67: ap
2763   // 68-75 cr0-7
2764   // 76: xer
2765   AssignToArrayRange(Builder, Address, Four8, 64, 76);
2766 
2767   // 77-108: v0-31, the 16-byte vector registers
2768   AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
2769 
2770   // 109: vrsave
2771   // 110: vscr
2772   // 111: spe_acc
2773   // 112: spefscr
2774   // 113: sfp
2775   AssignToArrayRange(Builder, Address, Four8, 109, 113);
2776 
2777   return false;
2778 }
2779 
2780 // PowerPC-64
2781 
2782 namespace {
2783 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
2784 class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
2785 
2786 public:
2787   PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
2788 
2789   bool isPromotableTypeForABI(QualType Ty) const;
2790 
2791   ABIArgInfo classifyReturnType(QualType RetTy) const;
2792   ABIArgInfo classifyArgumentType(QualType Ty) const;
2793 
2794   // TODO: We can add more logic to computeInfo to improve performance.
2795   // Example: For aggregate arguments that fit in a register, we could
2796   // use getDirectInReg (as is done below for structs containing a single
2797   // floating-point value) to avoid pushing them to memory on function
2798   // entry.  This would require changing the logic in PPCISelLowering
2799   // when lowering the parameters in the caller and args in the callee.
2800   virtual void computeInfo(CGFunctionInfo &FI) const {
2801     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2802     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2803          it != ie; ++it) {
2804       // We rely on the default argument classification for the most part.
2805       // One exception:  An aggregate containing a single floating-point
2806       // item must be passed in a register if one is available.
2807       const Type *T = isSingleElementStruct(it->type, getContext());
2808       if (T) {
2809         const BuiltinType *BT = T->getAs<BuiltinType>();
2810         if (BT && BT->isFloatingPoint()) {
2811           QualType QT(T, 0);
2812           it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
2813           continue;
2814         }
2815       }
2816       it->info = classifyArgumentType(it->type);
2817     }
2818   }
2819 
2820   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr,
2821                                  QualType Ty,
2822                                  CodeGenFunction &CGF) const;
2823 };
2824 
2825 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
2826 public:
2827   PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT)
2828     : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {}
2829 
2830   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2831     // This is recovered from gcc output.
2832     return 1; // r1 is the dedicated stack pointer
2833   }
2834 
2835   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2836                                llvm::Value *Address) const;
2837 };
2838 
2839 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2840 public:
2841   PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2842 
2843   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2844     // This is recovered from gcc output.
2845     return 1; // r1 is the dedicated stack pointer
2846   }
2847 
2848   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2849                                llvm::Value *Address) const;
2850 };
2851 
2852 }
2853 
2854 // Return true if the ABI requires Ty to be passed sign- or zero-
2855 // extended to 64 bits.
2856 bool
2857 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
2858   // Treat an enum type as its underlying type.
2859   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2860     Ty = EnumTy->getDecl()->getIntegerType();
2861 
2862   // Promotable integer types are required to be promoted by the ABI.
2863   if (Ty->isPromotableIntegerType())
2864     return true;
2865 
2866   // In addition to the usual promotable integer types, we also need to
2867   // extend all 32-bit types, since the ABI requires promotion to 64 bits.
2868   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
2869     switch (BT->getKind()) {
2870     case BuiltinType::Int:
2871     case BuiltinType::UInt:
2872       return true;
2873     default:
2874       break;
2875     }
2876 
2877   return false;
2878 }
2879 
2880 ABIArgInfo
2881 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
2882   if (Ty->isAnyComplexType())
2883     return ABIArgInfo::getDirect();
2884 
2885   if (isAggregateTypeForABI(Ty)) {
2886     if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
2887       return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
2888 
2889     return ABIArgInfo::getIndirect(0);
2890   }
2891 
2892   return (isPromotableTypeForABI(Ty) ?
2893           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2894 }
2895 
2896 ABIArgInfo
2897 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
2898   if (RetTy->isVoidType())
2899     return ABIArgInfo::getIgnore();
2900 
2901   if (RetTy->isAnyComplexType())
2902     return ABIArgInfo::getDirect();
2903 
2904   if (isAggregateTypeForABI(RetTy))
2905     return ABIArgInfo::getIndirect(0);
2906 
2907   return (isPromotableTypeForABI(RetTy) ?
2908           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2909 }
2910 
2911 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
2912 llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
2913                                            QualType Ty,
2914                                            CodeGenFunction &CGF) const {
2915   llvm::Type *BP = CGF.Int8PtrTy;
2916   llvm::Type *BPP = CGF.Int8PtrPtrTy;
2917 
2918   CGBuilderTy &Builder = CGF.Builder;
2919   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
2920   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2921 
2922   // Update the va_list pointer.  The pointer should be bumped by the
2923   // size of the object.  We can trust getTypeSize() except for a complex
2924   // type whose base type is smaller than a doubleword.  For these, the
2925   // size of the object is 16 bytes; see below for further explanation.
2926   unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
2927   QualType BaseTy;
2928   unsigned CplxBaseSize = 0;
2929 
2930   if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
2931     BaseTy = CTy->getElementType();
2932     CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8;
2933     if (CplxBaseSize < 8)
2934       SizeInBytes = 16;
2935   }
2936 
2937   unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
2938   llvm::Value *NextAddr =
2939     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
2940                       "ap.next");
2941   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2942 
2943   // If we have a complex type and the base type is smaller than 8 bytes,
2944   // the ABI calls for the real and imaginary parts to be right-adjusted
2945   // in separate doublewords.  However, Clang expects us to produce a
2946   // pointer to a structure with the two parts packed tightly.  So generate
2947   // loads of the real and imaginary parts relative to the va_list pointer,
2948   // and store them to a temporary structure.
2949   if (CplxBaseSize && CplxBaseSize < 8) {
2950     llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
2951     llvm::Value *ImagAddr = RealAddr;
2952     RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
2953     ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
2954     llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy));
2955     RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy);
2956     ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
2957     llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal");
2958     llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag");
2959     llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty),
2960                                             "vacplx");
2961     llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real");
2962     llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag");
2963     Builder.CreateStore(Real, RealPtr, false);
2964     Builder.CreateStore(Imag, ImagPtr, false);
2965     return Ptr;
2966   }
2967 
2968   // If the argument is smaller than 8 bytes, it is right-adjusted in
2969   // its doubleword slot.  Adjust the pointer to pick it up from the
2970   // correct offset.
2971   if (SizeInBytes < 8) {
2972     llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
2973     AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
2974     Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
2975   }
2976 
2977   llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2978   return Builder.CreateBitCast(Addr, PTy);
2979 }
2980 
2981 static bool
2982 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2983                               llvm::Value *Address) {
2984   // This is calculated from the LLVM and GCC tables and verified
2985   // against gcc output.  AFAIK all ABIs use the same encoding.
2986 
2987   CodeGen::CGBuilderTy &Builder = CGF.Builder;
2988 
2989   llvm::IntegerType *i8 = CGF.Int8Ty;
2990   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2991   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
2992   llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
2993 
2994   // 0-31: r0-31, the 8-byte general-purpose registers
2995   AssignToArrayRange(Builder, Address, Eight8, 0, 31);
2996 
2997   // 32-63: fp0-31, the 8-byte floating-point registers
2998   AssignToArrayRange(Builder, Address, Eight8, 32, 63);
2999 
3000   // 64-76 are various 4-byte special-purpose registers:
3001   // 64: mq
3002   // 65: lr
3003   // 66: ctr
3004   // 67: ap
3005   // 68-75 cr0-7
3006   // 76: xer
3007   AssignToArrayRange(Builder, Address, Four8, 64, 76);
3008 
3009   // 77-108: v0-31, the 16-byte vector registers
3010   AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
3011 
3012   // 109: vrsave
3013   // 110: vscr
3014   // 111: spe_acc
3015   // 112: spefscr
3016   // 113: sfp
3017   AssignToArrayRange(Builder, Address, Four8, 109, 113);
3018 
3019   return false;
3020 }
3021 
3022 bool
3023 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
3024   CodeGen::CodeGenFunction &CGF,
3025   llvm::Value *Address) const {
3026 
3027   return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3028 }
3029 
3030 bool
3031 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3032                                                 llvm::Value *Address) const {
3033 
3034   return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3035 }
3036 
3037 //===----------------------------------------------------------------------===//
3038 // ARM ABI Implementation
3039 //===----------------------------------------------------------------------===//
3040 
3041 namespace {
3042 
3043 class ARMABIInfo : public ABIInfo {
3044 public:
3045   enum ABIKind {
3046     APCS = 0,
3047     AAPCS = 1,
3048     AAPCS_VFP
3049   };
3050 
3051 private:
3052   ABIKind Kind;
3053 
3054 public:
3055   ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {
3056     setRuntimeCC();
3057   }
3058 
3059   bool isEABI() const {
3060     StringRef Env = getTarget().getTriple().getEnvironmentName();
3061     return (Env == "gnueabi" || Env == "eabi" ||
3062             Env == "android" || Env == "androideabi");
3063   }
3064 
3065 private:
3066   ABIKind getABIKind() const { return Kind; }
3067 
3068   ABIArgInfo classifyReturnType(QualType RetTy) const;
3069   ABIArgInfo classifyArgumentType(QualType RetTy, int *VFPRegs,
3070                                   unsigned &AllocatedVFP,
3071                                   bool &IsHA) const;
3072   bool isIllegalVectorType(QualType Ty) const;
3073 
3074   virtual void computeInfo(CGFunctionInfo &FI) const;
3075 
3076   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3077                                  CodeGenFunction &CGF) const;
3078 
3079   llvm::CallingConv::ID getLLVMDefaultCC() const;
3080   llvm::CallingConv::ID getABIDefaultCC() const;
3081   void setRuntimeCC();
3082 };
3083 
3084 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
3085 public:
3086   ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
3087     :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
3088 
3089   const ARMABIInfo &getABIInfo() const {
3090     return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
3091   }
3092 
3093   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
3094     return 13;
3095   }
3096 
3097   StringRef getARCRetainAutoreleasedReturnValueMarker() const {
3098     return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
3099   }
3100 
3101   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3102                                llvm::Value *Address) const {
3103     llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
3104 
3105     // 0-15 are the 16 integer registers.
3106     AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
3107     return false;
3108   }
3109 
3110   unsigned getSizeOfUnwindException() const {
3111     if (getABIInfo().isEABI()) return 88;
3112     return TargetCodeGenInfo::getSizeOfUnwindException();
3113   }
3114 };
3115 
3116 }
3117 
3118 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
3119   // To correctly handle Homogeneous Aggregate, we need to keep track of the
3120   // VFP registers allocated so far.
3121   // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
3122   // VFP registers of the appropriate type unallocated then the argument is
3123   // allocated to the lowest-numbered sequence of such registers.
3124   // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
3125   // unallocated are marked as unavailable.
3126   unsigned AllocatedVFP = 0;
3127   int VFPRegs[16] = { 0 };
3128   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3129   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3130        it != ie; ++it) {
3131     unsigned PreAllocation = AllocatedVFP;
3132     bool IsHA = false;
3133     // 6.1.2.3 There is one VFP co-processor register class using registers
3134     // s0-s15 (d0-d7) for passing arguments.
3135     const unsigned NumVFPs = 16;
3136     it->info = classifyArgumentType(it->type, VFPRegs, AllocatedVFP, IsHA);
3137     // If we do not have enough VFP registers for the HA, any VFP registers
3138     // that are unallocated are marked as unavailable. To achieve this, we add
3139     // padding of (NumVFPs - PreAllocation) floats.
3140     if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
3141       llvm::Type *PaddingTy = llvm::ArrayType::get(
3142           llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
3143       it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
3144     }
3145   }
3146 
3147   // Always honor user-specified calling convention.
3148   if (FI.getCallingConvention() != llvm::CallingConv::C)
3149     return;
3150 
3151   llvm::CallingConv::ID cc = getRuntimeCC();
3152   if (cc != llvm::CallingConv::C)
3153     FI.setEffectiveCallingConvention(cc);
3154 }
3155 
3156 /// Return the default calling convention that LLVM will use.
3157 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
3158   // The default calling convention that LLVM will infer.
3159   if (getTarget().getTriple().getEnvironmentName()=="gnueabihf")
3160     return llvm::CallingConv::ARM_AAPCS_VFP;
3161   else if (isEABI())
3162     return llvm::CallingConv::ARM_AAPCS;
3163   else
3164     return llvm::CallingConv::ARM_APCS;
3165 }
3166 
3167 /// Return the calling convention that our ABI would like us to use
3168 /// as the C calling convention.
3169 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
3170   switch (getABIKind()) {
3171   case APCS: return llvm::CallingConv::ARM_APCS;
3172   case AAPCS: return llvm::CallingConv::ARM_AAPCS;
3173   case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
3174   }
3175   llvm_unreachable("bad ABI kind");
3176 }
3177 
3178 void ARMABIInfo::setRuntimeCC() {
3179   assert(getRuntimeCC() == llvm::CallingConv::C);
3180 
3181   // Don't muddy up the IR with a ton of explicit annotations if
3182   // they'd just match what LLVM will infer from the triple.
3183   llvm::CallingConv::ID abiCC = getABIDefaultCC();
3184   if (abiCC != getLLVMDefaultCC())
3185     RuntimeCC = abiCC;
3186 }
3187 
3188 /// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous
3189 /// aggregate.  If HAMembers is non-null, the number of base elements
3190 /// contained in the type is returned through it; this is used for the
3191 /// recursive calls that check aggregate component types.
3192 static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
3193                                    ASTContext &Context,
3194                                    uint64_t *HAMembers = 0) {
3195   uint64_t Members = 0;
3196   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
3197     if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
3198       return false;
3199     Members *= AT->getSize().getZExtValue();
3200   } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
3201     const RecordDecl *RD = RT->getDecl();
3202     if (RD->hasFlexibleArrayMember())
3203       return false;
3204 
3205     Members = 0;
3206     for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3207          i != e; ++i) {
3208       const FieldDecl *FD = *i;
3209       uint64_t FldMembers;
3210       if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers))
3211         return false;
3212 
3213       Members = (RD->isUnion() ?
3214                  std::max(Members, FldMembers) : Members + FldMembers);
3215     }
3216   } else {
3217     Members = 1;
3218     if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
3219       Members = 2;
3220       Ty = CT->getElementType();
3221     }
3222 
3223     // Homogeneous aggregates for AAPCS-VFP must have base types of float,
3224     // double, or 64-bit or 128-bit vectors.
3225     if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3226       if (BT->getKind() != BuiltinType::Float &&
3227           BT->getKind() != BuiltinType::Double &&
3228           BT->getKind() != BuiltinType::LongDouble)
3229         return false;
3230     } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
3231       unsigned VecSize = Context.getTypeSize(VT);
3232       if (VecSize != 64 && VecSize != 128)
3233         return false;
3234     } else {
3235       return false;
3236     }
3237 
3238     // The base type must be the same for all members.  Vector types of the
3239     // same total size are treated as being equivalent here.
3240     const Type *TyPtr = Ty.getTypePtr();
3241     if (!Base)
3242       Base = TyPtr;
3243     if (Base != TyPtr &&
3244         (!Base->isVectorType() || !TyPtr->isVectorType() ||
3245          Context.getTypeSize(Base) != Context.getTypeSize(TyPtr)))
3246       return false;
3247   }
3248 
3249   // Homogeneous Aggregates can have at most 4 members of the base type.
3250   if (HAMembers)
3251     *HAMembers = Members;
3252 
3253   return (Members > 0 && Members <= 4);
3254 }
3255 
3256 /// markAllocatedVFPs - update VFPRegs according to the alignment and
3257 /// number of VFP registers (unit is S register) requested.
3258 static void markAllocatedVFPs(int *VFPRegs, unsigned &AllocatedVFP,
3259                               unsigned Alignment,
3260                               unsigned NumRequired) {
3261   // Early Exit.
3262   if (AllocatedVFP >= 16)
3263     return;
3264   // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
3265   // VFP registers of the appropriate type unallocated then the argument is
3266   // allocated to the lowest-numbered sequence of such registers.
3267   for (unsigned I = 0; I < 16; I += Alignment) {
3268     bool FoundSlot = true;
3269     for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
3270       if (J >= 16 || VFPRegs[J]) {
3271          FoundSlot = false;
3272          break;
3273       }
3274     if (FoundSlot) {
3275       for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
3276         VFPRegs[J] = 1;
3277       AllocatedVFP += NumRequired;
3278       return;
3279     }
3280   }
3281   // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
3282   // unallocated are marked as unavailable.
3283   for (unsigned I = 0; I < 16; I++)
3284     VFPRegs[I] = 1;
3285   AllocatedVFP = 17; // We do not have enough VFP registers.
3286 }
3287 
3288 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, int *VFPRegs,
3289                                             unsigned &AllocatedVFP,
3290                                             bool &IsHA) const {
3291   // We update number of allocated VFPs according to
3292   // 6.1.2.1 The following argument types are VFP CPRCs:
3293   //   A single-precision floating-point type (including promoted
3294   //   half-precision types); A double-precision floating-point type;
3295   //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
3296   //   with a Base Type of a single- or double-precision floating-point type,
3297   //   64-bit containerized vectors or 128-bit containerized vectors with one
3298   //   to four Elements.
3299 
3300   // Handle illegal vector types here.
3301   if (isIllegalVectorType(Ty)) {
3302     uint64_t Size = getContext().getTypeSize(Ty);
3303     if (Size <= 32) {
3304       llvm::Type *ResType =
3305           llvm::Type::getInt32Ty(getVMContext());
3306       return ABIArgInfo::getDirect(ResType);
3307     }
3308     if (Size == 64) {
3309       llvm::Type *ResType = llvm::VectorType::get(
3310           llvm::Type::getInt32Ty(getVMContext()), 2);
3311       markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2);
3312       return ABIArgInfo::getDirect(ResType);
3313     }
3314     if (Size == 128) {
3315       llvm::Type *ResType = llvm::VectorType::get(
3316           llvm::Type::getInt32Ty(getVMContext()), 4);
3317       markAllocatedVFPs(VFPRegs, AllocatedVFP, 4, 4);
3318       return ABIArgInfo::getDirect(ResType);
3319     }
3320     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3321   }
3322   // Update VFPRegs for legal vector types.
3323   if (const VectorType *VT = Ty->getAs<VectorType>()) {
3324     uint64_t Size = getContext().getTypeSize(VT);
3325     // Size of a legal vector should be power of 2 and above 64.
3326     markAllocatedVFPs(VFPRegs, AllocatedVFP, Size >= 128 ? 4 : 2, Size / 32);
3327   }
3328   // Update VFPRegs for floating point types.
3329   if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3330     if (BT->getKind() == BuiltinType::Half ||
3331         BT->getKind() == BuiltinType::Float)
3332       markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, 1);
3333     if (BT->getKind() == BuiltinType::Double ||
3334         BT->getKind() == BuiltinType::LongDouble)
3335       markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2);
3336   }
3337 
3338   if (!isAggregateTypeForABI(Ty)) {
3339     // Treat an enum type as its underlying type.
3340     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3341       Ty = EnumTy->getDecl()->getIntegerType();
3342 
3343     return (Ty->isPromotableIntegerType() ?
3344             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3345   }
3346 
3347   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
3348     return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
3349 
3350   // Ignore empty records.
3351   if (isEmptyRecord(getContext(), Ty, true))
3352     return ABIArgInfo::getIgnore();
3353 
3354   if (getABIKind() == ARMABIInfo::AAPCS_VFP) {
3355     // Homogeneous Aggregates need to be expanded when we can fit the aggregate
3356     // into VFP registers.
3357     const Type *Base = 0;
3358     uint64_t Members = 0;
3359     if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
3360       assert(Base && "Base class should be set for homogeneous aggregate");
3361       // Base can be a floating-point or a vector.
3362       if (Base->isVectorType()) {
3363         // ElementSize is in number of floats.
3364         unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4;
3365         markAllocatedVFPs(VFPRegs, AllocatedVFP, ElementSize,
3366                           Members * ElementSize);
3367       } else if (Base->isSpecificBuiltinType(BuiltinType::Float))
3368         markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, Members);
3369       else {
3370         assert(Base->isSpecificBuiltinType(BuiltinType::Double) ||
3371                Base->isSpecificBuiltinType(BuiltinType::LongDouble));
3372         markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, Members * 2);
3373       }
3374       IsHA = true;
3375       return ABIArgInfo::getExpand();
3376     }
3377   }
3378 
3379   // Support byval for ARM.
3380   // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
3381   // most 8-byte. We realign the indirect argument if type alignment is bigger
3382   // than ABI alignment.
3383   uint64_t ABIAlign = 4;
3384   uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
3385   if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
3386       getABIKind() == ARMABIInfo::AAPCS)
3387     ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
3388   if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
3389     return ABIArgInfo::getIndirect(0, /*ByVal=*/true,
3390            /*Realign=*/TyAlign > ABIAlign);
3391   }
3392 
3393   // Otherwise, pass by coercing to a structure of the appropriate size.
3394   llvm::Type* ElemTy;
3395   unsigned SizeRegs;
3396   // FIXME: Try to match the types of the arguments more accurately where
3397   // we can.
3398   if (getContext().getTypeAlign(Ty) <= 32) {
3399     ElemTy = llvm::Type::getInt32Ty(getVMContext());
3400     SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
3401   } else {
3402     ElemTy = llvm::Type::getInt64Ty(getVMContext());
3403     SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
3404   }
3405 
3406   llvm::Type *STy =
3407     llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL);
3408   return ABIArgInfo::getDirect(STy);
3409 }
3410 
3411 static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
3412                               llvm::LLVMContext &VMContext) {
3413   // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
3414   // is called integer-like if its size is less than or equal to one word, and
3415   // the offset of each of its addressable sub-fields is zero.
3416 
3417   uint64_t Size = Context.getTypeSize(Ty);
3418 
3419   // Check that the type fits in a word.
3420   if (Size > 32)
3421     return false;
3422 
3423   // FIXME: Handle vector types!
3424   if (Ty->isVectorType())
3425     return false;
3426 
3427   // Float types are never treated as "integer like".
3428   if (Ty->isRealFloatingType())
3429     return false;
3430 
3431   // If this is a builtin or pointer type then it is ok.
3432   if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
3433     return true;
3434 
3435   // Small complex integer types are "integer like".
3436   if (const ComplexType *CT = Ty->getAs<ComplexType>())
3437     return isIntegerLikeType(CT->getElementType(), Context, VMContext);
3438 
3439   // Single element and zero sized arrays should be allowed, by the definition
3440   // above, but they are not.
3441 
3442   // Otherwise, it must be a record type.
3443   const RecordType *RT = Ty->getAs<RecordType>();
3444   if (!RT) return false;
3445 
3446   // Ignore records with flexible arrays.
3447   const RecordDecl *RD = RT->getDecl();
3448   if (RD->hasFlexibleArrayMember())
3449     return false;
3450 
3451   // Check that all sub-fields are at offset 0, and are themselves "integer
3452   // like".
3453   const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
3454 
3455   bool HadField = false;
3456   unsigned idx = 0;
3457   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3458        i != e; ++i, ++idx) {
3459     const FieldDecl *FD = *i;
3460 
3461     // Bit-fields are not addressable, we only need to verify they are "integer
3462     // like". We still have to disallow a subsequent non-bitfield, for example:
3463     //   struct { int : 0; int x }
3464     // is non-integer like according to gcc.
3465     if (FD->isBitField()) {
3466       if (!RD->isUnion())
3467         HadField = true;
3468 
3469       if (!isIntegerLikeType(FD->getType(), Context, VMContext))
3470         return false;
3471 
3472       continue;
3473     }
3474 
3475     // Check if this field is at offset 0.
3476     if (Layout.getFieldOffset(idx) != 0)
3477       return false;
3478 
3479     if (!isIntegerLikeType(FD->getType(), Context, VMContext))
3480       return false;
3481 
3482     // Only allow at most one field in a structure. This doesn't match the
3483     // wording above, but follows gcc in situations with a field following an
3484     // empty structure.
3485     if (!RD->isUnion()) {
3486       if (HadField)
3487         return false;
3488 
3489       HadField = true;
3490     }
3491   }
3492 
3493   return true;
3494 }
3495 
3496 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
3497   if (RetTy->isVoidType())
3498     return ABIArgInfo::getIgnore();
3499 
3500   // Large vector types should be returned via memory.
3501   if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
3502     return ABIArgInfo::getIndirect(0);
3503 
3504   if (!isAggregateTypeForABI(RetTy)) {
3505     // Treat an enum type as its underlying type.
3506     if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3507       RetTy = EnumTy->getDecl()->getIntegerType();
3508 
3509     return (RetTy->isPromotableIntegerType() ?
3510             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3511   }
3512 
3513   // Structures with either a non-trivial destructor or a non-trivial
3514   // copy constructor are always indirect.
3515   if (isRecordReturnIndirect(RetTy, CGT))
3516     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3517 
3518   // Are we following APCS?
3519   if (getABIKind() == APCS) {
3520     if (isEmptyRecord(getContext(), RetTy, false))
3521       return ABIArgInfo::getIgnore();
3522 
3523     // Complex types are all returned as packed integers.
3524     //
3525     // FIXME: Consider using 2 x vector types if the back end handles them
3526     // correctly.
3527     if (RetTy->isAnyComplexType())
3528       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
3529                                               getContext().getTypeSize(RetTy)));
3530 
3531     // Integer like structures are returned in r0.
3532     if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
3533       // Return in the smallest viable integer type.
3534       uint64_t Size = getContext().getTypeSize(RetTy);
3535       if (Size <= 8)
3536         return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
3537       if (Size <= 16)
3538         return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
3539       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
3540     }
3541 
3542     // Otherwise return in memory.
3543     return ABIArgInfo::getIndirect(0);
3544   }
3545 
3546   // Otherwise this is an AAPCS variant.
3547 
3548   if (isEmptyRecord(getContext(), RetTy, true))
3549     return ABIArgInfo::getIgnore();
3550 
3551   // Check for homogeneous aggregates with AAPCS-VFP.
3552   if (getABIKind() == AAPCS_VFP) {
3553     const Type *Base = 0;
3554     if (isHomogeneousAggregate(RetTy, Base, getContext())) {
3555       assert(Base && "Base class should be set for homogeneous aggregate");
3556       // Homogeneous Aggregates are returned directly.
3557       return ABIArgInfo::getDirect();
3558     }
3559   }
3560 
3561   // Aggregates <= 4 bytes are returned in r0; other aggregates
3562   // are returned indirectly.
3563   uint64_t Size = getContext().getTypeSize(RetTy);
3564   if (Size <= 32) {
3565     // Return in the smallest viable integer type.
3566     if (Size <= 8)
3567       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
3568     if (Size <= 16)
3569       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
3570     return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
3571   }
3572 
3573   return ABIArgInfo::getIndirect(0);
3574 }
3575 
3576 /// isIllegalVector - check whether Ty is an illegal vector type.
3577 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
3578   if (const VectorType *VT = Ty->getAs<VectorType>()) {
3579     // Check whether VT is legal.
3580     unsigned NumElements = VT->getNumElements();
3581     uint64_t Size = getContext().getTypeSize(VT);
3582     // NumElements should be power of 2.
3583     if ((NumElements & (NumElements - 1)) != 0)
3584       return true;
3585     // Size should be greater than 32 bits.
3586     return Size <= 32;
3587   }
3588   return false;
3589 }
3590 
3591 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3592                                    CodeGenFunction &CGF) const {
3593   llvm::Type *BP = CGF.Int8PtrTy;
3594   llvm::Type *BPP = CGF.Int8PtrPtrTy;
3595 
3596   CGBuilderTy &Builder = CGF.Builder;
3597   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
3598   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3599 
3600   if (isEmptyRecord(getContext(), Ty, true)) {
3601     // These are ignored for parameter passing purposes.
3602     llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3603     return Builder.CreateBitCast(Addr, PTy);
3604   }
3605 
3606   uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
3607   uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
3608   bool IsIndirect = false;
3609 
3610   // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
3611   // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
3612   if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
3613       getABIKind() == ARMABIInfo::AAPCS)
3614     TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
3615   else
3616     TyAlign = 4;
3617   // Use indirect if size of the illegal vector is bigger than 16 bytes.
3618   if (isIllegalVectorType(Ty) && Size > 16) {
3619     IsIndirect = true;
3620     Size = 4;
3621     TyAlign = 4;
3622   }
3623 
3624   // Handle address alignment for ABI alignment > 4 bytes.
3625   if (TyAlign > 4) {
3626     assert((TyAlign & (TyAlign - 1)) == 0 &&
3627            "Alignment is not power of 2!");
3628     llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
3629     AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
3630     AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
3631     Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
3632   }
3633 
3634   uint64_t Offset =
3635     llvm::RoundUpToAlignment(Size, 4);
3636   llvm::Value *NextAddr =
3637     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
3638                       "ap.next");
3639   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3640 
3641   if (IsIndirect)
3642     Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
3643   else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
3644     // We can't directly cast ap.cur to pointer to a vector type, since ap.cur
3645     // may not be correctly aligned for the vector type. We create an aligned
3646     // temporary space and copy the content over from ap.cur to the temporary
3647     // space. This is necessary if the natural alignment of the type is greater
3648     // than the ABI alignment.
3649     llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
3650     CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
3651     llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
3652                                                     "var.align");
3653     llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
3654     llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
3655     Builder.CreateMemCpy(Dst, Src,
3656         llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
3657         TyAlign, false);
3658     Addr = AlignedTemp; //The content is in aligned location.
3659   }
3660   llvm::Type *PTy =
3661     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3662   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
3663 
3664   return AddrTyped;
3665 }
3666 
3667 namespace {
3668 
3669 class NaClARMABIInfo : public ABIInfo {
3670  public:
3671   NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
3672       : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
3673   virtual void computeInfo(CGFunctionInfo &FI) const;
3674   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3675                                  CodeGenFunction &CGF) const;
3676  private:
3677   PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
3678   ARMABIInfo NInfo; // Used for everything else.
3679 };
3680 
3681 class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo  {
3682  public:
3683   NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
3684       : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
3685 };
3686 
3687 }
3688 
3689 void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
3690   if (FI.getASTCallingConvention() == CC_PnaclCall)
3691     PInfo.computeInfo(FI);
3692   else
3693     static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
3694 }
3695 
3696 llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3697                                        CodeGenFunction &CGF) const {
3698   // Always use the native convention; calling pnacl-style varargs functions
3699   // is unsupported.
3700   return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
3701 }
3702 
3703 //===----------------------------------------------------------------------===//
3704 // AArch64 ABI Implementation
3705 //===----------------------------------------------------------------------===//
3706 
3707 namespace {
3708 
3709 class AArch64ABIInfo : public ABIInfo {
3710 public:
3711   AArch64ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
3712 
3713 private:
3714   // The AArch64 PCS is explicit about return types and argument types being
3715   // handled identically, so we don't need to draw a distinction between
3716   // Argument and Return classification.
3717   ABIArgInfo classifyGenericType(QualType Ty, int &FreeIntRegs,
3718                                  int &FreeVFPRegs) const;
3719 
3720   ABIArgInfo tryUseRegs(QualType Ty, int &FreeRegs, int RegsNeeded, bool IsInt,
3721                         llvm::Type *DirectTy = 0) const;
3722 
3723   virtual void computeInfo(CGFunctionInfo &FI) const;
3724 
3725   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3726                                  CodeGenFunction &CGF) const;
3727 };
3728 
3729 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
3730 public:
3731   AArch64TargetCodeGenInfo(CodeGenTypes &CGT)
3732     :TargetCodeGenInfo(new AArch64ABIInfo(CGT)) {}
3733 
3734   const AArch64ABIInfo &getABIInfo() const {
3735     return static_cast<const AArch64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
3736   }
3737 
3738   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
3739     return 31;
3740   }
3741 
3742   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3743                                llvm::Value *Address) const {
3744     // 0-31 are x0-x30 and sp: 8 bytes each
3745     llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
3746     AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 31);
3747 
3748     // 64-95 are v0-v31: 16 bytes each
3749     llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
3750     AssignToArrayRange(CGF.Builder, Address, Sixteen8, 64, 95);
3751 
3752     return false;
3753   }
3754 
3755 };
3756 
3757 }
3758 
3759 void AArch64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3760   int FreeIntRegs = 8, FreeVFPRegs = 8;
3761 
3762   FI.getReturnInfo() = classifyGenericType(FI.getReturnType(),
3763                                            FreeIntRegs, FreeVFPRegs);
3764 
3765   FreeIntRegs = FreeVFPRegs = 8;
3766   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3767        it != ie; ++it) {
3768     it->info = classifyGenericType(it->type, FreeIntRegs, FreeVFPRegs);
3769 
3770   }
3771 }
3772 
3773 ABIArgInfo
3774 AArch64ABIInfo::tryUseRegs(QualType Ty, int &FreeRegs, int RegsNeeded,
3775                            bool IsInt, llvm::Type *DirectTy) const {
3776   if (FreeRegs >= RegsNeeded) {
3777     FreeRegs -= RegsNeeded;
3778     return ABIArgInfo::getDirect(DirectTy);
3779   }
3780 
3781   llvm::Type *Padding = 0;
3782 
3783   // We need padding so that later arguments don't get filled in anyway. That
3784   // wouldn't happen if only ByVal arguments followed in the same category, but
3785   // a large structure will simply seem to be a pointer as far as LLVM is
3786   // concerned.
3787   if (FreeRegs > 0) {
3788     if (IsInt)
3789       Padding = llvm::Type::getInt64Ty(getVMContext());
3790     else
3791       Padding = llvm::Type::getFloatTy(getVMContext());
3792 
3793     // Either [N x i64] or [N x float].
3794     Padding = llvm::ArrayType::get(Padding, FreeRegs);
3795     FreeRegs = 0;
3796   }
3797 
3798   return ABIArgInfo::getIndirect(getContext().getTypeAlign(Ty) / 8,
3799                                  /*IsByVal=*/ true, /*Realign=*/ false,
3800                                  Padding);
3801 }
3802 
3803 
3804 ABIArgInfo AArch64ABIInfo::classifyGenericType(QualType Ty,
3805                                                int &FreeIntRegs,
3806                                                int &FreeVFPRegs) const {
3807   // Can only occurs for return, but harmless otherwise.
3808   if (Ty->isVoidType())
3809     return ABIArgInfo::getIgnore();
3810 
3811   // Large vector types should be returned via memory. There's no such concept
3812   // in the ABI, but they'd be over 16 bytes anyway so no matter how they're
3813   // classified they'd go into memory (see B.3).
3814   if (Ty->isVectorType() && getContext().getTypeSize(Ty) > 128) {
3815     if (FreeIntRegs > 0)
3816       --FreeIntRegs;
3817     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3818   }
3819 
3820   // All non-aggregate LLVM types have a concrete ABI representation so they can
3821   // be passed directly. After this block we're guaranteed to be in a
3822   // complicated case.
3823   if (!isAggregateTypeForABI(Ty)) {
3824     // Treat an enum type as its underlying type.
3825     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3826       Ty = EnumTy->getDecl()->getIntegerType();
3827 
3828     if (Ty->isFloatingType() || Ty->isVectorType())
3829       return tryUseRegs(Ty, FreeVFPRegs, /*RegsNeeded=*/ 1, /*IsInt=*/ false);
3830 
3831     assert(getContext().getTypeSize(Ty) <= 128 &&
3832            "unexpectedly large scalar type");
3833 
3834     int RegsNeeded = getContext().getTypeSize(Ty) > 64 ? 2 : 1;
3835 
3836     // If the type may need padding registers to ensure "alignment", we must be
3837     // careful when this is accounted for. Increasing the effective size covers
3838     // all cases.
3839     if (getContext().getTypeAlign(Ty) == 128)
3840       RegsNeeded += FreeIntRegs % 2 != 0;
3841 
3842     return tryUseRegs(Ty, FreeIntRegs, RegsNeeded, /*IsInt=*/ true);
3843   }
3844 
3845   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT)) {
3846     if (FreeIntRegs > 0 && RAA == CGCXXABI::RAA_Indirect)
3847       --FreeIntRegs;
3848     return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
3849   }
3850 
3851   if (isEmptyRecord(getContext(), Ty, true)) {
3852     if (!getContext().getLangOpts().CPlusPlus) {
3853       // Empty structs outside C++ mode are a GNU extension, so no ABI can
3854       // possibly tell us what to do. It turns out (I believe) that GCC ignores
3855       // the object for parameter-passsing purposes.
3856       return ABIArgInfo::getIgnore();
3857     }
3858 
3859     // The combination of C++98 9p5 (sizeof(struct) != 0) and the pseudocode
3860     // description of va_arg in the PCS require that an empty struct does
3861     // actually occupy space for parameter-passing. I'm hoping for a
3862     // clarification giving an explicit paragraph to point to in future.
3863     return tryUseRegs(Ty, FreeIntRegs, /*RegsNeeded=*/ 1, /*IsInt=*/ true,
3864                       llvm::Type::getInt8Ty(getVMContext()));
3865   }
3866 
3867   // Homogeneous vector aggregates get passed in registers or on the stack.
3868   const Type *Base = 0;
3869   uint64_t NumMembers = 0;
3870   if (isHomogeneousAggregate(Ty, Base, getContext(), &NumMembers)) {
3871     assert(Base && "Base class should be set for homogeneous aggregate");
3872     // Homogeneous aggregates are passed and returned directly.
3873     return tryUseRegs(Ty, FreeVFPRegs, /*RegsNeeded=*/ NumMembers,
3874                       /*IsInt=*/ false);
3875   }
3876 
3877   uint64_t Size = getContext().getTypeSize(Ty);
3878   if (Size <= 128) {
3879     // Small structs can use the same direct type whether they're in registers
3880     // or on the stack.
3881     llvm::Type *BaseTy;
3882     unsigned NumBases;
3883     int SizeInRegs = (Size + 63) / 64;
3884 
3885     if (getContext().getTypeAlign(Ty) == 128) {
3886       BaseTy = llvm::Type::getIntNTy(getVMContext(), 128);
3887       NumBases = 1;
3888 
3889       // If the type may need padding registers to ensure "alignment", we must
3890       // be careful when this is accounted for. Increasing the effective size
3891       // covers all cases.
3892       SizeInRegs += FreeIntRegs % 2 != 0;
3893     } else {
3894       BaseTy = llvm::Type::getInt64Ty(getVMContext());
3895       NumBases = SizeInRegs;
3896     }
3897     llvm::Type *DirectTy = llvm::ArrayType::get(BaseTy, NumBases);
3898 
3899     return tryUseRegs(Ty, FreeIntRegs, /*RegsNeeded=*/ SizeInRegs,
3900                       /*IsInt=*/ true, DirectTy);
3901   }
3902 
3903   // If the aggregate is > 16 bytes, it's passed and returned indirectly. In
3904   // LLVM terms the return uses an "sret" pointer, but that's handled elsewhere.
3905   --FreeIntRegs;
3906   return ABIArgInfo::getIndirect(0, /* byVal = */ false);
3907 }
3908 
3909 llvm::Value *AArch64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3910                                        CodeGenFunction &CGF) const {
3911   // The AArch64 va_list type and handling is specified in the Procedure Call
3912   // Standard, section B.4:
3913   //
3914   // struct {
3915   //   void *__stack;
3916   //   void *__gr_top;
3917   //   void *__vr_top;
3918   //   int __gr_offs;
3919   //   int __vr_offs;
3920   // };
3921 
3922   assert(!CGF.CGM.getDataLayout().isBigEndian()
3923          && "va_arg not implemented for big-endian AArch64");
3924 
3925   int FreeIntRegs = 8, FreeVFPRegs = 8;
3926   Ty = CGF.getContext().getCanonicalType(Ty);
3927   ABIArgInfo AI = classifyGenericType(Ty, FreeIntRegs, FreeVFPRegs);
3928 
3929   llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
3930   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3931   llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
3932   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3933 
3934   llvm::Value *reg_offs_p = 0, *reg_offs = 0;
3935   int reg_top_index;
3936   int RegSize;
3937   if (FreeIntRegs < 8) {
3938     assert(FreeVFPRegs == 8 && "Arguments never split between int & VFP regs");
3939     // 3 is the field number of __gr_offs
3940     reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
3941     reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
3942     reg_top_index = 1; // field number for __gr_top
3943     RegSize = 8 * (8 - FreeIntRegs);
3944   } else {
3945     assert(FreeVFPRegs < 8 && "Argument must go in VFP or int regs");
3946     // 4 is the field number of __vr_offs.
3947     reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
3948     reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
3949     reg_top_index = 2; // field number for __vr_top
3950     RegSize = 16 * (8 - FreeVFPRegs);
3951   }
3952 
3953   //=======================================
3954   // Find out where argument was passed
3955   //=======================================
3956 
3957   // If reg_offs >= 0 we're already using the stack for this type of
3958   // argument. We don't want to keep updating reg_offs (in case it overflows,
3959   // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
3960   // whatever they get).
3961   llvm::Value *UsingStack = 0;
3962   UsingStack = CGF.Builder.CreateICmpSGE(reg_offs,
3963                                          llvm::ConstantInt::get(CGF.Int32Ty, 0));
3964 
3965   CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
3966 
3967   // Otherwise, at least some kind of argument could go in these registers, the
3968   // quesiton is whether this particular type is too big.
3969   CGF.EmitBlock(MaybeRegBlock);
3970 
3971   // Integer arguments may need to correct register alignment (for example a
3972   // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
3973   // align __gr_offs to calculate the potential address.
3974   if (FreeIntRegs < 8 && AI.isDirect() && getContext().getTypeAlign(Ty) > 64) {
3975     int Align = getContext().getTypeAlign(Ty) / 8;
3976 
3977     reg_offs = CGF.Builder.CreateAdd(reg_offs,
3978                                  llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
3979                                  "align_regoffs");
3980     reg_offs = CGF.Builder.CreateAnd(reg_offs,
3981                                     llvm::ConstantInt::get(CGF.Int32Ty, -Align),
3982                                     "aligned_regoffs");
3983   }
3984 
3985   // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
3986   llvm::Value *NewOffset = 0;
3987   NewOffset = CGF.Builder.CreateAdd(reg_offs,
3988                                     llvm::ConstantInt::get(CGF.Int32Ty, RegSize),
3989                                     "new_reg_offs");
3990   CGF.Builder.CreateStore(NewOffset, reg_offs_p);
3991 
3992   // Now we're in a position to decide whether this argument really was in
3993   // registers or not.
3994   llvm::Value *InRegs = 0;
3995   InRegs = CGF.Builder.CreateICmpSLE(NewOffset,
3996                                      llvm::ConstantInt::get(CGF.Int32Ty, 0),
3997                                      "inreg");
3998 
3999   CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
4000 
4001   //=======================================
4002   // Argument was in registers
4003   //=======================================
4004 
4005   // Now we emit the code for if the argument was originally passed in
4006   // registers. First start the appropriate block:
4007   CGF.EmitBlock(InRegBlock);
4008 
4009   llvm::Value *reg_top_p = 0, *reg_top = 0;
4010   reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
4011   reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
4012   llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
4013   llvm::Value *RegAddr = 0;
4014   llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
4015 
4016   if (!AI.isDirect()) {
4017     // If it's been passed indirectly (actually a struct), whatever we find from
4018     // stored registers or on the stack will actually be a struct **.
4019     MemTy = llvm::PointerType::getUnqual(MemTy);
4020   }
4021 
4022   const Type *Base = 0;
4023   uint64_t NumMembers;
4024   if (isHomogeneousAggregate(Ty, Base, getContext(), &NumMembers)
4025       && NumMembers > 1) {
4026     // Homogeneous aggregates passed in registers will have their elements split
4027     // and stored 16-bytes apart regardless of size (they're notionally in qN,
4028     // qN+1, ...). We reload and store into a temporary local variable
4029     // contiguously.
4030     assert(AI.isDirect() && "Homogeneous aggregates should be passed directly");
4031     llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
4032     llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
4033     llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
4034 
4035     for (unsigned i = 0; i < NumMembers; ++i) {
4036       llvm::Value *BaseOffset = llvm::ConstantInt::get(CGF.Int32Ty, 16 * i);
4037       llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
4038       LoadAddr = CGF.Builder.CreateBitCast(LoadAddr,
4039                                            llvm::PointerType::getUnqual(BaseTy));
4040       llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);
4041 
4042       llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
4043       CGF.Builder.CreateStore(Elem, StoreAddr);
4044     }
4045 
4046     RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
4047   } else {
4048     // Otherwise the object is contiguous in memory
4049     RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
4050   }
4051 
4052   CGF.EmitBranch(ContBlock);
4053 
4054   //=======================================
4055   // Argument was on the stack
4056   //=======================================
4057   CGF.EmitBlock(OnStackBlock);
4058 
4059   llvm::Value *stack_p = 0, *OnStackAddr = 0;
4060   stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
4061   OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
4062 
4063   // Again, stack arguments may need realigmnent. In this case both integer and
4064   // floating-point ones might be affected.
4065   if (AI.isDirect() && getContext().getTypeAlign(Ty) > 64) {
4066     int Align = getContext().getTypeAlign(Ty) / 8;
4067 
4068     OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
4069 
4070     OnStackAddr = CGF.Builder.CreateAdd(OnStackAddr,
4071                                  llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
4072                                  "align_stack");
4073     OnStackAddr = CGF.Builder.CreateAnd(OnStackAddr,
4074                                     llvm::ConstantInt::get(CGF.Int64Ty, -Align),
4075                                     "align_stack");
4076 
4077     OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
4078   }
4079 
4080   uint64_t StackSize;
4081   if (AI.isDirect())
4082     StackSize = getContext().getTypeSize(Ty) / 8;
4083   else
4084     StackSize = 8;
4085 
4086   // All stack slots are 8 bytes
4087   StackSize = llvm::RoundUpToAlignment(StackSize, 8);
4088 
4089   llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
4090   llvm::Value *NewStack = CGF.Builder.CreateGEP(OnStackAddr, StackSizeC,
4091                                                 "new_stack");
4092 
4093   // Write the new value of __stack for the next call to va_arg
4094   CGF.Builder.CreateStore(NewStack, stack_p);
4095 
4096   OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);
4097 
4098   CGF.EmitBranch(ContBlock);
4099 
4100   //=======================================
4101   // Tidy up
4102   //=======================================
4103   CGF.EmitBlock(ContBlock);
4104 
4105   llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
4106   ResAddr->addIncoming(RegAddr, InRegBlock);
4107   ResAddr->addIncoming(OnStackAddr, OnStackBlock);
4108 
4109   if (AI.isDirect())
4110     return ResAddr;
4111 
4112   return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
4113 }
4114 
4115 //===----------------------------------------------------------------------===//
4116 // NVPTX ABI Implementation
4117 //===----------------------------------------------------------------------===//
4118 
4119 namespace {
4120 
4121 class NVPTXABIInfo : public ABIInfo {
4122 public:
4123   NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
4124 
4125   ABIArgInfo classifyReturnType(QualType RetTy) const;
4126   ABIArgInfo classifyArgumentType(QualType Ty) const;
4127 
4128   virtual void computeInfo(CGFunctionInfo &FI) const;
4129   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4130                                  CodeGenFunction &CFG) const;
4131 };
4132 
4133 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
4134 public:
4135   NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
4136     : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
4137 
4138   virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4139                                    CodeGen::CodeGenModule &M) const;
4140 private:
4141   static void addKernelMetadata(llvm::Function *F);
4142 };
4143 
4144 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
4145   if (RetTy->isVoidType())
4146     return ABIArgInfo::getIgnore();
4147   if (isAggregateTypeForABI(RetTy))
4148     return ABIArgInfo::getIndirect(0);
4149   return ABIArgInfo::getDirect();
4150 }
4151 
4152 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
4153   if (isAggregateTypeForABI(Ty))
4154     return ABIArgInfo::getIndirect(0);
4155 
4156   return ABIArgInfo::getDirect();
4157 }
4158 
4159 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
4160   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4161   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
4162        it != ie; ++it)
4163     it->info = classifyArgumentType(it->type);
4164 
4165   // Always honor user-specified calling convention.
4166   if (FI.getCallingConvention() != llvm::CallingConv::C)
4167     return;
4168 
4169   FI.setEffectiveCallingConvention(getRuntimeCC());
4170 }
4171 
4172 llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4173                                      CodeGenFunction &CFG) const {
4174   llvm_unreachable("NVPTX does not support varargs");
4175 }
4176 
4177 void NVPTXTargetCodeGenInfo::
4178 SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4179                     CodeGen::CodeGenModule &M) const{
4180   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
4181   if (!FD) return;
4182 
4183   llvm::Function *F = cast<llvm::Function>(GV);
4184 
4185   // Perform special handling in OpenCL mode
4186   if (M.getLangOpts().OpenCL) {
4187     // Use OpenCL function attributes to check for kernel functions
4188     // By default, all functions are device functions
4189     if (FD->hasAttr<OpenCLKernelAttr>()) {
4190       // OpenCL __kernel functions get kernel metadata
4191       addKernelMetadata(F);
4192       // And kernel functions are not subject to inlining
4193       F->addFnAttr(llvm::Attribute::NoInline);
4194     }
4195   }
4196 
4197   // Perform special handling in CUDA mode.
4198   if (M.getLangOpts().CUDA) {
4199     // CUDA __global__ functions get a kernel metadata entry.  Since
4200     // __global__ functions cannot be called from the device, we do not
4201     // need to set the noinline attribute.
4202     if (FD->getAttr<CUDAGlobalAttr>())
4203       addKernelMetadata(F);
4204   }
4205 }
4206 
4207 void NVPTXTargetCodeGenInfo::addKernelMetadata(llvm::Function *F) {
4208   llvm::Module *M = F->getParent();
4209   llvm::LLVMContext &Ctx = M->getContext();
4210 
4211   // Get "nvvm.annotations" metadata node
4212   llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
4213 
4214   // Create !{<func-ref>, metadata !"kernel", i32 1} node
4215   llvm::SmallVector<llvm::Value *, 3> MDVals;
4216   MDVals.push_back(F);
4217   MDVals.push_back(llvm::MDString::get(Ctx, "kernel"));
4218   MDVals.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1));
4219 
4220   // Append metadata to nvvm.annotations
4221   MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
4222 }
4223 
4224 }
4225 
4226 //===----------------------------------------------------------------------===//
4227 // SystemZ ABI Implementation
4228 //===----------------------------------------------------------------------===//
4229 
4230 namespace {
4231 
4232 class SystemZABIInfo : public ABIInfo {
4233 public:
4234   SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
4235 
4236   bool isPromotableIntegerType(QualType Ty) const;
4237   bool isCompoundType(QualType Ty) const;
4238   bool isFPArgumentType(QualType Ty) const;
4239 
4240   ABIArgInfo classifyReturnType(QualType RetTy) const;
4241   ABIArgInfo classifyArgumentType(QualType ArgTy) const;
4242 
4243   virtual void computeInfo(CGFunctionInfo &FI) const {
4244     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4245     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
4246          it != ie; ++it)
4247       it->info = classifyArgumentType(it->type);
4248   }
4249 
4250   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4251                                  CodeGenFunction &CGF) const;
4252 };
4253 
4254 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
4255 public:
4256   SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
4257     : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
4258 };
4259 
4260 }
4261 
4262 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
4263   // Treat an enum type as its underlying type.
4264   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4265     Ty = EnumTy->getDecl()->getIntegerType();
4266 
4267   // Promotable integer types are required to be promoted by the ABI.
4268   if (Ty->isPromotableIntegerType())
4269     return true;
4270 
4271   // 32-bit values must also be promoted.
4272   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4273     switch (BT->getKind()) {
4274     case BuiltinType::Int:
4275     case BuiltinType::UInt:
4276       return true;
4277     default:
4278       return false;
4279     }
4280   return false;
4281 }
4282 
4283 bool SystemZABIInfo::isCompoundType(QualType Ty) const {
4284   return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty);
4285 }
4286 
4287 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
4288   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4289     switch (BT->getKind()) {
4290     case BuiltinType::Float:
4291     case BuiltinType::Double:
4292       return true;
4293     default:
4294       return false;
4295     }
4296 
4297   if (const RecordType *RT = Ty->getAsStructureType()) {
4298     const RecordDecl *RD = RT->getDecl();
4299     bool Found = false;
4300 
4301     // If this is a C++ record, check the bases first.
4302     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
4303       for (CXXRecordDecl::base_class_const_iterator I = CXXRD->bases_begin(),
4304              E = CXXRD->bases_end(); I != E; ++I) {
4305         QualType Base = I->getType();
4306 
4307         // Empty bases don't affect things either way.
4308         if (isEmptyRecord(getContext(), Base, true))
4309           continue;
4310 
4311         if (Found)
4312           return false;
4313         Found = isFPArgumentType(Base);
4314         if (!Found)
4315           return false;
4316       }
4317 
4318     // Check the fields.
4319     for (RecordDecl::field_iterator I = RD->field_begin(),
4320            E = RD->field_end(); I != E; ++I) {
4321       const FieldDecl *FD = *I;
4322 
4323       // Empty bitfields don't affect things either way.
4324       // Unlike isSingleElementStruct(), empty structure and array fields
4325       // do count.  So do anonymous bitfields that aren't zero-sized.
4326       if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
4327         return true;
4328 
4329       // Unlike isSingleElementStruct(), arrays do not count.
4330       // Nested isFPArgumentType structures still do though.
4331       if (Found)
4332         return false;
4333       Found = isFPArgumentType(FD->getType());
4334       if (!Found)
4335         return false;
4336     }
4337 
4338     // Unlike isSingleElementStruct(), trailing padding is allowed.
4339     // An 8-byte aligned struct s { float f; } is passed as a double.
4340     return Found;
4341   }
4342 
4343   return false;
4344 }
4345 
4346 llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4347                                        CodeGenFunction &CGF) const {
4348   // Assume that va_list type is correct; should be pointer to LLVM type:
4349   // struct {
4350   //   i64 __gpr;
4351   //   i64 __fpr;
4352   //   i8 *__overflow_arg_area;
4353   //   i8 *__reg_save_area;
4354   // };
4355 
4356   // Every argument occupies 8 bytes and is passed by preference in either
4357   // GPRs or FPRs.
4358   Ty = CGF.getContext().getCanonicalType(Ty);
4359   ABIArgInfo AI = classifyArgumentType(Ty);
4360   bool InFPRs = isFPArgumentType(Ty);
4361 
4362   llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
4363   bool IsIndirect = AI.isIndirect();
4364   unsigned UnpaddedBitSize;
4365   if (IsIndirect) {
4366     APTy = llvm::PointerType::getUnqual(APTy);
4367     UnpaddedBitSize = 64;
4368   } else
4369     UnpaddedBitSize = getContext().getTypeSize(Ty);
4370   unsigned PaddedBitSize = 64;
4371   assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");
4372 
4373   unsigned PaddedSize = PaddedBitSize / 8;
4374   unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;
4375 
4376   unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
4377   if (InFPRs) {
4378     MaxRegs = 4; // Maximum of 4 FPR arguments
4379     RegCountField = 1; // __fpr
4380     RegSaveIndex = 16; // save offset for f0
4381     RegPadding = 0; // floats are passed in the high bits of an FPR
4382   } else {
4383     MaxRegs = 5; // Maximum of 5 GPR arguments
4384     RegCountField = 0; // __gpr
4385     RegSaveIndex = 2; // save offset for r2
4386     RegPadding = Padding; // values are passed in the low bits of a GPR
4387   }
4388 
4389   llvm::Value *RegCountPtr =
4390     CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
4391   llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
4392   llvm::Type *IndexTy = RegCount->getType();
4393   llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
4394   llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
4395 						  "fits_in_regs");
4396 
4397   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
4398   llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
4399   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
4400   CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
4401 
4402   // Emit code to load the value if it was passed in registers.
4403   CGF.EmitBlock(InRegBlock);
4404 
4405   // Work out the address of an argument register.
4406   llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
4407   llvm::Value *ScaledRegCount =
4408     CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
4409   llvm::Value *RegBase =
4410     llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding);
4411   llvm::Value *RegOffset =
4412     CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
4413   llvm::Value *RegSaveAreaPtr =
4414     CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
4415   llvm::Value *RegSaveArea =
4416     CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
4417   llvm::Value *RawRegAddr =
4418     CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr");
4419   llvm::Value *RegAddr =
4420     CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr");
4421 
4422   // Update the register count
4423   llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
4424   llvm::Value *NewRegCount =
4425     CGF.Builder.CreateAdd(RegCount, One, "reg_count");
4426   CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
4427   CGF.EmitBranch(ContBlock);
4428 
4429   // Emit code to load the value if it was passed in memory.
4430   CGF.EmitBlock(InMemBlock);
4431 
4432   // Work out the address of a stack argument.
4433   llvm::Value *OverflowArgAreaPtr =
4434     CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
4435   llvm::Value *OverflowArgArea =
4436     CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
4437   llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
4438   llvm::Value *RawMemAddr =
4439     CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr");
4440   llvm::Value *MemAddr =
4441     CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr");
4442 
4443   // Update overflow_arg_area_ptr pointer
4444   llvm::Value *NewOverflowArgArea =
4445     CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
4446   CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
4447   CGF.EmitBranch(ContBlock);
4448 
4449   // Return the appropriate result.
4450   CGF.EmitBlock(ContBlock);
4451   llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr");
4452   ResAddr->addIncoming(RegAddr, InRegBlock);
4453   ResAddr->addIncoming(MemAddr, InMemBlock);
4454 
4455   if (IsIndirect)
4456     return CGF.Builder.CreateLoad(ResAddr, "indirect_arg");
4457 
4458   return ResAddr;
4459 }
4460 
4461 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
4462     const llvm::Triple &Triple, const CodeGenOptions &Opts) {
4463   assert(Triple.getArch() == llvm::Triple::x86);
4464 
4465   switch (Opts.getStructReturnConvention()) {
4466   case CodeGenOptions::SRCK_Default:
4467     break;
4468   case CodeGenOptions::SRCK_OnStack:  // -fpcc-struct-return
4469     return false;
4470   case CodeGenOptions::SRCK_InRegs:  // -freg-struct-return
4471     return true;
4472   }
4473 
4474   if (Triple.isOSDarwin())
4475     return true;
4476 
4477   switch (Triple.getOS()) {
4478   case llvm::Triple::Cygwin:
4479   case llvm::Triple::MinGW32:
4480   case llvm::Triple::AuroraUX:
4481   case llvm::Triple::DragonFly:
4482   case llvm::Triple::FreeBSD:
4483   case llvm::Triple::OpenBSD:
4484   case llvm::Triple::Bitrig:
4485   case llvm::Triple::Win32:
4486     return true;
4487   default:
4488     return false;
4489   }
4490 }
4491 
4492 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
4493   if (RetTy->isVoidType())
4494     return ABIArgInfo::getIgnore();
4495   if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
4496     return ABIArgInfo::getIndirect(0);
4497   return (isPromotableIntegerType(RetTy) ?
4498           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4499 }
4500 
4501 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
4502   // Handle the generic C++ ABI.
4503   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
4504     return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
4505 
4506   // Integers and enums are extended to full register width.
4507   if (isPromotableIntegerType(Ty))
4508     return ABIArgInfo::getExtend();
4509 
4510   // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
4511   uint64_t Size = getContext().getTypeSize(Ty);
4512   if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
4513     return ABIArgInfo::getIndirect(0);
4514 
4515   // Handle small structures.
4516   if (const RecordType *RT = Ty->getAs<RecordType>()) {
4517     // Structures with flexible arrays have variable length, so really
4518     // fail the size test above.
4519     const RecordDecl *RD = RT->getDecl();
4520     if (RD->hasFlexibleArrayMember())
4521       return ABIArgInfo::getIndirect(0);
4522 
4523     // The structure is passed as an unextended integer, a float, or a double.
4524     llvm::Type *PassTy;
4525     if (isFPArgumentType(Ty)) {
4526       assert(Size == 32 || Size == 64);
4527       if (Size == 32)
4528         PassTy = llvm::Type::getFloatTy(getVMContext());
4529       else
4530         PassTy = llvm::Type::getDoubleTy(getVMContext());
4531     } else
4532       PassTy = llvm::IntegerType::get(getVMContext(), Size);
4533     return ABIArgInfo::getDirect(PassTy);
4534   }
4535 
4536   // Non-structure compounds are passed indirectly.
4537   if (isCompoundType(Ty))
4538     return ABIArgInfo::getIndirect(0);
4539 
4540   return ABIArgInfo::getDirect(0);
4541 }
4542 
4543 //===----------------------------------------------------------------------===//
4544 // MBlaze ABI Implementation
4545 //===----------------------------------------------------------------------===//
4546 
4547 namespace {
4548 
4549 class MBlazeABIInfo : public ABIInfo {
4550 public:
4551   MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
4552 
4553   bool isPromotableIntegerType(QualType Ty) const;
4554 
4555   ABIArgInfo classifyReturnType(QualType RetTy) const;
4556   ABIArgInfo classifyArgumentType(QualType RetTy) const;
4557 
4558   virtual void computeInfo(CGFunctionInfo &FI) const {
4559     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4560     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
4561          it != ie; ++it)
4562       it->info = classifyArgumentType(it->type);
4563   }
4564 
4565   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4566                                  CodeGenFunction &CGF) const;
4567 };
4568 
4569 class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo {
4570 public:
4571   MBlazeTargetCodeGenInfo(CodeGenTypes &CGT)
4572     : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {}
4573   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4574                            CodeGen::CodeGenModule &M) const;
4575 };
4576 
4577 }
4578 
4579 bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const {
4580   // MBlaze ABI requires all 8 and 16 bit quantities to be extended.
4581   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4582     switch (BT->getKind()) {
4583     case BuiltinType::Bool:
4584     case BuiltinType::Char_S:
4585     case BuiltinType::Char_U:
4586     case BuiltinType::SChar:
4587     case BuiltinType::UChar:
4588     case BuiltinType::Short:
4589     case BuiltinType::UShort:
4590       return true;
4591     default:
4592       return false;
4593     }
4594   return false;
4595 }
4596 
4597 llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4598                                       CodeGenFunction &CGF) const {
4599   // FIXME: Implement
4600   return 0;
4601 }
4602 
4603 
4604 ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const {
4605   if (RetTy->isVoidType())
4606     return ABIArgInfo::getIgnore();
4607   if (isAggregateTypeForABI(RetTy))
4608     return ABIArgInfo::getIndirect(0);
4609 
4610   return (isPromotableIntegerType(RetTy) ?
4611           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4612 }
4613 
4614 ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const {
4615   if (isAggregateTypeForABI(Ty))
4616     return ABIArgInfo::getIndirect(0);
4617 
4618   return (isPromotableIntegerType(Ty) ?
4619           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4620 }
4621 
4622 void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
4623                                                   llvm::GlobalValue *GV,
4624                                                   CodeGen::CodeGenModule &M)
4625                                                   const {
4626   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
4627   if (!FD) return;
4628 
4629   llvm::CallingConv::ID CC = llvm::CallingConv::C;
4630   if (FD->hasAttr<MBlazeInterruptHandlerAttr>())
4631     CC = llvm::CallingConv::MBLAZE_INTR;
4632   else if (FD->hasAttr<MBlazeSaveVolatilesAttr>())
4633     CC = llvm::CallingConv::MBLAZE_SVOL;
4634 
4635   if (CC != llvm::CallingConv::C) {
4636       // Handle 'interrupt_handler' attribute:
4637       llvm::Function *F = cast<llvm::Function>(GV);
4638 
4639       // Step 1: Set ISR calling convention.
4640       F->setCallingConv(CC);
4641 
4642       // Step 2: Add attributes goodness.
4643       F->addFnAttr(llvm::Attribute::NoInline);
4644   }
4645 
4646   // Step 3: Emit _interrupt_handler alias.
4647   if (CC == llvm::CallingConv::MBLAZE_INTR)
4648     new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
4649                           "_interrupt_handler", GV, &M.getModule());
4650 }
4651 
4652 
4653 //===----------------------------------------------------------------------===//
4654 // MSP430 ABI Implementation
4655 //===----------------------------------------------------------------------===//
4656 
4657 namespace {
4658 
4659 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
4660 public:
4661   MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
4662     : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
4663   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4664                            CodeGen::CodeGenModule &M) const;
4665 };
4666 
4667 }
4668 
4669 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
4670                                                   llvm::GlobalValue *GV,
4671                                              CodeGen::CodeGenModule &M) const {
4672   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
4673     if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
4674       // Handle 'interrupt' attribute:
4675       llvm::Function *F = cast<llvm::Function>(GV);
4676 
4677       // Step 1: Set ISR calling convention.
4678       F->setCallingConv(llvm::CallingConv::MSP430_INTR);
4679 
4680       // Step 2: Add attributes goodness.
4681       F->addFnAttr(llvm::Attribute::NoInline);
4682 
4683       // Step 3: Emit ISR vector alias.
4684       unsigned Num = attr->getNumber() / 2;
4685       new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
4686                             "__isr_" + Twine(Num),
4687                             GV, &M.getModule());
4688     }
4689   }
4690 }
4691 
4692 //===----------------------------------------------------------------------===//
4693 // MIPS ABI Implementation.  This works for both little-endian and
4694 // big-endian variants.
4695 //===----------------------------------------------------------------------===//
4696 
4697 namespace {
4698 class MipsABIInfo : public ABIInfo {
4699   bool IsO32;
4700   unsigned MinABIStackAlignInBytes, StackAlignInBytes;
4701   void CoerceToIntArgs(uint64_t TySize,
4702                        SmallVectorImpl<llvm::Type *> &ArgList) const;
4703   llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
4704   llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
4705   llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
4706 public:
4707   MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
4708     ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
4709     StackAlignInBytes(IsO32 ? 8 : 16) {}
4710 
4711   ABIArgInfo classifyReturnType(QualType RetTy) const;
4712   ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
4713   virtual void computeInfo(CGFunctionInfo &FI) const;
4714   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4715                                  CodeGenFunction &CGF) const;
4716 };
4717 
4718 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
4719   unsigned SizeOfUnwindException;
4720 public:
4721   MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
4722     : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
4723       SizeOfUnwindException(IsO32 ? 24 : 32) {}
4724 
4725   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
4726     return 29;
4727   }
4728 
4729   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4730                            CodeGen::CodeGenModule &CGM) const {
4731     const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
4732     if (!FD) return;
4733     llvm::Function *Fn = cast<llvm::Function>(GV);
4734     if (FD->hasAttr<Mips16Attr>()) {
4735       Fn->addFnAttr("mips16");
4736     }
4737     else if (FD->hasAttr<NoMips16Attr>()) {
4738       Fn->addFnAttr("nomips16");
4739     }
4740   }
4741 
4742   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4743                                llvm::Value *Address) const;
4744 
4745   unsigned getSizeOfUnwindException() const {
4746     return SizeOfUnwindException;
4747   }
4748 };
4749 }
4750 
4751 void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
4752                                   SmallVectorImpl<llvm::Type *> &ArgList) const {
4753   llvm::IntegerType *IntTy =
4754     llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
4755 
4756   // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
4757   for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
4758     ArgList.push_back(IntTy);
4759 
4760   // If necessary, add one more integer type to ArgList.
4761   unsigned R = TySize % (MinABIStackAlignInBytes * 8);
4762 
4763   if (R)
4764     ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
4765 }
4766 
4767 // In N32/64, an aligned double precision floating point field is passed in
4768 // a register.
4769 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
4770   SmallVector<llvm::Type*, 8> ArgList, IntArgList;
4771 
4772   if (IsO32) {
4773     CoerceToIntArgs(TySize, ArgList);
4774     return llvm::StructType::get(getVMContext(), ArgList);
4775   }
4776 
4777   if (Ty->isComplexType())
4778     return CGT.ConvertType(Ty);
4779 
4780   const RecordType *RT = Ty->getAs<RecordType>();
4781 
4782   // Unions/vectors are passed in integer registers.
4783   if (!RT || !RT->isStructureOrClassType()) {
4784     CoerceToIntArgs(TySize, ArgList);
4785     return llvm::StructType::get(getVMContext(), ArgList);
4786   }
4787 
4788   const RecordDecl *RD = RT->getDecl();
4789   const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
4790   assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
4791 
4792   uint64_t LastOffset = 0;
4793   unsigned idx = 0;
4794   llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
4795 
4796   // Iterate over fields in the struct/class and check if there are any aligned
4797   // double fields.
4798   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
4799        i != e; ++i, ++idx) {
4800     const QualType Ty = i->getType();
4801     const BuiltinType *BT = Ty->getAs<BuiltinType>();
4802 
4803     if (!BT || BT->getKind() != BuiltinType::Double)
4804       continue;
4805 
4806     uint64_t Offset = Layout.getFieldOffset(idx);
4807     if (Offset % 64) // Ignore doubles that are not aligned.
4808       continue;
4809 
4810     // Add ((Offset - LastOffset) / 64) args of type i64.
4811     for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
4812       ArgList.push_back(I64);
4813 
4814     // Add double type.
4815     ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
4816     LastOffset = Offset + 64;
4817   }
4818 
4819   CoerceToIntArgs(TySize - LastOffset, IntArgList);
4820   ArgList.append(IntArgList.begin(), IntArgList.end());
4821 
4822   return llvm::StructType::get(getVMContext(), ArgList);
4823 }
4824 
4825 llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const {
4826   assert((Offset % MinABIStackAlignInBytes) == 0);
4827 
4828   if ((Align - 1) & Offset)
4829     return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
4830 
4831   return 0;
4832 }
4833 
4834 ABIArgInfo
4835 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
4836   uint64_t OrigOffset = Offset;
4837   uint64_t TySize = getContext().getTypeSize(Ty);
4838   uint64_t Align = getContext().getTypeAlign(Ty) / 8;
4839 
4840   Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
4841                    (uint64_t)StackAlignInBytes);
4842   Offset = llvm::RoundUpToAlignment(Offset, Align);
4843   Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
4844 
4845   if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
4846     // Ignore empty aggregates.
4847     if (TySize == 0)
4848       return ABIArgInfo::getIgnore();
4849 
4850     if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT)) {
4851       Offset = OrigOffset + MinABIStackAlignInBytes;
4852       return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
4853     }
4854 
4855     // If we have reached here, aggregates are passed directly by coercing to
4856     // another structure type. Padding is inserted if the offset of the
4857     // aggregate is unaligned.
4858     return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
4859                                  getPaddingType(Align, OrigOffset));
4860   }
4861 
4862   // Treat an enum type as its underlying type.
4863   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4864     Ty = EnumTy->getDecl()->getIntegerType();
4865 
4866   if (Ty->isPromotableIntegerType())
4867     return ABIArgInfo::getExtend();
4868 
4869   return ABIArgInfo::getDirect(0, 0,
4870                                IsO32 ? 0 : getPaddingType(Align, OrigOffset));
4871 }
4872 
4873 llvm::Type*
4874 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
4875   const RecordType *RT = RetTy->getAs<RecordType>();
4876   SmallVector<llvm::Type*, 8> RTList;
4877 
4878   if (RT && RT->isStructureOrClassType()) {
4879     const RecordDecl *RD = RT->getDecl();
4880     const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
4881     unsigned FieldCnt = Layout.getFieldCount();
4882 
4883     // N32/64 returns struct/classes in floating point registers if the
4884     // following conditions are met:
4885     // 1. The size of the struct/class is no larger than 128-bit.
4886     // 2. The struct/class has one or two fields all of which are floating
4887     //    point types.
4888     // 3. The offset of the first field is zero (this follows what gcc does).
4889     //
4890     // Any other composite results are returned in integer registers.
4891     //
4892     if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
4893       RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
4894       for (; b != e; ++b) {
4895         const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
4896 
4897         if (!BT || !BT->isFloatingPoint())
4898           break;
4899 
4900         RTList.push_back(CGT.ConvertType(b->getType()));
4901       }
4902 
4903       if (b == e)
4904         return llvm::StructType::get(getVMContext(), RTList,
4905                                      RD->hasAttr<PackedAttr>());
4906 
4907       RTList.clear();
4908     }
4909   }
4910 
4911   CoerceToIntArgs(Size, RTList);
4912   return llvm::StructType::get(getVMContext(), RTList);
4913 }
4914 
4915 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
4916   uint64_t Size = getContext().getTypeSize(RetTy);
4917 
4918   if (RetTy->isVoidType() || Size == 0)
4919     return ABIArgInfo::getIgnore();
4920 
4921   if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
4922     if (isRecordReturnIndirect(RetTy, CGT))
4923       return ABIArgInfo::getIndirect(0);
4924 
4925     if (Size <= 128) {
4926       if (RetTy->isAnyComplexType())
4927         return ABIArgInfo::getDirect();
4928 
4929       // O32 returns integer vectors in registers.
4930       if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())
4931         return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
4932 
4933       if (!IsO32)
4934         return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
4935     }
4936 
4937     return ABIArgInfo::getIndirect(0);
4938   }
4939 
4940   // Treat an enum type as its underlying type.
4941   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
4942     RetTy = EnumTy->getDecl()->getIntegerType();
4943 
4944   return (RetTy->isPromotableIntegerType() ?
4945           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4946 }
4947 
4948 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
4949   ABIArgInfo &RetInfo = FI.getReturnInfo();
4950   RetInfo = classifyReturnType(FI.getReturnType());
4951 
4952   // Check if a pointer to an aggregate is passed as a hidden argument.
4953   uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
4954 
4955   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
4956        it != ie; ++it)
4957     it->info = classifyArgumentType(it->type, Offset);
4958 }
4959 
4960 llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4961                                     CodeGenFunction &CGF) const {
4962   llvm::Type *BP = CGF.Int8PtrTy;
4963   llvm::Type *BPP = CGF.Int8PtrPtrTy;
4964 
4965   CGBuilderTy &Builder = CGF.Builder;
4966   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
4967   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
4968   int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
4969   llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
4970   llvm::Value *AddrTyped;
4971   unsigned PtrWidth = getTarget().getPointerWidth(0);
4972   llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
4973 
4974   if (TypeAlign > MinABIStackAlignInBytes) {
4975     llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
4976     llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
4977     llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
4978     llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
4979     llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
4980     AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
4981   }
4982   else
4983     AddrTyped = Builder.CreateBitCast(Addr, PTy);
4984 
4985   llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
4986   TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
4987   uint64_t Offset =
4988     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
4989   llvm::Value *NextAddr =
4990     Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
4991                       "ap.next");
4992   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
4993 
4994   return AddrTyped;
4995 }
4996 
4997 bool
4998 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4999                                                llvm::Value *Address) const {
5000   // This information comes from gcc's implementation, which seems to
5001   // as canonical as it gets.
5002 
5003   // Everything on MIPS is 4 bytes.  Double-precision FP registers
5004   // are aliased to pairs of single-precision FP registers.
5005   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
5006 
5007   // 0-31 are the general purpose registers, $0 - $31.
5008   // 32-63 are the floating-point registers, $f0 - $f31.
5009   // 64 and 65 are the multiply/divide registers, $hi and $lo.
5010   // 66 is the (notional, I think) register for signal-handler return.
5011   AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
5012 
5013   // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
5014   // They are one bit wide and ignored here.
5015 
5016   // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
5017   // (coprocessor 1 is the FP unit)
5018   // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
5019   // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
5020   // 176-181 are the DSP accumulator registers.
5021   AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
5022   return false;
5023 }
5024 
5025 //===----------------------------------------------------------------------===//
5026 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
5027 // Currently subclassed only to implement custom OpenCL C function attribute
5028 // handling.
5029 //===----------------------------------------------------------------------===//
5030 
5031 namespace {
5032 
5033 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
5034 public:
5035   TCETargetCodeGenInfo(CodeGenTypes &CGT)
5036     : DefaultTargetCodeGenInfo(CGT) {}
5037 
5038   virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5039                                    CodeGen::CodeGenModule &M) const;
5040 };
5041 
5042 void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
5043                                                llvm::GlobalValue *GV,
5044                                                CodeGen::CodeGenModule &M) const {
5045   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
5046   if (!FD) return;
5047 
5048   llvm::Function *F = cast<llvm::Function>(GV);
5049 
5050   if (M.getLangOpts().OpenCL) {
5051     if (FD->hasAttr<OpenCLKernelAttr>()) {
5052       // OpenCL C Kernel functions are not subject to inlining
5053       F->addFnAttr(llvm::Attribute::NoInline);
5054 
5055       if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
5056 
5057         // Convert the reqd_work_group_size() attributes to metadata.
5058         llvm::LLVMContext &Context = F->getContext();
5059         llvm::NamedMDNode *OpenCLMetadata =
5060             M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");
5061 
5062         SmallVector<llvm::Value*, 5> Operands;
5063         Operands.push_back(F);
5064 
5065         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
5066                              llvm::APInt(32,
5067                              FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
5068         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
5069                              llvm::APInt(32,
5070                                FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim())));
5071         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
5072                              llvm::APInt(32,
5073                                FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim())));
5074 
5075         // Add a boolean constant operand for "required" (true) or "hint" (false)
5076         // for implementing the work_group_size_hint attr later. Currently
5077         // always true as the hint is not yet implemented.
5078         Operands.push_back(llvm::ConstantInt::getTrue(Context));
5079         OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
5080       }
5081     }
5082   }
5083 }
5084 
5085 }
5086 
5087 //===----------------------------------------------------------------------===//
5088 // Hexagon ABI Implementation
5089 //===----------------------------------------------------------------------===//
5090 
5091 namespace {
5092 
5093 class HexagonABIInfo : public ABIInfo {
5094 
5095 
5096 public:
5097   HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5098 
5099 private:
5100 
5101   ABIArgInfo classifyReturnType(QualType RetTy) const;
5102   ABIArgInfo classifyArgumentType(QualType RetTy) const;
5103 
5104   virtual void computeInfo(CGFunctionInfo &FI) const;
5105 
5106   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5107                                  CodeGenFunction &CGF) const;
5108 };
5109 
5110 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
5111 public:
5112   HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
5113     :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
5114 
5115   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
5116     return 29;
5117   }
5118 };
5119 
5120 }
5121 
5122 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
5123   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5124   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
5125        it != ie; ++it)
5126     it->info = classifyArgumentType(it->type);
5127 }
5128 
5129 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
5130   if (!isAggregateTypeForABI(Ty)) {
5131     // Treat an enum type as its underlying type.
5132     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5133       Ty = EnumTy->getDecl()->getIntegerType();
5134 
5135     return (Ty->isPromotableIntegerType() ?
5136             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5137   }
5138 
5139   // Ignore empty records.
5140   if (isEmptyRecord(getContext(), Ty, true))
5141     return ABIArgInfo::getIgnore();
5142 
5143   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
5144     return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
5145 
5146   uint64_t Size = getContext().getTypeSize(Ty);
5147   if (Size > 64)
5148     return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
5149     // Pass in the smallest viable integer type.
5150   else if (Size > 32)
5151       return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
5152   else if (Size > 16)
5153       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5154   else if (Size > 8)
5155       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5156   else
5157       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5158 }
5159 
5160 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
5161   if (RetTy->isVoidType())
5162     return ABIArgInfo::getIgnore();
5163 
5164   // Large vector types should be returned via memory.
5165   if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
5166     return ABIArgInfo::getIndirect(0);
5167 
5168   if (!isAggregateTypeForABI(RetTy)) {
5169     // Treat an enum type as its underlying type.
5170     if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5171       RetTy = EnumTy->getDecl()->getIntegerType();
5172 
5173     return (RetTy->isPromotableIntegerType() ?
5174             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5175   }
5176 
5177   // Structures with either a non-trivial destructor or a non-trivial
5178   // copy constructor are always indirect.
5179   if (isRecordReturnIndirect(RetTy, CGT))
5180     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5181 
5182   if (isEmptyRecord(getContext(), RetTy, true))
5183     return ABIArgInfo::getIgnore();
5184 
5185   // Aggregates <= 8 bytes are returned in r0; other aggregates
5186   // are returned indirectly.
5187   uint64_t Size = getContext().getTypeSize(RetTy);
5188   if (Size <= 64) {
5189     // Return in the smallest viable integer type.
5190     if (Size <= 8)
5191       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5192     if (Size <= 16)
5193       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5194     if (Size <= 32)
5195       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5196     return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
5197   }
5198 
5199   return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
5200 }
5201 
5202 llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5203                                        CodeGenFunction &CGF) const {
5204   // FIXME: Need to handle alignment
5205   llvm::Type *BPP = CGF.Int8PtrPtrTy;
5206 
5207   CGBuilderTy &Builder = CGF.Builder;
5208   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
5209                                                        "ap");
5210   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
5211   llvm::Type *PTy =
5212     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
5213   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
5214 
5215   uint64_t Offset =
5216     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
5217   llvm::Value *NextAddr =
5218     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
5219                       "ap.next");
5220   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
5221 
5222   return AddrTyped;
5223 }
5224 
5225 
5226 //===----------------------------------------------------------------------===//
5227 // SPARC v9 ABI Implementation.
5228 // Based on the SPARC Compliance Definition version 2.4.1.
5229 //
5230 // Function arguments a mapped to a nominal "parameter array" and promoted to
5231 // registers depending on their type. Each argument occupies 8 or 16 bytes in
5232 // the array, structs larger than 16 bytes are passed indirectly.
5233 //
5234 // One case requires special care:
5235 //
5236 //   struct mixed {
5237 //     int i;
5238 //     float f;
5239 //   };
5240 //
5241 // When a struct mixed is passed by value, it only occupies 8 bytes in the
5242 // parameter array, but the int is passed in an integer register, and the float
5243 // is passed in a floating point register. This is represented as two arguments
5244 // with the LLVM IR inreg attribute:
5245 //
5246 //   declare void f(i32 inreg %i, float inreg %f)
5247 //
5248 // The code generator will only allocate 4 bytes from the parameter array for
5249 // the inreg arguments. All other arguments are allocated a multiple of 8
5250 // bytes.
5251 //
5252 namespace {
5253 class SparcV9ABIInfo : public ABIInfo {
5254 public:
5255   SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5256 
5257 private:
5258   ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
5259   virtual void computeInfo(CGFunctionInfo &FI) const;
5260   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5261                                  CodeGenFunction &CGF) const;
5262 
5263   // Coercion type builder for structs passed in registers. The coercion type
5264   // serves two purposes:
5265   //
5266   // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
5267   //    in registers.
5268   // 2. Expose aligned floating point elements as first-level elements, so the
5269   //    code generator knows to pass them in floating point registers.
5270   //
5271   // We also compute the InReg flag which indicates that the struct contains
5272   // aligned 32-bit floats.
5273   //
5274   struct CoerceBuilder {
5275     llvm::LLVMContext &Context;
5276     const llvm::DataLayout &DL;
5277     SmallVector<llvm::Type*, 8> Elems;
5278     uint64_t Size;
5279     bool InReg;
5280 
5281     CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
5282       : Context(c), DL(dl), Size(0), InReg(false) {}
5283 
5284     // Pad Elems with integers until Size is ToSize.
5285     void pad(uint64_t ToSize) {
5286       assert(ToSize >= Size && "Cannot remove elements");
5287       if (ToSize == Size)
5288         return;
5289 
5290       // Finish the current 64-bit word.
5291       uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
5292       if (Aligned > Size && Aligned <= ToSize) {
5293         Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
5294         Size = Aligned;
5295       }
5296 
5297       // Add whole 64-bit words.
5298       while (Size + 64 <= ToSize) {
5299         Elems.push_back(llvm::Type::getInt64Ty(Context));
5300         Size += 64;
5301       }
5302 
5303       // Final in-word padding.
5304       if (Size < ToSize) {
5305         Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
5306         Size = ToSize;
5307       }
5308     }
5309 
5310     // Add a floating point element at Offset.
5311     void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
5312       // Unaligned floats are treated as integers.
5313       if (Offset % Bits)
5314         return;
5315       // The InReg flag is only required if there are any floats < 64 bits.
5316       if (Bits < 64)
5317         InReg = true;
5318       pad(Offset);
5319       Elems.push_back(Ty);
5320       Size = Offset + Bits;
5321     }
5322 
5323     // Add a struct type to the coercion type, starting at Offset (in bits).
5324     void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
5325       const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
5326       for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
5327         llvm::Type *ElemTy = StrTy->getElementType(i);
5328         uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
5329         switch (ElemTy->getTypeID()) {
5330         case llvm::Type::StructTyID:
5331           addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
5332           break;
5333         case llvm::Type::FloatTyID:
5334           addFloat(ElemOffset, ElemTy, 32);
5335           break;
5336         case llvm::Type::DoubleTyID:
5337           addFloat(ElemOffset, ElemTy, 64);
5338           break;
5339         case llvm::Type::FP128TyID:
5340           addFloat(ElemOffset, ElemTy, 128);
5341           break;
5342         case llvm::Type::PointerTyID:
5343           if (ElemOffset % 64 == 0) {
5344             pad(ElemOffset);
5345             Elems.push_back(ElemTy);
5346             Size += 64;
5347           }
5348           break;
5349         default:
5350           break;
5351         }
5352       }
5353     }
5354 
5355     // Check if Ty is a usable substitute for the coercion type.
5356     bool isUsableType(llvm::StructType *Ty) const {
5357       if (Ty->getNumElements() != Elems.size())
5358         return false;
5359       for (unsigned i = 0, e = Elems.size(); i != e; ++i)
5360         if (Elems[i] != Ty->getElementType(i))
5361           return false;
5362       return true;
5363     }
5364 
5365     // Get the coercion type as a literal struct type.
5366     llvm::Type *getType() const {
5367       if (Elems.size() == 1)
5368         return Elems.front();
5369       else
5370         return llvm::StructType::get(Context, Elems);
5371     }
5372   };
5373 };
5374 } // end anonymous namespace
5375 
5376 ABIArgInfo
5377 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
5378   if (Ty->isVoidType())
5379     return ABIArgInfo::getIgnore();
5380 
5381   uint64_t Size = getContext().getTypeSize(Ty);
5382 
5383   // Anything too big to fit in registers is passed with an explicit indirect
5384   // pointer / sret pointer.
5385   if (Size > SizeLimit)
5386     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5387 
5388   // Treat an enum type as its underlying type.
5389   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5390     Ty = EnumTy->getDecl()->getIntegerType();
5391 
5392   // Integer types smaller than a register are extended.
5393   if (Size < 64 && Ty->isIntegerType())
5394     return ABIArgInfo::getExtend();
5395 
5396   // Other non-aggregates go in registers.
5397   if (!isAggregateTypeForABI(Ty))
5398     return ABIArgInfo::getDirect();
5399 
5400   // This is a small aggregate type that should be passed in registers.
5401   // Build a coercion type from the LLVM struct type.
5402   llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
5403   if (!StrTy)
5404     return ABIArgInfo::getDirect();
5405 
5406   CoerceBuilder CB(getVMContext(), getDataLayout());
5407   CB.addStruct(0, StrTy);
5408   CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));
5409 
5410   // Try to use the original type for coercion.
5411   llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
5412 
5413   if (CB.InReg)
5414     return ABIArgInfo::getDirectInReg(CoerceTy);
5415   else
5416     return ABIArgInfo::getDirect(CoerceTy);
5417 }
5418 
5419 llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5420                                        CodeGenFunction &CGF) const {
5421   ABIArgInfo AI = classifyType(Ty, 16 * 8);
5422   llvm::Type *ArgTy = CGT.ConvertType(Ty);
5423   if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
5424     AI.setCoerceToType(ArgTy);
5425 
5426   llvm::Type *BPP = CGF.Int8PtrPtrTy;
5427   CGBuilderTy &Builder = CGF.Builder;
5428   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
5429   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
5430   llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
5431   llvm::Value *ArgAddr;
5432   unsigned Stride;
5433 
5434   switch (AI.getKind()) {
5435   case ABIArgInfo::Expand:
5436     llvm_unreachable("Unsupported ABI kind for va_arg");
5437 
5438   case ABIArgInfo::Extend:
5439     Stride = 8;
5440     ArgAddr = Builder
5441       .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
5442                           "extend");
5443     break;
5444 
5445   case ABIArgInfo::Direct:
5446     Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
5447     ArgAddr = Addr;
5448     break;
5449 
5450   case ABIArgInfo::Indirect:
5451     Stride = 8;
5452     ArgAddr = Builder.CreateBitCast(Addr,
5453                                     llvm::PointerType::getUnqual(ArgPtrTy),
5454                                     "indirect");
5455     ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
5456     break;
5457 
5458   case ABIArgInfo::Ignore:
5459     return llvm::UndefValue::get(ArgPtrTy);
5460   }
5461 
5462   // Update VAList.
5463   Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
5464   Builder.CreateStore(Addr, VAListAddrAsBPP);
5465 
5466   return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
5467 }
5468 
5469 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
5470   FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
5471   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
5472        it != ie; ++it)
5473     it->info = classifyType(it->type, 16 * 8);
5474 }
5475 
5476 namespace {
5477 class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
5478 public:
5479   SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
5480     : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
5481 };
5482 } // end anonymous namespace
5483 
5484 
5485 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
5486   if (TheTargetCodeGenInfo)
5487     return *TheTargetCodeGenInfo;
5488 
5489   const llvm::Triple &Triple = getTarget().getTriple();
5490   switch (Triple.getArch()) {
5491   default:
5492     return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
5493 
5494   case llvm::Triple::le32:
5495     return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
5496   case llvm::Triple::mips:
5497   case llvm::Triple::mipsel:
5498     return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
5499 
5500   case llvm::Triple::mips64:
5501   case llvm::Triple::mips64el:
5502     return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
5503 
5504   case llvm::Triple::aarch64:
5505     return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types));
5506 
5507   case llvm::Triple::arm:
5508   case llvm::Triple::thumb:
5509     {
5510       ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
5511       if (strcmp(getTarget().getABI(), "apcs-gnu") == 0)
5512         Kind = ARMABIInfo::APCS;
5513       else if (CodeGenOpts.FloatABI == "hard" ||
5514                (CodeGenOpts.FloatABI != "soft" &&
5515                 Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
5516         Kind = ARMABIInfo::AAPCS_VFP;
5517 
5518       switch (Triple.getOS()) {
5519         case llvm::Triple::NaCl:
5520           return *(TheTargetCodeGenInfo =
5521                    new NaClARMTargetCodeGenInfo(Types, Kind));
5522         default:
5523           return *(TheTargetCodeGenInfo =
5524                    new ARMTargetCodeGenInfo(Types, Kind));
5525       }
5526     }
5527 
5528   case llvm::Triple::ppc:
5529     return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
5530   case llvm::Triple::ppc64:
5531     if (Triple.isOSBinFormatELF())
5532       return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
5533     else
5534       return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
5535 
5536   case llvm::Triple::nvptx:
5537   case llvm::Triple::nvptx64:
5538     return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));
5539 
5540   case llvm::Triple::mblaze:
5541     return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));
5542 
5543   case llvm::Triple::msp430:
5544     return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
5545 
5546   case llvm::Triple::systemz:
5547     return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
5548 
5549   case llvm::Triple::tce:
5550     return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));
5551 
5552   case llvm::Triple::x86: {
5553     bool IsDarwinVectorABI = Triple.isOSDarwin();
5554     bool IsSmallStructInRegABI =
5555         X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
5556     bool IsWin32FloatStructABI = (Triple.getOS() == llvm::Triple::Win32);
5557 
5558     if (Triple.getOS() == llvm::Triple::Win32) {
5559       return *(TheTargetCodeGenInfo =
5560                new WinX86_32TargetCodeGenInfo(Types,
5561                                               IsDarwinVectorABI, IsSmallStructInRegABI,
5562                                               IsWin32FloatStructABI,
5563                                               CodeGenOpts.NumRegisterParameters));
5564     } else {
5565       return *(TheTargetCodeGenInfo =
5566                new X86_32TargetCodeGenInfo(Types,
5567                                            IsDarwinVectorABI, IsSmallStructInRegABI,
5568                                            IsWin32FloatStructABI,
5569                                            CodeGenOpts.NumRegisterParameters));
5570     }
5571   }
5572 
5573   case llvm::Triple::x86_64: {
5574     bool HasAVX = strcmp(getTarget().getABI(), "avx") == 0;
5575 
5576     switch (Triple.getOS()) {
5577     case llvm::Triple::Win32:
5578     case llvm::Triple::MinGW32:
5579     case llvm::Triple::Cygwin:
5580       return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
5581     case llvm::Triple::NaCl:
5582       return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types,
5583                                                                       HasAVX));
5584     default:
5585       return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
5586                                                                   HasAVX));
5587     }
5588   }
5589   case llvm::Triple::hexagon:
5590     return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
5591   case llvm::Triple::sparcv9:
5592     return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
5593   }
5594 }
5595