1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
// definition used to handle ABI compliance.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "TargetInfo.h"
16 #include "ABIInfo.h"
17 #include "CodeGenFunction.h"
18 #include "clang/AST/RecordLayout.h"
19 #include "clang/Frontend/CodeGenOptions.h"
20 #include "llvm/Type.h"
21 #include "llvm/Target/TargetData.h"
22 #include "llvm/ADT/Triple.h"
23 #include "llvm/Support/raw_ostream.h"
24 using namespace clang;
25 using namespace CodeGen;
26 
27 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
28                                llvm::Value *Array,
29                                llvm::Value *Value,
30                                unsigned FirstIndex,
31                                unsigned LastIndex) {
32   // Alternatively, we could emit this as a loop in the source.
33   for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
34     llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
35     Builder.CreateStore(Value, Cell);
36   }
37 }
38 
39 static bool isAggregateTypeForABI(QualType T) {
40   return CodeGenFunction::hasAggregateLLVMType(T) ||
41          T->isMemberFunctionPointerType();
42 }
43 
44 ABIInfo::~ABIInfo() {}
45 
46 ASTContext &ABIInfo::getContext() const {
47   return CGT.getContext();
48 }
49 
50 llvm::LLVMContext &ABIInfo::getVMContext() const {
51   return CGT.getLLVMContext();
52 }
53 
54 const llvm::TargetData &ABIInfo::getTargetData() const {
55   return CGT.getTargetData();
56 }
57 
58 
59 void ABIArgInfo::dump() const {
60   raw_ostream &OS = llvm::errs();
61   OS << "(ABIArgInfo Kind=";
62   switch (TheKind) {
63   case Direct:
64     OS << "Direct Type=";
65     if (llvm::Type *Ty = getCoerceToType())
66       Ty->print(OS);
67     else
68       OS << "null";
69     break;
70   case Extend:
71     OS << "Extend";
72     break;
73   case Ignore:
74     OS << "Ignore";
75     break;
76   case Indirect:
77     OS << "Indirect Align=" << getIndirectAlign()
78        << " ByVal=" << getIndirectByVal()
79        << " Realign=" << getIndirectRealign();
80     break;
81   case Expand:
82     OS << "Expand";
83     break;
84   }
85   OS << ")\n";
86 }
87 
88 TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
89 
90 // If someone can figure out a general rule for this, that would be great.
91 // It's probably just doomed to be platform-dependent, though.
92 unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
93   // Verified for:
94   //   x86-64     FreeBSD, Linux, Darwin
95   //   x86-32     FreeBSD, Linux, Darwin
96   //   PowerPC    Linux, Darwin
97   //   ARM        Darwin (*not* EABI)
98   return 32;
99 }
100 
101 bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
102                                      const FunctionNoProtoType *fnType) const {
103   // The following conventions are known to require this to be false:
104   //   x86_stdcall
105   //   MIPS
106   // For everything else, we just prefer false unless we opt out.
107   return false;
108 }
109 
110 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
111 
/// isEmptyField - Return true iff the field is "empty", that is, it is
/// an unnamed bit-field or an (array of) empty record(s).
114 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
115                          bool AllowArrays) {
116   if (FD->isUnnamedBitfield())
117     return true;
118 
119   QualType FT = FD->getType();
120 
121   // Constant arrays of empty records count as empty, strip them off.
122   // Constant arrays of zero length always count as empty.
123   if (AllowArrays)
124     while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
125       if (AT->getSize() == 0)
126         return true;
127       FT = AT->getElementType();
128     }
129 
130   const RecordType *RT = FT->getAs<RecordType>();
131   if (!RT)
132     return false;
133 
134   // C++ record fields are never empty, at least in the Itanium ABI.
135   //
136   // FIXME: We should use a predicate for whether this behavior is true in the
137   // current ABI.
138   if (isa<CXXRecordDecl>(RT->getDecl()))
139     return false;
140 
141   return isEmptyRecord(Context, FT, AllowArrays);
142 }
143 
144 /// isEmptyRecord - Return true iff a structure contains only empty
145 /// fields. Note that a structure with a flexible array member is not
146 /// considered empty.
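/// For example, "struct { int : 0; }" is an empty record; with AllowArrays,
/// so is a struct whose only field is an array of empty records.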
147 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
148   const RecordType *RT = T->getAs<RecordType>();
149   if (!RT)
    return false;
151   const RecordDecl *RD = RT->getDecl();
152   if (RD->hasFlexibleArrayMember())
153     return false;
154 
155   // If this is a C++ record, check the bases first.
156   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
157     for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
158            e = CXXRD->bases_end(); i != e; ++i)
159       if (!isEmptyRecord(Context, i->getType(), true))
160         return false;
161 
162   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
163          i != e; ++i)
164     if (!isEmptyField(Context, *i, AllowArrays))
165       return false;
166   return true;
167 }
168 
169 /// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
170 /// a non-trivial destructor or a non-trivial copy constructor.
171 static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
172   const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
173   if (!RD)
174     return false;
175 
176   return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
177 }
178 
179 /// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
180 /// a record type with either a non-trivial destructor or a non-trivial copy
181 /// constructor.
182 static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
183   const RecordType *RT = T->getAs<RecordType>();
184   if (!RT)
185     return false;
186 
187   return hasNonTrivialDestructorOrCopyConstructor(RT);
188 }
189 
190 /// isSingleElementStruct - Determine if a structure is a "single
191 /// element struct", i.e. it has exactly one non-empty field or
192 /// exactly one field which is itself a single element
193 /// struct. Structures with flexible array members are never
194 /// considered single element structs.
195 ///
/// \return The type of the single non-empty field, if it exists.
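///
/// For example, "struct S { struct { float f; } inner; }" is a single
/// element struct whose element type is 'float'.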
198 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
199   const RecordType *RT = T->getAsStructureType();
200   if (!RT)
201     return 0;
202 
203   const RecordDecl *RD = RT->getDecl();
204   if (RD->hasFlexibleArrayMember())
205     return 0;
206 
207   const Type *Found = 0;
208 
209   // If this is a C++ record, check the bases first.
210   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
211     for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
212            e = CXXRD->bases_end(); i != e; ++i) {
213       // Ignore empty records.
214       if (isEmptyRecord(Context, i->getType(), true))
215         continue;
216 
217       // If we already found an element then this isn't a single-element struct.
218       if (Found)
219         return 0;
220 
221       // If this is non-empty and not a single element struct, the composite
222       // cannot be a single element struct.
223       Found = isSingleElementStruct(i->getType(), Context);
224       if (!Found)
225         return 0;
226     }
227   }
228 
229   // Check for single element.
230   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
231          i != e; ++i) {
232     const FieldDecl *FD = *i;
233     QualType FT = FD->getType();
234 
235     // Ignore empty fields.
236     if (isEmptyField(Context, FD, true))
237       continue;
238 
239     // If we already found an element then this isn't a single-element
240     // struct.
241     if (Found)
242       return 0;
243 
244     // Treat single element arrays as the element.
245     while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
246       if (AT->getSize().getZExtValue() != 1)
247         break;
248       FT = AT->getElementType();
249     }
250 
251     if (!isAggregateTypeForABI(FT)) {
252       Found = FT.getTypePtr();
253     } else {
254       Found = isSingleElementStruct(FT, Context);
255       if (!Found)
256         return 0;
257     }
258   }
259 
260   // We don't consider a struct a single-element struct if it has
261   // padding beyond the element type.
262   if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
263     return 0;
264 
265   return Found;
266 }
267 
268 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
269   if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
270       !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
271       !Ty->isBlockPointerType())
272     return false;
273 
274   uint64_t Size = Context.getTypeSize(Ty);
275   return Size == 32 || Size == 64;
276 }
277 
278 /// canExpandIndirectArgument - Test whether an argument type which is to be
279 /// passed indirectly (on the stack) would have the equivalent layout if it was
280 /// expanded into separate arguments. If so, we prefer to do the latter to avoid
281 /// inhibiting optimizations.
282 ///
283 // FIXME: This predicate is missing many cases, currently it just follows
284 // llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
285 // should probably make this smarter, or better yet make the LLVM backend
286 // capable of handling it.
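//
// For example, "struct { int a; int b; }" (64 bits, no padding) can be
// expanded into two separate 32-bit arguments, while a struct containing a
// 'short' field cannot under the current rule.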
287 static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
288   // We can only expand structure types.
289   const RecordType *RT = Ty->getAs<RecordType>();
290   if (!RT)
291     return false;
292 
293   // We can only expand (C) structures.
294   //
295   // FIXME: This needs to be generalized to handle classes as well.
296   const RecordDecl *RD = RT->getDecl();
297   if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
298     return false;
299 
300   uint64_t Size = 0;
301 
302   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
303          i != e; ++i) {
304     const FieldDecl *FD = *i;
305 
306     if (!is32Or64BitBasicType(FD->getType(), Context))
307       return false;
308 
309     // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
310     // how to expand them yet, and the predicate for telling if a bitfield still
311     // counts as "basic" is more complicated than what we were doing previously.
312     if (FD->isBitField())
313       return false;
314 
315     Size += Context.getTypeSize(FD->getType());
316   }
317 
318   // Make sure there are not any holes in the struct.
319   if (Size != Context.getTypeSize(Ty))
320     return false;
321 
322   return true;
323 }
324 
325 namespace {
326 /// DefaultABIInfo - The default implementation for ABI specific
327 /// details. This implementation provides information which results in
328 /// self-consistent and sensible LLVM IR generation, but does not
329 /// conform to any particular ABI.
330 class DefaultABIInfo : public ABIInfo {
331 public:
332   DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
333 
334   ABIArgInfo classifyReturnType(QualType RetTy) const;
335   ABIArgInfo classifyArgumentType(QualType RetTy) const;
336 
337   virtual void computeInfo(CGFunctionInfo &FI) const {
338     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
339     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
340          it != ie; ++it)
341       it->info = classifyArgumentType(it->type);
342   }
343 
344   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
345                                  CodeGenFunction &CGF) const;
346 };
347 
348 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
349 public:
350   DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
351     : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
352 };
353 
354 llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
355                                        CodeGenFunction &CGF) const {
356   return 0;
357 }
358 
359 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
360   if (isAggregateTypeForABI(Ty)) {
361     // Records with non trivial destructors/constructors should not be passed
362     // by value.
363     if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
364       return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
365 
366     return ABIArgInfo::getIndirect(0);
367   }
368 
369   // Treat an enum type as its underlying type.
370   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
371     Ty = EnumTy->getDecl()->getIntegerType();
372 
373   return (Ty->isPromotableIntegerType() ?
374           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
375 }
376 
377 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
378   if (RetTy->isVoidType())
379     return ABIArgInfo::getIgnore();
380 
381   if (isAggregateTypeForABI(RetTy))
382     return ABIArgInfo::getIndirect(0);
383 
384   // Treat an enum type as its underlying type.
385   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
386     RetTy = EnumTy->getDecl()->getIntegerType();
387 
388   return (RetTy->isPromotableIntegerType() ?
389           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
390 }
391 
392 /// UseX86_MMXType - Return true if this is an MMX type that should use the
393 /// special x86_mmx type.
394 bool UseX86_MMXType(llvm::Type *IRType) {
395   // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
396   // special x86_mmx type.
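  // (<1 x i64> is excluded by the element-size check below.)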
397   return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
398     cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
399     IRType->getScalarSizeInBits() != 64;
400 }
401 
402 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
403                                           StringRef Constraint,
404                                           llvm::Type* Ty) {
405   if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
406     return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
407   return Ty;
408 }
409 
410 //===----------------------------------------------------------------------===//
411 // X86-32 ABI Implementation
412 //===----------------------------------------------------------------------===//
413 
414 /// X86_32ABIInfo - The X86-32 ABI information.
415 class X86_32ABIInfo : public ABIInfo {
416   enum Class {
417     Integer,
418     Float
419   };
420 
421   static const unsigned MinABIStackAlignInBytes = 4;
422 
423   bool IsDarwinVectorABI;
424   bool IsSmallStructInRegABI;
425   bool IsMMXDisabled;
426   bool IsWin32FloatStructABI;
427   unsigned DefaultNumRegisterParameters;
428 
429   static bool isRegisterSize(unsigned Size) {
430     return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
431   }
432 
433   static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
434                                           unsigned callingConvention);
435 
  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
437   /// such that the argument will be passed in memory.
438   ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;
439 
440   /// \brief Return the alignment to use for the given type on the stack.
441   unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
442 
443   Class classify(QualType Ty) const;
444   ABIArgInfo classifyReturnType(QualType RetTy,
445                                 unsigned callingConvention) const;
446   ABIArgInfo classifyArgumentTypeWithReg(QualType RetTy,
447                                          unsigned &FreeRegs) const;
448   ABIArgInfo classifyArgumentType(QualType RetTy) const;
449 
450 public:
451 
452   virtual void computeInfo(CGFunctionInfo &FI) const;
453   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
454                                  CodeGenFunction &CGF) const;
455 
456   X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w,
457                 unsigned r)
458     : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
459       IsMMXDisabled(m), IsWin32FloatStructABI(w),
460       DefaultNumRegisterParameters(r) {}
461 };
462 
463 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
464 public:
465   X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
466       bool d, bool p, bool m, bool w, unsigned r)
467     :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w, r)) {}
468 
469   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
470                            CodeGen::CodeGenModule &CGM) const;
471 
472   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
473     // Darwin uses different dwarf register numbers for EH.
474     if (CGM.isTargetDarwin()) return 5;
475 
476     return 4;
477   }
478 
479   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
480                                llvm::Value *Address) const;
481 
482   llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
483                                   StringRef Constraint,
484                                   llvm::Type* Ty) const {
485     return X86AdjustInlineAsmType(CGF, Constraint, Ty);
486   }
487 
488 };
489 
490 }
491 
492 /// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
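///
/// For example, a 32-bit "struct { short a, b; }" is returned in a register,
/// while "struct { char c[3]; }" (24 bits) is not, since its size is not
/// register sized.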
494 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
495                                                ASTContext &Context,
496                                                unsigned callingConvention) {
497   uint64_t Size = Context.getTypeSize(Ty);
498 
499   // Type must be register sized.
500   if (!isRegisterSize(Size))
501     return false;
502 
503   if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
505     // registers.
506     if (Size == 64 || Size == 128)
507       return false;
508 
509     return true;
510   }
511 
512   // If this is a builtin, pointer, enum, complex type, member pointer, or
513   // member function pointer it is ok.
514   if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
515       Ty->isAnyComplexType() || Ty->isEnumeralType() ||
516       Ty->isBlockPointerType() || Ty->isMemberPointerType())
517     return true;
518 
519   // Arrays are treated like records.
520   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
521     return shouldReturnTypeInRegister(AT->getElementType(), Context,
522                                       callingConvention);
523 
524   // Otherwise, it must be a record type.
525   const RecordType *RT = Ty->getAs<RecordType>();
526   if (!RT) return false;
527 
528   // FIXME: Traverse bases here too.
529 
530   // For thiscall conventions, structures will never be returned in
  // a register.  This is for compatibility with the MSVC ABI.
532   if (callingConvention == llvm::CallingConv::X86_ThisCall &&
533       RT->isStructureType()) {
534     return false;
535   }
536 
537   // Structure types are passed in register if all fields would be
538   // passed in a register.
539   for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
540          e = RT->getDecl()->field_end(); i != e; ++i) {
541     const FieldDecl *FD = *i;
542 
543     // Empty fields are ignored.
544     if (isEmptyField(Context, FD, true))
545       continue;
546 
547     // Check fields recursively.
548     if (!shouldReturnTypeInRegister(FD->getType(), Context,
549                                     callingConvention))
550       return false;
551   }
552   return true;
553 }
554 
555 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
556                                             unsigned callingConvention) const {
557   if (RetTy->isVoidType())
558     return ABIArgInfo::getIgnore();
559 
560   if (const VectorType *VT = RetTy->getAs<VectorType>()) {
561     // On Darwin, some vectors are returned in registers.
562     if (IsDarwinVectorABI) {
563       uint64_t Size = getContext().getTypeSize(RetTy);
564 
565       // 128-bit vectors are a special case; they are returned in
566       // registers and we need to make sure to pick a type the LLVM
567       // backend will like.
568       if (Size == 128)
569         return ABIArgInfo::getDirect(llvm::VectorType::get(
570                   llvm::Type::getInt64Ty(getVMContext()), 2));
571 
572       // Always return in register if it fits in a general purpose
573       // register, or if it is 64 bits and has a single element.
574       if ((Size == 8 || Size == 16 || Size == 32) ||
575           (Size == 64 && VT->getNumElements() == 1))
576         return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
577                                                             Size));
578 
579       return ABIArgInfo::getIndirect(0);
580     }
581 
582     return ABIArgInfo::getDirect();
583   }
584 
585   if (isAggregateTypeForABI(RetTy)) {
586     if (const RecordType *RT = RetTy->getAs<RecordType>()) {
587       // Structures with either a non-trivial destructor or a non-trivial
588       // copy constructor are always indirect.
589       if (hasNonTrivialDestructorOrCopyConstructor(RT))
590         return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
591 
592       // Structures with flexible arrays are always indirect.
593       if (RT->getDecl()->hasFlexibleArrayMember())
594         return ABIArgInfo::getIndirect(0);
595     }
596 
    // Unless the target returns small structs in registers, structs and
    // unions (but not complex types) are always returned indirectly.
598     if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
599       return ABIArgInfo::getIndirect(0);
600 
601     // Small structures which are register sized are generally returned
602     // in a register.
603     if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(),
604                                                   callingConvention)) {
605       uint64_t Size = getContext().getTypeSize(RetTy);
606 
607       // As a special-case, if the struct is a "single-element" struct, and
608       // the field is of type "float" or "double", return it in a
609       // floating-point register. (MSVC does not apply this special case.)
610       // We apply a similar transformation for pointer types to improve the
611       // quality of the generated IR.
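      // For example, "struct { double d; }" is returned directly as a double
      // (except under the Win32 float-struct ABI).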
612       if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
613         if ((!IsWin32FloatStructABI && SeltTy->isRealFloatingType())
614             || SeltTy->hasPointerRepresentation())
615           return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
616 
617       // FIXME: We should be able to narrow this integer in cases with dead
618       // padding.
619       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
620     }
621 
622     return ABIArgInfo::getIndirect(0);
623   }
624 
625   // Treat an enum type as its underlying type.
626   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
627     RetTy = EnumTy->getDecl()->getIntegerType();
628 
629   return (RetTy->isPromotableIntegerType() ?
630           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
631 }
632 
633 static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
634   return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
635 }
636 
637 static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
638   const RecordType *RT = Ty->getAs<RecordType>();
639   if (!RT)
    return false;
641   const RecordDecl *RD = RT->getDecl();
642 
643   // If this is a C++ record, check the bases first.
644   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
645     for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
646            e = CXXRD->bases_end(); i != e; ++i)
      if (isRecordWithSSEVectorType(Context, i->getType()))
        return true;
649 
650   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
651        i != e; ++i) {
652     QualType FT = i->getType();
653 
654     if (isSSEVectorType(Context, FT))
655       return true;
656 
657     if (isRecordWithSSEVectorType(Context, FT))
658       return true;
659   }
660 
661   return false;
662 }
663 
664 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
665                                                  unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
668   if (Align <= MinABIStackAlignInBytes)
669     return 0; // Use default alignment.
670 
671   // On non-Darwin, the stack type alignment is always 4.
672   if (!IsDarwinVectorABI) {
673     // Set explicit alignment, since we may need to realign the top.
674     return MinABIStackAlignInBytes;
675   }
676 
677   // Otherwise, if the type contains an SSE vector type, the alignment is 16.
678   if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
679                       isRecordWithSSEVectorType(getContext(), Ty)))
680     return 16;
681 
682   return MinABIStackAlignInBytes;
683 }
684 
685 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
686   if (!ByVal)
687     return ABIArgInfo::getIndirect(0, false);
688 
689   // Compute the byval alignment.
690   unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
691   unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
692   if (StackAlign == 0)
693     return ABIArgInfo::getIndirect(4);
694 
695   // If the stack alignment is less than the type alignment, realign the
696   // argument.
697   if (StackAlign < TypeAlign)
698     return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
699                                    /*Realign=*/true);
700 
701   return ABIArgInfo::getIndirect(StackAlign);
702 }
703 
704 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
705   const Type *T = isSingleElementStruct(Ty, getContext());
706   if (!T)
707     T = Ty.getTypePtr();
708 
709   if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
710     BuiltinType::Kind K = BT->getKind();
711     if (K == BuiltinType::Float || K == BuiltinType::Double)
712       return Float;
713   }
714   return Integer;
715 }
716 
717 ABIArgInfo
718 X86_32ABIInfo::classifyArgumentTypeWithReg(QualType Ty,
719                                            unsigned &FreeRegs) const {
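  // FreeRegs comes from regparm (or the default register-parameter count);
  // on x86-32 these registers are typically EAX, EDX and ECX. For example,
  // with three free registers, a "struct { int a; int b; }" argument
  // consumes two of them.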
720   // Common case first.
721   if (FreeRegs == 0)
722     return classifyArgumentType(Ty);
723 
724   Class C = classify(Ty);
725   if (C == Float)
726     return classifyArgumentType(Ty);
727 
728   unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
729   if (SizeInRegs == 0)
730     return classifyArgumentType(Ty);
731 
732   if (SizeInRegs > FreeRegs) {
733     FreeRegs = 0;
734     return classifyArgumentType(Ty);
735   }
736   assert(SizeInRegs >= 1 && SizeInRegs <= 3);
737   FreeRegs -= SizeInRegs;
738 
  // If it is a simple scalar, keep the type so that we produce a cleaner IR.
  ABIArgInfo ArgInfo = classifyArgumentType(Ty);
  if (ArgInfo.isDirect() && !ArgInfo.getDirectOffset() &&
      !ArgInfo.getPaddingType())
    return ABIArgInfo::getDirectInReg(ArgInfo.getCoerceToType());
  if (ArgInfo.isExtend())
    return ABIArgInfo::getExtendInReg(ArgInfo.getCoerceToType());
745 
746   llvm::LLVMContext &LLVMContext = getVMContext();
747   llvm::Type *Int32 = llvm::Type::getInt32Ty(LLVMContext);
748   SmallVector<llvm::Type*, 3> Elements;
749   for (unsigned I = 0; I < SizeInRegs; ++I)
750     Elements.push_back(Int32);
751   llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
752   return ABIArgInfo::getDirectInReg(Result);
753 }
754 
755 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
756   // FIXME: Set alignment on indirect arguments.
757   if (isAggregateTypeForABI(Ty)) {
758     // Structures with flexible arrays are always indirect.
759     if (const RecordType *RT = Ty->getAs<RecordType>()) {
760       // Structures with either a non-trivial destructor or a non-trivial
761       // copy constructor are always indirect.
762       if (hasNonTrivialDestructorOrCopyConstructor(RT))
763         return getIndirectResult(Ty, /*ByVal=*/false);
764 
765       if (RT->getDecl()->hasFlexibleArrayMember())
766         return getIndirectResult(Ty);
767     }
768 
769     // Ignore empty structs/unions.
770     if (isEmptyRecord(getContext(), Ty, true))
771       return ABIArgInfo::getIgnore();
772 
773     // Expand small (<= 128-bit) record types when we know that the stack layout
774     // of those arguments will match the struct. This is important because the
775     // LLVM backend isn't smart enough to remove byval, which inhibits many
776     // optimizations.
777     if (getContext().getTypeSize(Ty) <= 4*32 &&
778         canExpandIndirectArgument(Ty, getContext()))
779       return ABIArgInfo::getExpand();
780 
781     return getIndirectResult(Ty);
782   }
783 
784   if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory; we handle this by passing
    // them as an i8/i16/i32/i64.
787     if (IsDarwinVectorABI) {
788       uint64_t Size = getContext().getTypeSize(Ty);
789       if ((Size == 8 || Size == 16 || Size == 32) ||
790           (Size == 64 && VT->getNumElements() == 1))
791         return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
792                                                             Size));
793     }
794 
795     llvm::Type *IRType = CGT.ConvertType(Ty);
796     if (UseX86_MMXType(IRType)) {
797       if (IsMMXDisabled)
798         return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
799                                                             64));
800       ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
801       AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
802       return AAI;
803     }
804 
805     return ABIArgInfo::getDirect();
806   }
807 
808 
809   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
810     Ty = EnumTy->getDecl()->getIntegerType();
811 
812   return (Ty->isPromotableIntegerType() ?
813           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
814 }
815 
816 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
817   FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
818                                           FI.getCallingConvention());
819 
820   unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() :
821     DefaultNumRegisterParameters;
822 
  // If the return value is indirect, then the hidden argument consumes one
  // integer register.
825   if (FI.getReturnInfo().isIndirect() && FreeRegs) {
826     --FreeRegs;
827     ABIArgInfo &Old = FI.getReturnInfo();
828     Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(),
829                                        Old.getIndirectByVal(),
830                                        Old.getIndirectRealign());
831   }
832 
833   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
834        it != ie; ++it)
835     it->info = classifyArgumentTypeWithReg(it->type, FreeRegs);
836 }
837 
838 llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
839                                       CodeGenFunction &CGF) const {
840   llvm::Type *BPP = CGF.Int8PtrPtrTy;
841 
842   CGBuilderTy &Builder = CGF.Builder;
843   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
844                                                        "ap");
845   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
846 
  // Compute whether the address needs to be aligned.
848   unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
849   Align = getTypeStackAlignInBytes(Ty, Align);
850   Align = std::max(Align, 4U);
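  // For example, on Darwin an argument containing an SSE vector may require
  // 16-byte alignment here, in which case ap is rounded up below.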
851   if (Align > 4) {
852     // addr = (addr + align - 1) & -align;
853     llvm::Value *Offset =
854       llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
855     Addr = CGF.Builder.CreateGEP(Addr, Offset);
856     llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
857                                                     CGF.Int32Ty);
858     llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
859     Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
860                                       Addr->getType(),
861                                       "ap.cur.aligned");
862   }
863 
864   llvm::Type *PTy =
865     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
866   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
867 
868   uint64_t Offset =
869     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
870   llvm::Value *NextAddr =
871     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
872                       "ap.next");
873   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
874 
875   return AddrTyped;
876 }
877 
878 void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
879                                                   llvm::GlobalValue *GV,
880                                             CodeGen::CodeGenModule &CGM) const {
881   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
882     if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
883       // Get the LLVM function.
884       llvm::Function *Fn = cast<llvm::Function>(GV);
885 
886       // Now add the 'alignstack' attribute with a value of 16.
887       Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16));
888     }
889   }
890 }
891 
892 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
893                                                CodeGen::CodeGenFunction &CGF,
894                                                llvm::Value *Address) const {
895   CodeGen::CGBuilderTy &Builder = CGF.Builder;
896 
897   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
898 
899   // 0-7 are the eight integer registers;  the order is different
900   //   on Darwin (for EH), but the range is the same.
901   // 8 is %eip.
902   AssignToArrayRange(Builder, Address, Four8, 0, 8);
903 
904   if (CGF.CGM.isTargetDarwin()) {
905     // 12-16 are st(0..4).  Not sure why we stop at 4.
906     // These have size 16, which is sizeof(long double) on
907     // platforms with 8-byte alignment for that type.
908     llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
909     AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
910 
911   } else {
912     // 9 is %eflags, which doesn't get a size on Darwin for some
913     // reason.
914     Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));
915 
916     // 11-16 are st(0..5).  Not sure why we stop at 5.
917     // These have size 12, which is sizeof(long double) on
918     // platforms with 4-byte alignment for that type.
919     llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
920     AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
921   }
922 
923   return false;
924 }
925 
926 //===----------------------------------------------------------------------===//
927 // X86-64 ABI Implementation
928 //===----------------------------------------------------------------------===//
929 
930 
931 namespace {
932 /// X86_64ABIInfo - The X86_64 ABI information.
933 class X86_64ABIInfo : public ABIInfo {
934   enum Class {
935     Integer = 0,
936     SSE,
937     SSEUp,
938     X87,
939     X87Up,
940     ComplexX87,
941     NoClass,
942     Memory
943   };
944 
945   /// merge - Implement the X86_64 ABI merging algorithm.
946   ///
947   /// Merge an accumulating classification \arg Accum with a field
948   /// classification \arg Field.
949   ///
950   /// \param Accum - The accumulating classification. This should
951   /// always be either NoClass or the result of a previous merge
952   /// call. In addition, this should never be Memory (the caller
953   /// should just return Memory for the aggregate).
954   static Class merge(Class Accum, Class Field);
955 
956   /// postMerge - Implement the X86_64 ABI post merging algorithm.
957   ///
958   /// Post merger cleanup, reduces a malformed Hi and Lo pair to
959   /// final MEMORY or SSE classes when necessary.
960   ///
961   /// \param AggregateSize - The size of the current aggregate in
962   /// the classification process.
963   ///
964   /// \param Lo - The classification for the parts of the type
965   /// residing in the low word of the containing object.
966   ///
967   /// \param Hi - The classification for the parts of the type
968   /// residing in the higher words of the containing object.
969   ///
970   void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
971 
972   /// classify - Determine the x86_64 register classes in which the
973   /// given type T should be passed.
974   ///
975   /// \param Lo - The classification for the parts of the type
976   /// residing in the low word of the containing object.
977   ///
978   /// \param Hi - The classification for the parts of the type
979   /// residing in the high word of the containing object.
980   ///
981   /// \param OffsetBase - The bit offset of this type in the
982   /// containing object.  Some parameters are classified different
983   /// depending on whether they straddle an eightbyte boundary.
984   ///
985   /// If a word is unused its result will be NoClass; if a type should
986   /// be passed in Memory then at least the classification of \arg Lo
987   /// will be Memory.
988   ///
989   /// The \arg Lo class will be NoClass iff the argument is ignored.
990   ///
991   /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
992   /// also be ComplexX87.
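  ///
  /// For example, "struct { double d; int i; }" classifies as
  /// (Lo = SSE, Hi = Integer): the double occupies the first eightbyte and
  /// the int the second.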
993   void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;
994 
995   llvm::Type *GetByteVectorType(QualType Ty) const;
996   llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
997                                  unsigned IROffset, QualType SourceTy,
998                                  unsigned SourceOffset) const;
999   llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
1000                                      unsigned IROffset, QualType SourceTy,
1001                                      unsigned SourceOffset) const;
1002 
  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
1005   ABIArgInfo getIndirectReturnResult(QualType Ty) const;
1006 
  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
1008   /// such that the argument will be passed in memory.
1009   ///
1010   /// \param freeIntRegs - The number of free integer registers remaining
1011   /// available.
1012   ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
1013 
1014   ABIArgInfo classifyReturnType(QualType RetTy) const;
1015 
1016   ABIArgInfo classifyArgumentType(QualType Ty,
1017                                   unsigned freeIntRegs,
1018                                   unsigned &neededInt,
1019                                   unsigned &neededSSE) const;
1020 
1021   bool IsIllegalVectorType(QualType Ty) const;
1022 
1023   /// The 0.98 ABI revision clarified a lot of ambiguities,
1024   /// unfortunately in ways that were not always consistent with
1025   /// certain previous compilers.  In particular, platforms which
1026   /// required strict binary compatibility with older versions of GCC
1027   /// may need to exempt themselves.
1028   bool honorsRevision0_98() const {
1029     return !getContext().getTargetInfo().getTriple().isOSDarwin();
1030   }
1031 
1032   bool HasAVX;
1033 
1034 public:
1035   X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
1036       ABIInfo(CGT), HasAVX(hasavx) {}
1037 
1038   bool isPassedUsingAVXType(QualType type) const {
1039     unsigned neededInt, neededSSE;
1040     // The freeIntRegs argument doesn't matter here.
1041     ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE);
1042     if (info.isDirect()) {
1043       llvm::Type *ty = info.getCoerceToType();
1044       if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
1045         return (vectorTy->getBitWidth() > 128);
1046     }
1047     return false;
1048   }
1049 
1050   virtual void computeInfo(CGFunctionInfo &FI) const;
1051 
1052   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1053                                  CodeGenFunction &CGF) const;
1054 };
1055 
1056 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
1057 class WinX86_64ABIInfo : public ABIInfo {
1058 
1059   ABIArgInfo classify(QualType Ty) const;
1060 
1061 public:
1062   WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
1063 
1064   virtual void computeInfo(CGFunctionInfo &FI) const;
1065 
1066   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1067                                  CodeGenFunction &CGF) const;
1068 };
1069 
1070 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
1071 public:
1072   X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
1073     : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
1074 
1075   const X86_64ABIInfo &getABIInfo() const {
1076     return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
1077   }
1078 
1079   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
1080     return 7;
1081   }
1082 
1083   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1084                                llvm::Value *Address) const {
1085     llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
1086 
1087     // 0-15 are the 16 integer registers.
1088     // 16 is %rip.
1089     AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
1090     return false;
1091   }
1092 
1093   llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1094                                   StringRef Constraint,
1095                                   llvm::Type* Ty) const {
1096     return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1097   }
1098 
1099   bool isNoProtoCallVariadic(const CallArgList &args,
1100                              const FunctionNoProtoType *fnType) const {
    // The default CC on x86-64 sets %al to the number of SSE
1102     // registers used, and GCC sets this when calling an unprototyped
1103     // function, so we override the default behavior.  However, don't do
1104     // that when AVX types are involved: the ABI explicitly states it is
1105     // undefined, and it doesn't work in practice because of how the ABI
1106     // defines varargs anyway.
1107     if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) {
1108       bool HasAVXType = false;
1109       for (CallArgList::const_iterator
1110              it = args.begin(), ie = args.end(); it != ie; ++it) {
1111         if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
1112           HasAVXType = true;
1113           break;
1114         }
1115       }
1116 
1117       if (!HasAVXType)
1118         return true;
1119     }
1120 
1121     return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
1122   }
1123 
1124 };
1125 
1126 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
1127 public:
1128   WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
1129     : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
1130 
1131   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
1132     return 7;
1133   }
1134 
1135   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1136                                llvm::Value *Address) const {
1137     llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
1138 
1139     // 0-15 are the 16 integer registers.
1140     // 16 is %rip.
1141     AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
1142     return false;
1143   }
1144 };
1145 
1146 }
1147 
1148 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
1149                               Class &Hi) const {
1150   // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
1151   //
1152   // (a) If one of the classes is Memory, the whole argument is passed in
1153   //     memory.
1154   //
1155   // (b) If X87UP is not preceded by X87, the whole argument is passed in
1156   //     memory.
1157   //
1158   // (c) If the size of the aggregate exceeds two eightbytes and the first
1159   //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
1160   //     argument is passed in memory. NOTE: This is necessary to keep the
1161   //     ABI working for processors that don't support the __m256 type.
1162   //
1163   // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
1164   //
1165   // Some of these are enforced by the merging logic.  Others can arise
1166   // only with unions; for example:
1167   //   union { _Complex double; unsigned; }
1168   //
1169   // Note that clauses (b) and (c) were added in 0.98.
1170   //
1171   if (Hi == Memory)
1172     Lo = Memory;
1173   if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
1174     Lo = Memory;
1175   if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
1176     Lo = Memory;
1177   if (Hi == SSEUp && Lo != SSE)
1178     Hi = SSE;
1179 }
1180 
1181 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
1182   // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
1183   // classified recursively so that always two fields are
1184   // considered. The resulting class is calculated according to
1185   // the classes of the fields in the eightbyte:
1186   //
1187   // (a) If both classes are equal, this is the resulting class.
1188   //
1189   // (b) If one of the classes is NO_CLASS, the resulting class is
1190   // the other class.
1191   //
1192   // (c) If one of the classes is MEMORY, the result is the MEMORY
1193   // class.
1194   //
1195   // (d) If one of the classes is INTEGER, the result is the
1196   // INTEGER.
1197   //
1198   // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
1199   // MEMORY is used as class.
1200   //
1201   // (f) Otherwise class SSE is used.
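  //
  // For example, merging Integer with SSE yields Integer (rule d), and
  // merging NO_CLASS with SSE yields SSE (rule b).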
1202 
1203   // Accum should never be memory (we should have returned) or
1204   // ComplexX87 (because this cannot be passed in a structure).
1205   assert((Accum != Memory && Accum != ComplexX87) &&
1206          "Invalid accumulated classification during merge.");
1207   if (Accum == Field || Field == NoClass)
1208     return Accum;
1209   if (Field == Memory)
1210     return Memory;
1211   if (Accum == NoClass)
1212     return Field;
1213   if (Accum == Integer || Field == Integer)
1214     return Integer;
1215   if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
1216       Accum == X87 || Accum == X87Up)
1217     return Memory;
1218   return SSE;
1219 }
1220 
1221 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
1222                              Class &Lo, Class &Hi) const {
1223   // FIXME: This code can be simplified by introducing a simple value class for
1224   // Class pairs with appropriate constructor methods for the various
1225   // situations.
1226 
1227   // FIXME: Some of the split computations are wrong; unaligned vectors
1228   // shouldn't be passed in registers for example, so there is no chance they
1229   // can straddle an eightbyte. Verify & simplify.
1230 
1231   Lo = Hi = NoClass;
1232 
1233   Class &Current = OffsetBase < 64 ? Lo : Hi;
1234   Current = Memory;
1235 
1236   if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
1237     BuiltinType::Kind k = BT->getKind();
1238 
1239     if (k == BuiltinType::Void) {
1240       Current = NoClass;
1241     } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
1242       Lo = Integer;
1243       Hi = Integer;
1244     } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
1245       Current = Integer;
1246     } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
1247       Current = SSE;
1248     } else if (k == BuiltinType::LongDouble) {
1249       Lo = X87;
1250       Hi = X87Up;
1251     }
1252     // FIXME: _Decimal32 and _Decimal64 are SSE.
1253     // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
1254     return;
1255   }
1256 
1257   if (const EnumType *ET = Ty->getAs<EnumType>()) {
1258     // Classify the underlying integer type.
1259     classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
1260     return;
1261   }
1262 
1263   if (Ty->hasPointerRepresentation()) {
1264     Current = Integer;
1265     return;
1266   }
1267 
1268   if (Ty->isMemberPointerType()) {
1269     if (Ty->isMemberFunctionPointerType())
1270       Lo = Hi = Integer;
1271     else
1272       Current = Integer;
1273     return;
1274   }
1275 
1276   if (const VectorType *VT = Ty->getAs<VectorType>()) {
1277     uint64_t Size = getContext().getTypeSize(VT);
1278     if (Size == 32) {
1279       // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
1280       // float> as integer.
1281       Current = Integer;
1282 
1283       // If this type crosses an eightbyte boundary, it should be
1284       // split.
1285       uint64_t EB_Real = (OffsetBase) / 64;
1286       uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
1287       if (EB_Real != EB_Imag)
1288         Hi = Lo;
1289     } else if (Size == 64) {
1290       // gcc passes <1 x double> in memory. :(
1291       if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
1292         return;
1293 
1294       // gcc passes <1 x long long> as INTEGER.
1295       if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
1296           VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
1297           VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
1298           VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
1299         Current = Integer;
1300       else
1301         Current = SSE;
1302 
1303       // If this type crosses an eightbyte boundary, it should be
1304       // split.
1305       if (OffsetBase && OffsetBase != 64)
1306         Hi = Lo;
1307     } else if (Size == 128 || (HasAVX && Size == 256)) {
      // Arguments of 256 bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to class
      // SSEUP. The original Lo and Hi design assumes that types can't be
      // greater than 128 bits, so a 64-bit split in Hi and Lo makes sense.
      // This design isn't correct for 256 bits, but since there are no cases
      // where the upper parts would need to be inspected, avoid adding
      // complexity and just consider Hi to match the 64-256 part.
1315       Lo = SSE;
1316       Hi = SSEUp;
1317     }
1318     return;
1319   }
1320 
1321   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
1322     QualType ET = getContext().getCanonicalType(CT->getElementType());
1323 
1324     uint64_t Size = getContext().getTypeSize(Ty);
1325     if (ET->isIntegralOrEnumerationType()) {
1326       if (Size <= 64)
1327         Current = Integer;
1328       else if (Size <= 128)
1329         Lo = Hi = Integer;
1330     } else if (ET == getContext().FloatTy)
1331       Current = SSE;
1332     else if (ET == getContext().DoubleTy)
1333       Lo = Hi = SSE;
1334     else if (ET == getContext().LongDoubleTy)
1335       Current = ComplexX87;
1336 
1337     // If this complex type crosses an eightbyte boundary then it
1338     // should be split.
1339     uint64_t EB_Real = (OffsetBase) / 64;
1340     uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
1341     if (Hi == NoClass && EB_Real != EB_Imag)
1342       Hi = Lo;
1343 
1344     return;
1345   }
1346 
1347   if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
1348     // Arrays are treated like structures.
1349 
1350     uint64_t Size = getContext().getTypeSize(Ty);
1351 
1352     // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1353     // than four eightbytes, ..., it has class MEMORY.
1354     if (Size > 256)
1355       return;
1356 
1357     // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
1358     // fields, it has class MEMORY.
1359     //
1360     // Only need to check alignment of array base.
1361     if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
1362       return;
1363 
1364     // Otherwise implement simplified merge. We could be smarter about
1365     // this, but it isn't worth it and would be harder to verify.
1366     Current = NoClass;
1367     uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
1368     uint64_t ArraySize = AT->getSize().getZExtValue();
1369 
1370     // The only case a 256-bit wide vector could be used is when the array
1371     // contains a single 256-bit element. Since Lo and Hi logic isn't extended
1372     // to work for sizes wider than 128, early check and fallback to memory.
1373     if (Size > 128 && EltSize != 256)
1374       return;
1375 
1376     for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
1377       Class FieldLo, FieldHi;
1378       classify(AT->getElementType(), Offset, FieldLo, FieldHi);
1379       Lo = merge(Lo, FieldLo);
1380       Hi = merge(Hi, FieldHi);
1381       if (Lo == Memory || Hi == Memory)
1382         break;
1383     }
1384 
1385     postMerge(Size, Lo, Hi);
1386     assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
1387     return;
1388   }
1389 
1390   if (const RecordType *RT = Ty->getAs<RecordType>()) {
1391     uint64_t Size = getContext().getTypeSize(Ty);
1392 
1393     // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1394     // than four eightbytes, ..., it has class MEMORY.
1395     if (Size > 256)
1396       return;
1397 
1398     // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
1399     // copy constructor or a non-trivial destructor, it is passed by invisible
1400     // reference.
1401     if (hasNonTrivialDestructorOrCopyConstructor(RT))
1402       return;
1403 
1404     const RecordDecl *RD = RT->getDecl();
1405 
1406     // Assume variable sized types are passed in memory.
1407     if (RD->hasFlexibleArrayMember())
1408       return;
1409 
1410     const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
1411 
1412     // Reset Lo class, this will be recomputed.
1413     Current = NoClass;
1414 
1415     // If this is a C++ record, classify the bases first.
1416     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1417       for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
1418              e = CXXRD->bases_end(); i != e; ++i) {
1419         assert(!i->isVirtual() && !i->getType()->isDependentType() &&
1420                "Unexpected base class!");
1421         const CXXRecordDecl *Base =
1422           cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
1423 
1424         // Classify this field.
1425         //
1426         // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
1427         // single eightbyte, each is classified separately. Each eightbyte gets
1428         // initialized to class NO_CLASS.
1429         Class FieldLo, FieldHi;
1430         uint64_t Offset =
1431           OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
1432         classify(i->getType(), Offset, FieldLo, FieldHi);
1433         Lo = merge(Lo, FieldLo);
1434         Hi = merge(Hi, FieldHi);
1435         if (Lo == Memory || Hi == Memory)
1436           break;
1437       }
1438     }
1439 
1440     // Classify the fields one at a time, merging the results.
1441     unsigned idx = 0;
1442     for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1443            i != e; ++i, ++idx) {
1444       uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1445       bool BitField = i->isBitField();
1446 
1447       // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
1448       // four eightbytes, or it contains unaligned fields, it has class MEMORY.
1449       //
      // The only case in which a 256-bit wide vector can be used is when the
      // struct contains a single 256-bit element. Since the Lo and Hi logic
      // isn't extended to work for sizes wider than 128 bits, check early and
      // fall back to memory.
1453       //
1454       if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
1455         Lo = Memory;
1456         return;
1457       }
      // Note: skip this test for bit-fields; see below.
1459       if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
1460         Lo = Memory;
1461         return;
1462       }
1463 
1464       // Classify this field.
1465       //
1466       // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
1467       // exceeds a single eightbyte, each is classified
1468       // separately. Each eightbyte gets initialized to class
1469       // NO_CLASS.
1470       Class FieldLo, FieldHi;
1471 
1472       // Bit-fields require special handling, they do not force the
1473       // structure to be passed in memory even if unaligned, and
1474       // therefore they can straddle an eightbyte.
1475       if (BitField) {
1476         // Ignore padding bit-fields.
1477         if (i->isUnnamedBitfield())
1478           continue;
1479 
1480         uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1481         uint64_t Size = i->getBitWidthValue(getContext());
1482 
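        // EB_Lo and EB_Hi are the indices of the eightbytes covered by the
        // first and last bits of the field; for a type of at most 16 bytes
        // they can only be 0 or 1.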
1483         uint64_t EB_Lo = Offset / 64;
1484         uint64_t EB_Hi = (Offset + Size - 1) / 64;
1485         FieldLo = FieldHi = NoClass;
1486         if (EB_Lo) {
1487           assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
1488           FieldLo = NoClass;
1489           FieldHi = Integer;
1490         } else {
1491           FieldLo = Integer;
1492           FieldHi = EB_Hi ? Integer : NoClass;
1493         }
1494       } else
1495         classify(i->getType(), Offset, FieldLo, FieldHi);
1496       Lo = merge(Lo, FieldLo);
1497       Hi = merge(Hi, FieldHi);
1498       if (Lo == Memory || Hi == Memory)
1499         break;
1500     }
1501 
1502     postMerge(Size, Lo, Hi);
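    // At this point each eightbyte of a small struct has its own class.  For
    // example, struct { double d; int i; } ends up with Lo = SSE and
    // Hi = Integer, so it is passed in one XMM register and one GPR.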
1503   }
1504 }
1505 
1506 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
1507   // If this is a scalar LLVM value then assume LLVM will pass it in the right
1508   // place naturally.
1509   if (!isAggregateTypeForABI(Ty)) {
1510     // Treat an enum type as its underlying type.
1511     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1512       Ty = EnumTy->getDecl()->getIntegerType();
1513 
1514     return (Ty->isPromotableIntegerType() ?
1515             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1516   }
1517 
1518   return ABIArgInfo::getIndirect(0);
1519 }
1520 
1521 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
1522   if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
1523     uint64_t Size = getContext().getTypeSize(VecTy);
1524     unsigned LargestVector = HasAVX ? 256 : 128;
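    // Vectors of 64 bits or fewer, or wider than the largest supported vector
    // register (128 bits, or 256 bits with AVX), do not get the natural
    // vector-register treatment and are reported as illegal here.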
1525     if (Size <= 64 || Size > LargestVector)
1526       return true;
1527   }
1528 
1529   return false;
1530 }
1531 
1532 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
1533                                             unsigned freeIntRegs) const {
1534   // If this is a scalar LLVM value then assume LLVM will pass it in the right
1535   // place naturally.
1536   //
1537   // This assumption is optimistic, as there could be free registers available
1538   // when we need to pass this argument in memory, and LLVM could try to pass
1539   // the argument in the free register. This does not seem to happen currently,
1540   // but this code would be much safer if we could mark the argument with
1541   // 'onstack'. See PR12193.
1542   if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
1543     // Treat an enum type as its underlying type.
1544     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1545       Ty = EnumTy->getDecl()->getIntegerType();
1546 
1547     return (Ty->isPromotableIntegerType() ?
1548             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1549   }
1550 
1551   if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
1552     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
1553 
1554   // Compute the byval alignment. We specify the alignment of the byval in all
1555   // cases so that the mid-level optimizer knows the alignment of the byval.
1556   unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
1557 
1558   // Attempt to avoid passing indirect results using byval when possible. This
1559   // is important for good codegen.
1560   //
1561   // We do this by coercing the value into a scalar type which the backend can
1562   // handle naturally (i.e., without using byval).
1563   //
1564   // For simplicity, we currently only do this when we have exhausted all of the
1565   // free integer registers. Doing this when there are free integer registers
1566   // would require more care, as we would have to ensure that the coerced value
  // did not claim the unused register. That would require either reordering the
1568   // arguments to the function (so that any subsequent inreg values came first),
1569   // or only doing this optimization when there were no following arguments that
1570   // might be inreg.
1571   //
1572   // We currently expect it to be rare (particularly in well written code) for
1573   // arguments to be passed on the stack when there are still free integer
1574   // registers available (this would typically imply large structs being passed
1575   // by value), so this seems like a fair tradeoff for now.
1576   //
1577   // We can revisit this if the backend grows support for 'onstack' parameter
1578   // attributes. See PR12193.
1579   if (freeIntRegs == 0) {
1580     uint64_t Size = getContext().getTypeSize(Ty);
1581 
1582     // If this type fits in an eightbyte, coerce it into the matching integral
1583     // type, which will end up on the stack (with alignment 8).
1584     if (Align == 8 && Size <= 64)
1585       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1586                                                           Size));
1587   }
1588 
1589   return ABIArgInfo::getIndirect(Align);
1590 }
1591 
/// GetByteVectorType - The ABI specifies that a value should be passed in a
/// full vector XMM/YMM register.  Pick an LLVM IR type that will be passed in
/// a vector register.
1595 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
1596   llvm::Type *IRType = CGT.ConvertType(Ty);
1597 
1598   // Wrapper structs that just contain vectors are passed just like vectors,
1599   // strip them off if present.
1600   llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
1601   while (STy && STy->getNumElements() == 1) {
1602     IRType = STy->getElementType(0);
1603     STy = dyn_cast<llvm::StructType>(IRType);
1604   }
1605 
  // If the preferred type is a 16- or 32-byte vector, prefer to pass it.
1607   if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
1608     llvm::Type *EltTy = VT->getElementType();
1609     unsigned BitWidth = VT->getBitWidth();
1610     if ((BitWidth >= 128 && BitWidth <= 256) &&
1611         (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
1612          EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
1613          EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
1614          EltTy->isIntegerTy(128)))
1615       return VT;
1616   }
1617 
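  // We couldn't find a suitable vector type in the preferred IR type; fall
  // back to a generic 16-byte vector (<2 x double>), which fills an XMM
  // register.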
1618   return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
1619 }
1620 
1621 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known either to be off the end of the specified type or to be in
/// alignment padding.  The user type specified is known to be at most 128 bits
/// in size and to have passed through X86_64ABIInfo::classify with a successful
1625 /// classification that put one of the two halves in the INTEGER class.
1626 ///
1627 /// It is conservatively correct to return false.
1628 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
1629                                   unsigned EndBit, ASTContext &Context) {
1630   // If the bytes being queried are off the end of the type, there is no user
1631   // data hiding here.  This handles analysis of builtins, vectors and other
1632   // types that don't contain interesting padding.
1633   unsigned TySize = (unsigned)Context.getTypeSize(Ty);
1634   if (TySize <= StartBit)
1635     return true;
1636 
1637   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
1638     unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
1639     unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
1640 
1641     // Check each element to see if the element overlaps with the queried range.
1642     for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
1644       unsigned EltOffset = i*EltSize;
1645       if (EltOffset >= EndBit) break;
1646 
1647       unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
1648       if (!BitsContainNoUserData(AT->getElementType(), EltStart,
1649                                  EndBit-EltOffset, Context))
1650         return false;
1651     }
1652     // If it overlaps no elements, then it is safe to process as padding.
1653     return true;
1654   }
1655 
1656   if (const RecordType *RT = Ty->getAs<RecordType>()) {
1657     const RecordDecl *RD = RT->getDecl();
1658     const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
1659 
1660     // If this is a C++ record, check the bases first.
1661     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1662       for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
1663            e = CXXRD->bases_end(); i != e; ++i) {
1664         assert(!i->isVirtual() && !i->getType()->isDependentType() &&
1665                "Unexpected base class!");
1666         const CXXRecordDecl *Base =
1667           cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
1668 
1669         // If the base is after the span we care about, ignore it.
1670         unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
1671         if (BaseOffset >= EndBit) continue;
1672 
1673         unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
1674         if (!BitsContainNoUserData(i->getType(), BaseStart,
1675                                    EndBit-BaseOffset, Context))
1676           return false;
1677       }
1678     }
1679 
1680     // Verify that no field has data that overlaps the region of interest.  Yes
1681     // this could be sped up a lot by being smarter about queried fields,
1682     // however we're only looking at structs up to 16 bytes, so we don't care
1683     // much.
1684     unsigned idx = 0;
1685     for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1686          i != e; ++i, ++idx) {
1687       unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
1688 
1689       // If we found a field after the region we care about, then we're done.
1690       if (FieldOffset >= EndBit) break;
1691 
1692       unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
1693       if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
1694                                  Context))
1695         return false;
1696     }
1697 
1698     // If nothing in this record overlapped the area of interest, then we're
1699     // clean.
1700     return true;
1701   }
1702 
1703   return false;
1704 }
1705 
1706 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
1707 /// float member at the specified offset.  For example, {int,{float}} has a
1708 /// float at offset 4.  It is conservatively correct for this routine to return
1709 /// false.
1710 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
1711                                   const llvm::TargetData &TD) {
1712   // Base case if we find a float.
1713   if (IROffset == 0 && IRType->isFloatTy())
1714     return true;
1715 
1716   // If this is a struct, recurse into the field at the specified offset.
1717   if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
1718     const llvm::StructLayout *SL = TD.getStructLayout(STy);
1719     unsigned Elt = SL->getElementContainingOffset(IROffset);
1720     IROffset -= SL->getElementOffset(Elt);
1721     return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
1722   }
1723 
1724   // If this is an array, recurse into the field at the specified offset.
1725   if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
1726     llvm::Type *EltTy = ATy->getElementType();
1727     unsigned EltSize = TD.getTypeAllocSize(EltTy);
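    // Reduce the offset modulo the element size so that it is relative to the
    // start of the containing array element.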
1728     IROffset -= IROffset/EltSize*EltSize;
1729     return ContainsFloatAtOffset(EltTy, IROffset, TD);
1730   }
1731 
1732   return false;
1733 }
1734 
1735 
1736 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
1737 /// low 8 bytes of an XMM register, corresponding to the SSE class.
1738 llvm::Type *X86_64ABIInfo::
1739 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
1740                    QualType SourceTy, unsigned SourceOffset) const {
  // The only three choices we have are double, <2 x float>, or float. We pass
  // as float if the last 4 bytes are just padding.  This happens for structs
  // that contain 3 floats.
1744   if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
1745                             SourceOffset*8+64, getContext()))
1746     return llvm::Type::getFloatTy(getVMContext());
1747 
1748   // We want to pass as <2 x float> if the LLVM IR type contains a float at
1749   // offset+0 and offset+4.  Walk the LLVM IR type to find out if this is the
1750   // case.
1751   if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) &&
1752       ContainsFloatAtOffset(IRType, IROffset+4, getTargetData()))
1753     return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
1754 
1755   return llvm::Type::getDoubleTy(getVMContext());
1756 }
1757 
1758 
1759 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
1760 /// an 8-byte GPR.  This means that we either have a scalar or we are talking
1761 /// about the high or low part of an up-to-16-byte struct.  This routine picks
1762 /// the best LLVM IR type to represent this, which may be i64 or may be anything
1763 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
1764 /// etc).
1765 ///
1766 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
1767 /// the source type.  IROffset is an offset in bytes into the LLVM IR type that
1768 /// the 8-byte value references.  PrefType may be null.
1769 ///
1770 /// SourceTy is the source level type for the entire argument.  SourceOffset is
1771 /// an offset into this that we're processing (which is always either 0 or 8).
1772 ///
1773 llvm::Type *X86_64ABIInfo::
1774 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
1775                        QualType SourceTy, unsigned SourceOffset) const {
1776   // If we're dealing with an un-offset LLVM IR type, then it means that we're
1777   // returning an 8-byte unit starting with it.  See if we can safely use it.
1778   if (IROffset == 0) {
1779     // Pointers and int64's always fill the 8-byte unit.
1780     if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64))
1781       return IRType;
1782 
1783     // If we have a 1/2/4-byte integer, we can use it only if the rest of the
1784     // goodness in the source type is just tail padding.  This is allowed to
1785     // kick in for struct {double,int} on the int, but not on
1786     // struct{double,int,int} because we wouldn't return the second int.  We
1787     // have to do this analysis on the source type because we can't depend on
1788     // unions being lowered a specific way etc.
1789     if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
1790         IRType->isIntegerTy(32)) {
1791       unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth();
1792 
1793       if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
1794                                 SourceOffset*8+64, getContext()))
1795         return IRType;
1796     }
1797   }
1798 
1799   if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
1800     // If this is a struct, recurse into the field at the specified offset.
1801     const llvm::StructLayout *SL = getTargetData().getStructLayout(STy);
1802     if (IROffset < SL->getSizeInBytes()) {
1803       unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
1804       IROffset -= SL->getElementOffset(FieldIdx);
1805 
1806       return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
1807                                     SourceTy, SourceOffset);
1808     }
1809   }
1810 
1811   if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
1812     llvm::Type *EltTy = ATy->getElementType();
1813     unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
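    // Round the offset down to the start of the containing array element and
    // recurse into that element.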
1814     unsigned EltOffset = IROffset/EltSize*EltSize;
1815     return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
1816                                   SourceOffset);
1817   }
1818 
  // Okay, we don't have any better idea of what to pass, so we pass this in an
  // integer register that isn't any bigger than the remaining bytes of the
  // struct.
1821   unsigned TySizeInBytes =
1822     (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
1823 
1824   assert(TySizeInBytes != SourceOffset && "Empty field?");
1825 
1826   // It is always safe to classify this as an integer type up to i64 that
1827   // isn't larger than the structure.
1828   return llvm::IntegerType::get(getVMContext(),
1829                                 std::min(TySizeInBytes-SourceOffset, 8U)*8);
1830 }
1831 
1832 
1833 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
1834 /// be used as elements of a two register pair to pass or return, return a
1835 /// first class aggregate to represent them.  For example, if the low part of
1836 /// a by-value argument should be passed as i32* and the high part as float,
1837 /// return {i32*, float}.
1838 static llvm::Type *
1839 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
1840                            const llvm::TargetData &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
1842   // at offset 8.  If the high and low parts we inferred are both 4-byte types
1843   // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
1844   // the second element at offset 8.  Check for this:
1845   unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
1846   unsigned HiAlign = TD.getABITypeAlignment(Hi);
1847   unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
1848   assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
1849 
1850   // To handle this, we have to increase the size of the low part so that the
1851   // second element will start at an 8 byte offset.  We can't increase the size
1852   // of the second element because it might make us access off the end of the
1853   // struct.
1854   if (HiStart != 8) {
1855     // There are only two sorts of types the ABI generation code can produce for
1856     // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
1857     // Promote these to a larger type.
1858     if (Lo->isFloatTy())
1859       Lo = llvm::Type::getDoubleTy(Lo->getContext());
1860     else {
1861       assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
1862       Lo = llvm::Type::getInt64Ty(Lo->getContext());
1863     }
1864   }
1865 
1866   llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);
1867 
1868 
1869   // Verify that the second element is at an 8-byte offset.
1870   assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
1871          "Invalid x86-64 argument pair!");
1872   return Result;
1873 }
1874 
1875 ABIArgInfo X86_64ABIInfo::
1876 classifyReturnType(QualType RetTy) const {
1877   // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
1878   // classification algorithm.
1879   X86_64ABIInfo::Class Lo, Hi;
1880   classify(RetTy, 0, Lo, Hi);
1881 
1882   // Check some invariants.
1883   assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
1884   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
1885 
1886   llvm::Type *ResType = 0;
1887   switch (Lo) {
1888   case NoClass:
1889     if (Hi == NoClass)
1890       return ABIArgInfo::getIgnore();
1891     // If the low part is just padding, it takes no register, leave ResType
1892     // null.
1893     assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
1894            "Unknown missing lo part");
1895     break;
1896 
1897   case SSEUp:
1898   case X87Up:
1899     llvm_unreachable("Invalid classification for lo word.");
1900 
1901     // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
1902     // hidden argument.
1903   case Memory:
1904     return getIndirectReturnResult(RetTy);
1905 
1906     // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
1907     // available register of the sequence %rax, %rdx is used.
1908   case Integer:
1909     ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
1910 
1911     // If we have a sign or zero extended integer, make sure to return Extend
1912     // so that the parameter gets the right LLVM IR attributes.
1913     if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
1914       // Treat an enum type as its underlying type.
1915       if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1916         RetTy = EnumTy->getDecl()->getIntegerType();
1917 
1918       if (RetTy->isIntegralOrEnumerationType() &&
1919           RetTy->isPromotableIntegerType())
1920         return ABIArgInfo::getExtend();
1921     }
1922     break;
1923 
1924     // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
1925     // available SSE register of the sequence %xmm0, %xmm1 is used.
1926   case SSE:
1927     ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
1928     break;
1929 
1930     // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as an 80-bit x87 number.
1932   case X87:
1933     ResType = llvm::Type::getX86_FP80Ty(getVMContext());
1934     break;
1935 
1936     // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
1937     // part of the value is returned in %st0 and the imaginary part in
1938     // %st1.
1939   case ComplexX87:
1940     assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
1941     ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
1942                                     llvm::Type::getX86_FP80Ty(getVMContext()),
1943                                     NULL);
1944     break;
1945   }
1946 
1947   llvm::Type *HighPart = 0;
1948   switch (Hi) {
1949     // Memory was handled previously and X87 should
1950     // never occur as a hi class.
1951   case Memory:
1952   case X87:
1953     llvm_unreachable("Invalid classification for hi word.");
1954 
1955   case ComplexX87: // Previously handled.
1956   case NoClass:
1957     break;
1958 
1959   case Integer:
1960     HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
1961     if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
1962       return ABIArgInfo::getDirect(HighPart, 8);
1963     break;
1964   case SSE:
1965     HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
1966     if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
1967       return ABIArgInfo::getDirect(HighPart, 8);
1968     break;
1969 
1970     // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the next available eightbyte chunk of the last used
1972     // vector register.
1973     //
1974     // SSEUP should always be preceded by SSE, just widen.
1975   case SSEUp:
1976     assert(Lo == SSE && "Unexpected SSEUp classification.");
1977     ResType = GetByteVectorType(RetTy);
1978     break;
1979 
1980     // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
1981     // returned together with the previous X87 value in %st0.
1982   case X87Up:
1983     // If X87Up is preceded by X87, we don't need to do
1984     // anything. However, in some cases with unions it may not be
1985     // preceded by X87. In such situations we follow gcc and pass the
1986     // extra bits in an SSE reg.
1987     if (Lo != X87) {
1988       HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
1989       if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
1990         return ABIArgInfo::getDirect(HighPart, 8);
1991     }
1992     break;
1993   }
1994 
1995   // If a high part was specified, merge it together with the low part.  It is
1996   // known to pass in the high eightbyte of the result.  We do this by forming a
1997   // first class struct aggregate with the high and low part: {low, high}
1998   if (HighPart)
1999     ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
2000 
2001   return ABIArgInfo::getDirect(ResType);
2002 }
2003 
2004 ABIArgInfo X86_64ABIInfo::classifyArgumentType(
2005   QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE)
2006   const
2007 {
2008   X86_64ABIInfo::Class Lo, Hi;
2009   classify(Ty, 0, Lo, Hi);
2010 
2011   // Check some invariants.
2012   // FIXME: Enforce these by construction.
2013   assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2014   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2015 
2016   neededInt = 0;
2017   neededSSE = 0;
2018   llvm::Type *ResType = 0;
2019   switch (Lo) {
2020   case NoClass:
2021     if (Hi == NoClass)
2022       return ABIArgInfo::getIgnore();
2023     // If the low part is just padding, it takes no register, leave ResType
2024     // null.
2025     assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2026            "Unknown missing lo part");
2027     break;
2028 
2029     // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
2030     // on the stack.
2031   case Memory:
2032 
2033     // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
2034     // COMPLEX_X87, it is passed in memory.
2035   case X87:
2036   case ComplexX87:
2037     if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
2038       ++neededInt;
2039     return getIndirectResult(Ty, freeIntRegs);
2040 
2041   case SSEUp:
2042   case X87Up:
2043     llvm_unreachable("Invalid classification for lo word.");
2044 
2045     // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
2046     // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
2047     // and %r9 is used.
2048   case Integer:
2049     ++neededInt;
2050 
2051     // Pick an 8-byte type based on the preferred type.
2052     ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
2053 
2054     // If we have a sign or zero extended integer, make sure to return Extend
2055     // so that the parameter gets the right LLVM IR attributes.
2056     if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2057       // Treat an enum type as its underlying type.
2058       if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2059         Ty = EnumTy->getDecl()->getIntegerType();
2060 
2061       if (Ty->isIntegralOrEnumerationType() &&
2062           Ty->isPromotableIntegerType())
2063         return ABIArgInfo::getExtend();
2064     }
2065 
2066     break;
2067 
2068     // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
2069     // available SSE register is used, the registers are taken in the
2070     // order from %xmm0 to %xmm7.
2071   case SSE: {
2072     llvm::Type *IRType = CGT.ConvertType(Ty);
2073     ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
2074     ++neededSSE;
2075     break;
2076   }
2077   }
2078 
2079   llvm::Type *HighPart = 0;
2080   switch (Hi) {
2081     // Memory was handled previously, ComplexX87 and X87 should
2082     // never occur as hi classes, and X87Up must be preceded by X87,
2083     // which is passed in memory.
2084   case Memory:
2085   case X87:
2086   case ComplexX87:
2087     llvm_unreachable("Invalid classification for hi word.");
2088 
2089   case NoClass: break;
2090 
2091   case Integer:
2092     ++neededInt;
2093     // Pick an 8-byte type based on the preferred type.
2094     HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2095 
2096     if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
2097       return ABIArgInfo::getDirect(HighPart, 8);
2098     break;
2099 
2100     // X87Up generally doesn't occur here (long double is passed in
2101     // memory), except in situations involving unions.
2102   case X87Up:
2103   case SSE:
2104     HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2105 
2106     if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
2107       return ABIArgInfo::getDirect(HighPart, 8);
2108 
2109     ++neededSSE;
2110     break;
2111 
2112     // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
2113     // eightbyte is passed in the upper half of the last used SSE
    // register.  This only happens when 128-bit or 256-bit vectors are passed.
2115   case SSEUp:
2116     assert(Lo == SSE && "Unexpected SSEUp classification");
2117     ResType = GetByteVectorType(Ty);
2118     break;
2119   }
2120 
2121   // If a high part was specified, merge it together with the low part.  It is
2122   // known to pass in the high eightbyte of the result.  We do this by forming a
2123   // first class struct aggregate with the high and low part: {low, high}
2124   if (HighPart)
2125     ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
2126 
2127   return ABIArgInfo::getDirect(ResType);
2128 }
2129 
2130 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2131 
2132   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2133 
2134   // Keep track of the number of assigned registers.
2135   unsigned freeIntRegs = 6, freeSSERegs = 8;
2136 
2137   // If the return value is indirect, then the hidden argument is consuming one
2138   // integer register.
2139   if (FI.getReturnInfo().isIndirect())
2140     --freeIntRegs;
2141 
2142   // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
2143   // get assigned (in left-to-right order) for passing as follows...
2144   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2145        it != ie; ++it) {
2146     unsigned neededInt, neededSSE;
2147     it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
2148                                     neededSSE);
2149 
2150     // AMD64-ABI 3.2.3p3: If there are no registers available for any
2151     // eightbyte of an argument, the whole argument is passed on the
2152     // stack. If registers have already been assigned for some
2153     // eightbytes of such an argument, the assignments get reverted.
2154     if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
2155       freeIntRegs -= neededInt;
2156       freeSSERegs -= neededSSE;
2157     } else {
2158       it->info = getIndirectResult(it->type, freeIntRegs);
2159     }
2160   }
2161 }
2162 
2163 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
2164                                         QualType Ty,
2165                                         CodeGenFunction &CGF) {
2166   llvm::Value *overflow_arg_area_p =
2167     CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
2168   llvm::Value *overflow_arg_area =
2169     CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
2170 
2171   // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
2172   // byte boundary if alignment needed by type exceeds 8 byte boundary.
2173   // It isn't stated explicitly in the standard, but in practice we use
2174   // alignment greater than 16 where necessary.
2175   uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
2176   if (Align > 8) {
2177     // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
2178     llvm::Value *Offset =
2179       llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
2180     overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
2181     llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
2182                                                     CGF.Int64Ty);
2183     llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
2184     overflow_arg_area =
2185       CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
2186                                  overflow_arg_area->getType(),
2187                                  "overflow_arg_area.align");
2188   }
2189 
2190   // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
2191   llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
2192   llvm::Value *Res =
2193     CGF.Builder.CreateBitCast(overflow_arg_area,
2194                               llvm::PointerType::getUnqual(LTy));
2195 
2196   // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
2197   // l->overflow_arg_area + sizeof(type).
2198   // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
2199   // an 8 byte boundary.
2200 
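  // Round the size of the argument up to a multiple of eight bytes before
  // advancing overflow_arg_area.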
2201   uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
2202   llvm::Value *Offset =
2203       llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7)  & ~7);
2204   overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
2205                                             "overflow_arg_area.next");
2206   CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
2207 
2208   // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
2209   return Res;
2210 }
2211 
2212 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2213                                       CodeGenFunction &CGF) const {
2214   // Assume that va_list type is correct; should be pointer to LLVM type:
2215   // struct {
2216   //   i32 gp_offset;
2217   //   i32 fp_offset;
2218   //   i8* overflow_arg_area;
2219   //   i8* reg_save_area;
2220   // };
2221   unsigned neededInt, neededSSE;
2222 
2223   Ty = CGF.getContext().getCanonicalType(Ty);
2224   ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE);
2225 
2226   // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
2227   // in the registers. If not go to step 7.
2228   if (!neededInt && !neededSSE)
2229     return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2230 
2231   // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
2232   // general purpose registers needed to pass type and num_fp to hold
2233   // the number of floating point registers needed.
2234 
2235   // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
2236   // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
2237   // l->fp_offset > 304 - num_fp * 16 go to step 7.
2238   //
  // NOTE: 304 is a typo; there are only (6 * 8 + 8 * 16) = 176 bytes of
  // register save space.
2241 
2242   llvm::Value *InRegs = 0;
2243   llvm::Value *gp_offset_p = 0, *gp_offset = 0;
2244   llvm::Value *fp_offset_p = 0, *fp_offset = 0;
2245   if (neededInt) {
2246     gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
2247     gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
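    // fits_in_gp is equivalent to gp_offset <= 48 - num_gp * 8: there are
    // still enough unused GPR save slots for this argument (Step 3 above).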
2248     InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
2249     InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
2250   }
2251 
2252   if (neededSSE) {
2253     fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
2254     fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
2255     llvm::Value *FitsInFP =
2256       llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
2257     FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
2258     InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
2259   }
2260 
2261   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
2262   llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
2263   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
2264   CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
2265 
2266   // Emit code to load the value if it was passed in registers.
2267 
2268   CGF.EmitBlock(InRegBlock);
2269 
2270   // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
2271   // an offset of l->gp_offset and/or l->fp_offset. This may require
2272   // copying to a temporary location in case the parameter is passed
2273   // in different register classes or requires an alignment greater
2274   // than 8 for general purpose registers and 16 for XMM registers.
2275   //
2276   // FIXME: This really results in shameful code when we end up needing to
2277   // collect arguments from different places; often what should result in a
2278   // simple assembling of a structure from scattered addresses has many more
2279   // loads than necessary. Can we clean this up?
2280   llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
2281   llvm::Value *RegAddr =
2282     CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
2283                            "reg_save_area");
2284   if (neededInt && neededSSE) {
2285     // FIXME: Cleanup.
2286     assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
2287     llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
2288     llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
2289     assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
2290     llvm::Type *TyLo = ST->getElementType(0);
2291     llvm::Type *TyHi = ST->getElementType(1);
2292     assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
2293            "Unexpected ABI info for mixed regs");
2294     llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
2295     llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
2296     llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2297     llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2298     llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
2299     llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
2300     llvm::Value *V =
2301       CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
2302     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2303     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
2304     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2305 
2306     RegAddr = CGF.Builder.CreateBitCast(Tmp,
2307                                         llvm::PointerType::getUnqual(LTy));
2308   } else if (neededInt) {
2309     RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2310     RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2311                                         llvm::PointerType::getUnqual(LTy));
2312   } else if (neededSSE == 1) {
2313     RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2314     RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2315                                         llvm::PointerType::getUnqual(LTy));
2316   } else {
2317     assert(neededSSE == 2 && "Invalid number of needed registers!");
    // SSE registers are spaced 16 bytes apart in the register save
    // area, so we need to collect the two eightbytes together.
2320     llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2321     llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
2322     llvm::Type *DoubleTy = CGF.DoubleTy;
2323     llvm::Type *DblPtrTy =
2324       llvm::PointerType::getUnqual(DoubleTy);
2325     llvm::StructType *ST = llvm::StructType::get(DoubleTy,
2326                                                        DoubleTy, NULL);
2327     llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
2328     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
2329                                                          DblPtrTy));
2330     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2331     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
2332                                                          DblPtrTy));
2333     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2334     RegAddr = CGF.Builder.CreateBitCast(Tmp,
2335                                         llvm::PointerType::getUnqual(LTy));
2336   }
2337 
2338   // AMD64-ABI 3.5.7p5: Step 5. Set:
2339   // l->gp_offset = l->gp_offset + num_gp * 8
2340   // l->fp_offset = l->fp_offset + num_fp * 16.
2341   if (neededInt) {
2342     llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
2343     CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
2344                             gp_offset_p);
2345   }
2346   if (neededSSE) {
2347     llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
2348     CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
2349                             fp_offset_p);
2350   }
2351   CGF.EmitBranch(ContBlock);
2352 
2353   // Emit code to load the value if it was passed in memory.
2354 
2355   CGF.EmitBlock(InMemBlock);
2356   llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2357 
2358   // Return the appropriate result.
2359 
2360   CGF.EmitBlock(ContBlock);
2361   llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
2362                                                  "vaarg.addr");
2363   ResAddr->addIncoming(RegAddr, InRegBlock);
2364   ResAddr->addIncoming(MemAddr, InMemBlock);
2365   return ResAddr;
2366 }
2367 
2368 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const {
2369 
2370   if (Ty->isVoidType())
2371     return ABIArgInfo::getIgnore();
2372 
2373   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2374     Ty = EnumTy->getDecl()->getIntegerType();
2375 
2376   uint64_t Size = getContext().getTypeSize(Ty);
2377 
2378   if (const RecordType *RT = Ty->getAs<RecordType>()) {
2379     if (hasNonTrivialDestructorOrCopyConstructor(RT) ||
2380         RT->getDecl()->hasFlexibleArrayMember())
2381       return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2382 
2383     // FIXME: mingw-w64-gcc emits 128-bit struct as i128
2384     if (Size == 128 &&
2385         getContext().getTargetInfo().getTriple().getOS()
2386           == llvm::Triple::MinGW32)
2387       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2388                                                           Size));
2389 
2390     // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
2391     // not 1, 2, 4, or 8 bytes, must be passed by reference."
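    // Note that Size is in bits here, so a record of 1, 2, 4, or 8 bytes
    // (a power-of-two size no larger than 64 bits) is passed directly as an
    // integer of that size.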
2392     if (Size <= 64 &&
2393         (Size & (Size - 1)) == 0)
2394       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2395                                                           Size));
2396 
2397     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2398   }
2399 
2400   if (Ty->isPromotableIntegerType())
2401     return ABIArgInfo::getExtend();
2402 
2403   return ABIArgInfo::getDirect();
2404 }
2405 
2406 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2407 
2408   QualType RetTy = FI.getReturnType();
2409   FI.getReturnInfo() = classify(RetTy);
2410 
2411   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2412        it != ie; ++it)
2413     it->info = classify(it->type);
2414 }
2415 
2416 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2417                                       CodeGenFunction &CGF) const {
2418   llvm::Type *BPP = CGF.Int8PtrPtrTy;
2419 
2420   CGBuilderTy &Builder = CGF.Builder;
2421   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
2422                                                        "ap");
2423   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2424   llvm::Type *PTy =
2425     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2426   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
2427 
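  // Every argument in the Win64 variadic area occupies a multiple of 8 bytes;
  // advance the cursor past this argument accordingly.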
2428   uint64_t Offset =
2429     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
2430   llvm::Value *NextAddr =
2431     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
2432                       "ap.next");
2433   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2434 
2435   return AddrTyped;
2436 }
2437 
2438 // PowerPC-32
2439 
2440 namespace {
2441 class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2442 public:
2443   PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2444 
2445   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2446     // This is recovered from gcc output.
2447     return 1; // r1 is the dedicated stack pointer
2448   }
2449 
2450   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2451                                llvm::Value *Address) const;
2452 };
2453 
2454 }
2455 
2456 bool
2457 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2458                                                 llvm::Value *Address) const {
2459   // This is calculated from the LLVM and GCC tables and verified
2460   // against gcc output.  AFAIK all ABIs use the same encoding.
2461 
2462   CodeGen::CGBuilderTy &Builder = CGF.Builder;
2463 
2464   llvm::IntegerType *i8 = CGF.Int8Ty;
2465   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2466   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
2467   llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
2468 
2469   // 0-31: r0-31, the 4-byte general-purpose registers
2470   AssignToArrayRange(Builder, Address, Four8, 0, 31);
2471 
2472   // 32-63: fp0-31, the 8-byte floating-point registers
2473   AssignToArrayRange(Builder, Address, Eight8, 32, 63);
2474 
2475   // 64-76 are various 4-byte special-purpose registers:
2476   // 64: mq
2477   // 65: lr
2478   // 66: ctr
2479   // 67: ap
2480   // 68-75 cr0-7
2481   // 76: xer
2482   AssignToArrayRange(Builder, Address, Four8, 64, 76);
2483 
2484   // 77-108: v0-31, the 16-byte vector registers
2485   AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
2486 
2487   // 109: vrsave
2488   // 110: vscr
2489   // 111: spe_acc
2490   // 112: spefscr
2491   // 113: sfp
2492   AssignToArrayRange(Builder, Address, Four8, 109, 113);
2493 
2494   return false;
2495 }
2496 
2497 // PowerPC-64
2498 
2499 namespace {
2500 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2501 public:
2502   PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2503 
2504   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2505     // This is recovered from gcc output.
2506     return 1; // r1 is the dedicated stack pointer
2507   }
2508 
2509   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2510                                llvm::Value *Address) const;
2511 };
2512 
2513 }
2514 
2515 bool
2516 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2517                                                 llvm::Value *Address) const {
2518   // This is calculated from the LLVM and GCC tables and verified
2519   // against gcc output.  AFAIK all ABIs use the same encoding.
2520 
2521   CodeGen::CGBuilderTy &Builder = CGF.Builder;
2522 
2523   llvm::IntegerType *i8 = CGF.Int8Ty;
2524   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2525   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
2526   llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
2527 
2528   // 0-31: r0-31, the 8-byte general-purpose registers
2529   AssignToArrayRange(Builder, Address, Eight8, 0, 31);
2530 
2531   // 32-63: fp0-31, the 8-byte floating-point registers
2532   AssignToArrayRange(Builder, Address, Eight8, 32, 63);
2533 
2534   // 64-76 are various 4-byte special-purpose registers:
2535   // 64: mq
2536   // 65: lr
2537   // 66: ctr
2538   // 67: ap
2539   // 68-75 cr0-7
2540   // 76: xer
2541   AssignToArrayRange(Builder, Address, Four8, 64, 76);
2542 
2543   // 77-108: v0-31, the 16-byte vector registers
2544   AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
2545 
2546   // 109: vrsave
2547   // 110: vscr
2548   // 111: spe_acc
2549   // 112: spefscr
2550   // 113: sfp
2551   AssignToArrayRange(Builder, Address, Four8, 109, 113);
2552 
2553   return false;
2554 }
2555 
2556 //===----------------------------------------------------------------------===//
2557 // ARM ABI Implementation
2558 //===----------------------------------------------------------------------===//
2559 
2560 namespace {
2561 
2562 class ARMABIInfo : public ABIInfo {
2563 public:
2564   enum ABIKind {
2565     APCS = 0,
2566     AAPCS = 1,
2567     AAPCS_VFP
2568   };
2569 
2570 private:
2571   ABIKind Kind;
2572 
2573 public:
2574   ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {}
2575 
2576   bool isEABI() const {
2577     StringRef Env =
2578       getContext().getTargetInfo().getTriple().getEnvironmentName();
2579     return (Env == "gnueabi" || Env == "eabi" || Env == "androideabi");
2580   }
2581 
2582 private:
2583   ABIKind getABIKind() const { return Kind; }
2584 
2585   ABIArgInfo classifyReturnType(QualType RetTy) const;
2586   ABIArgInfo classifyArgumentType(QualType RetTy) const;
2587 
2588   virtual void computeInfo(CGFunctionInfo &FI) const;
2589 
2590   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2591                                  CodeGenFunction &CGF) const;
2592 };
2593 
2594 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
2595 public:
2596   ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
2597     :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
2598 
2599   const ARMABIInfo &getABIInfo() const {
2600     return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
2601   }
2602 
2603   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2604     return 13;
2605   }
2606 
2607   StringRef getARCRetainAutoreleasedReturnValueMarker() const {
2608     return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
2609   }
2610 
2611   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2612                                llvm::Value *Address) const {
2613     llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
2614 
2615     // 0-15 are the 16 integer registers.
2616     AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
2617     return false;
2618   }
2619 
2620   unsigned getSizeOfUnwindException() const {
2621     if (getABIInfo().isEABI()) return 88;
2622     return TargetCodeGenInfo::getSizeOfUnwindException();
2623   }
2624 };
2625 
2626 }
2627 
2628 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
2629   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2630   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2631        it != ie; ++it)
2632     it->info = classifyArgumentType(it->type);
2633 
2634   // Always honor user-specified calling convention.
2635   if (FI.getCallingConvention() != llvm::CallingConv::C)
2636     return;
2637 
  // The calling convention used by default for this ABI.
2639   llvm::CallingConv::ID DefaultCC;
2640   if (isEABI())
2641     DefaultCC = llvm::CallingConv::ARM_AAPCS;
2642   else
2643     DefaultCC = llvm::CallingConv::ARM_APCS;
2644 
  // If the user did not explicitly ask for a specific calling convention
  // (e.g. via the pcs attribute), set the effective calling convention if it
  // differs from the ABI default.
2648   switch (getABIKind()) {
2649   case APCS:
2650     if (DefaultCC != llvm::CallingConv::ARM_APCS)
2651       FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
2652     break;
2653   case AAPCS:
2654     if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
2655       FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
2656     break;
2657   case AAPCS_VFP:
2658     if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP)
2659       FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
2660     break;
2661   }
2662 }
2663 
2664 /// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous
2665 /// aggregate.  If HAMembers is non-null, the number of base elements
2666 /// contained in the type is returned through it; this is used for the
2667 /// recursive calls that check aggregate component types.
2668 static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
2669                                    ASTContext &Context,
2670                                    uint64_t *HAMembers = 0) {
2671   uint64_t Members = 0;
2672   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
2673     if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
2674       return false;
2675     Members *= AT->getSize().getZExtValue();
2676   } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
2677     const RecordDecl *RD = RT->getDecl();
2678     if (RD->hasFlexibleArrayMember())
2679       return false;
2680 
2681     Members = 0;
2682     for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2683          i != e; ++i) {
2684       const FieldDecl *FD = *i;
2685       uint64_t FldMembers;
2686       if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers))
2687         return false;
2688 
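      // For a union, take the largest field count; for a struct, sum the
      // counts of all of its fields.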
2689       Members = (RD->isUnion() ?
2690                  std::max(Members, FldMembers) : Members + FldMembers);
2691     }
2692   } else {
2693     Members = 1;
2694     if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
2695       Members = 2;
2696       Ty = CT->getElementType();
2697     }
2698 
    // Homogeneous aggregates for AAPCS-VFP must have base types of float,
    // double, long double, or 64-bit or 128-bit vectors.
2701     if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
2702       if (BT->getKind() != BuiltinType::Float &&
2703           BT->getKind() != BuiltinType::Double &&
2704           BT->getKind() != BuiltinType::LongDouble)
2705         return false;
2706     } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
2707       unsigned VecSize = Context.getTypeSize(VT);
2708       if (VecSize != 64 && VecSize != 128)
2709         return false;
2710     } else {
2711       return false;
2712     }
2713 
2714     // The base type must be the same for all members.  Vector types of the
2715     // same total size are treated as being equivalent here.
2716     const Type *TyPtr = Ty.getTypePtr();
2717     if (!Base)
2718       Base = TyPtr;
2719     if (Base != TyPtr &&
2720         (!Base->isVectorType() || !TyPtr->isVectorType() ||
2721          Context.getTypeSize(Base) != Context.getTypeSize(TyPtr)))
2722       return false;
2723   }
2724 
2725   // Homogeneous Aggregates can have at most 4 members of the base type.
2726   if (HAMembers)
2727     *HAMembers = Members;
2728 
2729   return (Members > 0 && Members <= 4);
2730 }
2731 
2732 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
2733   if (!isAggregateTypeForABI(Ty)) {
2734     // Treat an enum type as its underlying type.
2735     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2736       Ty = EnumTy->getDecl()->getIntegerType();
2737 
2738     return (Ty->isPromotableIntegerType() ?
2739             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2740   }
2741 
2742   // Ignore empty records.
2743   if (isEmptyRecord(getContext(), Ty, true))
2744     return ABIArgInfo::getIgnore();
2745 
2746   // Structures with either a non-trivial destructor or a non-trivial
2747   // copy constructor are always indirect.
2748   if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
2749     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2750 
2751   if (getABIKind() == ARMABIInfo::AAPCS_VFP) {
2752     // Homogeneous Aggregates need to be expanded.
2753     const Type *Base = 0;
2754     if (isHomogeneousAggregate(Ty, Base, getContext())) {
2755       assert(Base && "Base type should be set for homogeneous aggregate");
2756       return ABIArgInfo::getExpand();
2757     }
2758   }
2759 
2760   // Support byval for ARM: large or over-aligned aggregates are passed byval.
2761   if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64) ||
2762       getContext().getTypeAlign(Ty) > 64) {
2763     return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
2764   }
2765 
2766   // Otherwise, pass by coercing to a structure of the appropriate size.
2767   llvm::Type* ElemTy;
2768   unsigned SizeRegs;
2769   // FIXME: Try to match the types of the arguments more accurately where
2770   // we can.
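  // For example, a 12-byte struct with 4-byte alignment is coerced to a
  // structure containing [3 x i32].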
2771   if (getContext().getTypeAlign(Ty) <= 32) {
2772     ElemTy = llvm::Type::getInt32Ty(getVMContext());
2773     SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
2774   } else {
2775     ElemTy = llvm::Type::getInt64Ty(getVMContext());
2776     SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
2777   }
2778 
2779   llvm::Type *STy =
2780     llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL);
2781   return ABIArgInfo::getDirect(STy);
2782 }
2783 
2784 static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
2785                               llvm::LLVMContext &VMContext) {
2786   // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
2787   // is called integer-like if its size is less than or equal to one word, and
2788   // the offset of each of its addressable sub-fields is zero.
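  // For example, "struct { short s; }" and "union { int i; char c; }" are
  // integer-like, while "struct { char a, b; }" is not, because its second
  // field is at a non-zero offset.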
2789 
2790   uint64_t Size = Context.getTypeSize(Ty);
2791 
2792   // Check that the type fits in a word.
2793   if (Size > 32)
2794     return false;
2795 
2796   // FIXME: Handle vector types!
2797   if (Ty->isVectorType())
2798     return false;
2799 
2800   // Float types are never treated as "integer like".
2801   if (Ty->isRealFloatingType())
2802     return false;
2803 
2804   // If this is a builtin or pointer type then it is ok.
2805   if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
2806     return true;
2807 
2808   // Small complex integer types are "integer like".
2809   if (const ComplexType *CT = Ty->getAs<ComplexType>())
2810     return isIntegerLikeType(CT->getElementType(), Context, VMContext);
2811 
2812   // Single element and zero sized arrays should be allowed, by the definition
2813   // above, but they are not.
2814 
2815   // Otherwise, it must be a record type.
2816   const RecordType *RT = Ty->getAs<RecordType>();
2817   if (!RT) return false;
2818 
2819   // Ignore records with flexible arrays.
2820   const RecordDecl *RD = RT->getDecl();
2821   if (RD->hasFlexibleArrayMember())
2822     return false;
2823 
2824   // Check that all sub-fields are at offset 0, and are themselves "integer
2825   // like".
2826   const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
2827 
2828   bool HadField = false;
2829   unsigned idx = 0;
2830   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2831        i != e; ++i, ++idx) {
2832     const FieldDecl *FD = *i;
2833 
2834     // Bit-fields are not addressable; we only need to verify they are "integer
2835     // like". We still have to disallow a subsequent non-bitfield, for example:
2836     //   struct { int : 0; int x; }
2837     // is non-integer like according to gcc.
2838     if (FD->isBitField()) {
2839       if (!RD->isUnion())
2840         HadField = true;
2841 
2842       if (!isIntegerLikeType(FD->getType(), Context, VMContext))
2843         return false;
2844 
2845       continue;
2846     }
2847 
2848     // Check if this field is at offset 0.
2849     if (Layout.getFieldOffset(idx) != 0)
2850       return false;
2851 
2852     if (!isIntegerLikeType(FD->getType(), Context, VMContext))
2853       return false;
2854 
2855     // Only allow at most one field in a structure. This doesn't match the
2856     // wording above, but follows gcc in situations with a field following an
2857     // empty structure.
2858     if (!RD->isUnion()) {
2859       if (HadField)
2860         return false;
2861 
2862       HadField = true;
2863     }
2864   }
2865 
2866   return true;
2867 }
2868 
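// Classify a return value for the ARM calling convention. Under APCS,
// integer-like aggregates come back in r0 and everything else in memory;
// under AAPCS, aggregates of at most 4 bytes come back in r0 and, for
// AAPCS-VFP, homogeneous aggregates are returned directly; all other
// aggregates are returned indirectly.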
2869 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
2870   if (RetTy->isVoidType())
2871     return ABIArgInfo::getIgnore();
2872 
2873   // Large vector types should be returned via memory.
2874   if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
2875     return ABIArgInfo::getIndirect(0);
2876 
2877   if (!isAggregateTypeForABI(RetTy)) {
2878     // Treat an enum type as its underlying type.
2879     if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
2880       RetTy = EnumTy->getDecl()->getIntegerType();
2881 
2882     return (RetTy->isPromotableIntegerType() ?
2883             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2884   }
2885 
2886   // Structures with either a non-trivial destructor or a non-trivial
2887   // copy constructor are always indirect.
2888   if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
2889     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2890 
2891   // Are we following APCS?
2892   if (getABIKind() == APCS) {
2893     if (isEmptyRecord(getContext(), RetTy, false))
2894       return ABIArgInfo::getIgnore();
2895 
2896     // Complex types are all returned as packed integers.
2897     //
2898     // FIXME: Consider using 2 x vector types if the back end handles them
2899     // correctly.
2900     if (RetTy->isAnyComplexType())
2901       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2902                                               getContext().getTypeSize(RetTy)));
2903 
2904     // Integer like structures are returned in r0.
2905     if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
2906       // Return in the smallest viable integer type.
2907       uint64_t Size = getContext().getTypeSize(RetTy);
2908       if (Size <= 8)
2909         return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
2910       if (Size <= 16)
2911         return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
2912       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
2913     }
2914 
2915     // Otherwise return in memory.
2916     return ABIArgInfo::getIndirect(0);
2917   }
2918 
2919   // Otherwise this is an AAPCS variant.
2920 
2921   if (isEmptyRecord(getContext(), RetTy, true))
2922     return ABIArgInfo::getIgnore();
2923 
2924   // Check for homogeneous aggregates with AAPCS-VFP.
2925   if (getABIKind() == AAPCS_VFP) {
2926     const Type *Base = 0;
2927     if (isHomogeneousAggregate(RetTy, Base, getContext())) {
2928       assert(Base && "Base type should be set for homogeneous aggregate");
2929       // Homogeneous Aggregates are returned directly.
2930       return ABIArgInfo::getDirect();
2931     }
2932   }
2933 
2934   // Aggregates <= 4 bytes are returned in r0; other aggregates
2935   // are returned indirectly.
2936   uint64_t Size = getContext().getTypeSize(RetTy);
2937   if (Size <= 32) {
2938     // Return in the smallest viable integer type.
2939     if (Size <= 8)
2940       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
2941     if (Size <= 16)
2942       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
2943     return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
2944   }
2945 
2946   return ABIArgInfo::getIndirect(0);
2947 }
2948 
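// Emit code for va_arg on ARM. The va_list is a single pointer into the
// argument area: round the current pointer up if the type needs more than
// 4-byte alignment, then advance it by the argument size rounded up to a
// multiple of 4 bytes.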
2949 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2950                                    CodeGenFunction &CGF) const {
2951   llvm::Type *BP = CGF.Int8PtrTy;
2952   llvm::Type *BPP = CGF.Int8PtrPtrTy;
2953 
2954   CGBuilderTy &Builder = CGF.Builder;
2955   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
2956   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2957   // Handle address alignment for type alignment > 32 bits
2958   uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
2959   if (TyAlign > 4) {
2960     assert((TyAlign & (TyAlign - 1)) == 0 &&
2961            "Alignment is not power of 2!");
2962     llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
2963     AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
2964     AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
2965     Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
2966   }
2967   llvm::Type *PTy =
2968     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2969   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
2970 
2971   uint64_t Offset =
2972     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
2973   llvm::Value *NextAddr =
2974     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
2975                       "ap.next");
2976   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2977 
2978   return AddrTyped;
2979 }
2980 
2981 //===----------------------------------------------------------------------===//
2982 // NVPTX ABI Implementation
2983 //===----------------------------------------------------------------------===//
2984 
2985 namespace {
2986 
2987 class NVPTXABIInfo : public ABIInfo {
2988 public:
2989   NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
2990 
2991   ABIArgInfo classifyReturnType(QualType RetTy) const;
2992   ABIArgInfo classifyArgumentType(QualType Ty) const;
2993 
2994   virtual void computeInfo(CGFunctionInfo &FI) const;
2995   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2996                                  CodeGenFunction &CGF) const;
2997 };
2998 
2999 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
3000 public:
3001   NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
3002     : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
3003 
3004   virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
3005                                    CodeGen::CodeGenModule &M) const;
3006 };
3007 
3008 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
3009   if (RetTy->isVoidType())
3010     return ABIArgInfo::getIgnore();
3011   if (isAggregateTypeForABI(RetTy))
3012     return ABIArgInfo::getIndirect(0);
3013   return ABIArgInfo::getDirect();
3014 }
3015 
3016 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
3017   if (isAggregateTypeForABI(Ty))
3018     return ABIArgInfo::getIndirect(0);
3019 
3020   return ABIArgInfo::getDirect();
3021 }
3022 
3023 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
3024   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3025   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3026        it != ie; ++it)
3027     it->info = classifyArgumentType(it->type);
3028 
3029   // Always honor user-specified calling convention.
3030   if (FI.getCallingConvention() != llvm::CallingConv::C)
3031     return;
3032 
3033   // Otherwise, use the default calling convention dictated by the ABI.
3034   // We're still using the PTX_Kernel/PTX_Device calling conventions here,
3035   // but we should switch to NVVM metadata later on.
3036   llvm::CallingConv::ID DefaultCC;
3037   const LangOptions &LangOpts = getContext().getLangOpts();
3038   if (LangOpts.OpenCL || LangOpts.CUDA) {
3039     // If we are in OpenCL or CUDA mode, then default to device functions
3040     DefaultCC = llvm::CallingConv::PTX_Device;
3041   } else {
3042     // If we are in standard C/C++ mode, use the triple to decide on the default
3043     StringRef Env =
3044       getContext().getTargetInfo().getTriple().getEnvironmentName();
3045     if (Env == "device")
3046       DefaultCC = llvm::CallingConv::PTX_Device;
3047     else
3048       DefaultCC = llvm::CallingConv::PTX_Kernel;
3049   }
3050   FI.setEffectiveCallingConvention(DefaultCC);
3051 
3052 }
3053 
3054 llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3055                                      CodeGenFunction &CGF) const {
3056   llvm_unreachable("NVPTX does not support varargs");
3057 }
3058 
3059 void NVPTXTargetCodeGenInfo::
3060 SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
3061                     CodeGen::CodeGenModule &M) const {
3062   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
3063   if (!FD) return;
3064 
3065   llvm::Function *F = cast<llvm::Function>(GV);
3066 
3067   // Perform special handling in OpenCL mode
3068   if (M.getLangOpts().OpenCL) {
3069     // Use OpenCL function attributes to set proper calling conventions
3070     // By default, all functions are device functions
3071     if (FD->hasAttr<OpenCLKernelAttr>()) {
3072       // OpenCL __kernel functions get a kernel calling convention
3073       F->setCallingConv(llvm::CallingConv::PTX_Kernel);
3074       // And kernel functions are not subject to inlining
3075       F->addFnAttr(llvm::Attribute::NoInline);
3076     }
3077   }
3078 
3079   // Perform special handling in CUDA mode.
3080   if (M.getLangOpts().CUDA) {
3081     // CUDA __global__ functions get a kernel calling convention.  Since
3082     // __global__ functions cannot be called from the device, we do not
3083     // need to set the noinline attribute.
3084     if (FD->getAttr<CUDAGlobalAttr>())
3085       F->setCallingConv(llvm::CallingConv::PTX_Kernel);
3086   }
3087 }
3088 
3089 }
3090 
3091 //===----------------------------------------------------------------------===//
3092 // MBlaze ABI Implementation
3093 //===----------------------------------------------------------------------===//
3094 
3095 namespace {
3096 
3097 class MBlazeABIInfo : public ABIInfo {
3098 public:
3099   MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
3100 
3101   bool isPromotableIntegerType(QualType Ty) const;
3102 
3103   ABIArgInfo classifyReturnType(QualType RetTy) const;
3104   ABIArgInfo classifyArgumentType(QualType RetTy) const;
3105 
3106   virtual void computeInfo(CGFunctionInfo &FI) const {
3107     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3108     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3109          it != ie; ++it)
3110       it->info = classifyArgumentType(it->type);
3111   }
3112 
3113   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3114                                  CodeGenFunction &CGF) const;
3115 };
3116 
3117 class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo {
3118 public:
3119   MBlazeTargetCodeGenInfo(CodeGenTypes &CGT)
3120     : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {}
3121   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
3122                            CodeGen::CodeGenModule &M) const;
3123 };
3124 
3125 }
3126 
3127 bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const {
3128   // The MBlaze ABI requires all 8- and 16-bit quantities to be extended.
3129   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
3130     switch (BT->getKind()) {
3131     case BuiltinType::Bool:
3132     case BuiltinType::Char_S:
3133     case BuiltinType::Char_U:
3134     case BuiltinType::SChar:
3135     case BuiltinType::UChar:
3136     case BuiltinType::Short:
3137     case BuiltinType::UShort:
3138       return true;
3139     default:
3140       return false;
3141     }
3142   return false;
3143 }
3144 
3145 llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3146                                       CodeGenFunction &CGF) const {
3147   // FIXME: Implement
3148   return 0;
3149 }
3150 
3151 
3152 ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const {
3153   if (RetTy->isVoidType())
3154     return ABIArgInfo::getIgnore();
3155   if (isAggregateTypeForABI(RetTy))
3156     return ABIArgInfo::getIndirect(0);
3157 
3158   return (isPromotableIntegerType(RetTy) ?
3159           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3160 }
3161 
3162 ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const {
3163   if (isAggregateTypeForABI(Ty))
3164     return ABIArgInfo::getIndirect(0);
3165 
3166   return (isPromotableIntegerType(Ty) ?
3167           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3168 }
3169 
3170 void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
3171                                                   llvm::GlobalValue *GV,
3172                                                   CodeGen::CodeGenModule &M)
3173                                                   const {
3174   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
3175   if (!FD) return;
3176 
3177   llvm::CallingConv::ID CC = llvm::CallingConv::C;
3178   if (FD->hasAttr<MBlazeInterruptHandlerAttr>())
3179     CC = llvm::CallingConv::MBLAZE_INTR;
3180   else if (FD->hasAttr<MBlazeSaveVolatilesAttr>())
3181     CC = llvm::CallingConv::MBLAZE_SVOL;
3182 
3183   if (CC != llvm::CallingConv::C) {
3184     // Handle 'interrupt_handler' attribute:
3185     llvm::Function *F = cast<llvm::Function>(GV);
3186 
3187     // Step 1: Set ISR calling convention.
3188     F->setCallingConv(CC);
3189 
3190     // Step 2: Add attributes goodness.
3191     F->addFnAttr(llvm::Attribute::NoInline);
3192   }
3193 
3194   // Step 3: Emit _interrupt_handler alias.
3195   if (CC == llvm::CallingConv::MBLAZE_INTR)
3196     new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
3197                           "_interrupt_handler", GV, &M.getModule());
3198 }
3199 
3200 
3201 //===----------------------------------------------------------------------===//
3202 // MSP430 ABI Implementation
3203 //===----------------------------------------------------------------------===//
3204 
3205 namespace {
3206 
3207 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
3208 public:
3209   MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
3210     : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
3211   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
3212                            CodeGen::CodeGenModule &M) const;
3213 };
3214 
3215 }
3216 
3217 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
3218                                                   llvm::GlobalValue *GV,
3219                                              CodeGen::CodeGenModule &M) const {
3220   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
3221     if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
3222       // Handle 'interrupt' attribute:
3223       llvm::Function *F = cast<llvm::Function>(GV);
3224 
3225       // Step 1: Set ISR calling convention.
3226       F->setCallingConv(llvm::CallingConv::MSP430_INTR);
3227 
3228       // Step 2: Add attributes goodness.
3229       F->addFnAttr(llvm::Attribute::NoInline);
3230 
3231       // Step 3: Emit ISR vector alias.
3232       unsigned Num = attr->getNumber() + 0xffe0;
3233       new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
3234                             "vector_" + Twine::utohexstr(Num),
3235                             GV, &M.getModule());
3236     }
3237   }
3238 }
3239 
3240 //===----------------------------------------------------------------------===//
3241 // MIPS ABI Implementation.  This works for both little-endian and
3242 // big-endian variants.
3243 //===----------------------------------------------------------------------===//
3244 
3245 namespace {
3246 class MipsABIInfo : public ABIInfo {
3247   bool IsO32;
3248   unsigned MinABIStackAlignInBytes, StackAlignInBytes;
3249   void CoerceToIntArgs(uint64_t TySize,
3250                        SmallVector<llvm::Type*, 8> &ArgList) const;
3251   llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
3252   llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
3253   llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
3254 public:
3255   MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
3256     ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
3257     StackAlignInBytes(IsO32 ? 8 : 16) {}
3258 
3259   ABIArgInfo classifyReturnType(QualType RetTy) const;
3260   ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
3261   virtual void computeInfo(CGFunctionInfo &FI) const;
3262   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3263                                  CodeGenFunction &CGF) const;
3264 };
3265 
3266 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
3267   unsigned SizeOfUnwindException;
3268 public:
3269   MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
3270     : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
3271       SizeOfUnwindException(IsO32 ? 24 : 32) {}
3272 
3273   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
3274     return 29;
3275   }
3276 
3277   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3278                                llvm::Value *Address) const;
3279 
3280   unsigned getSizeOfUnwindException() const {
3281     return SizeOfUnwindException;
3282   }
3283 };
3284 }
3285 
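// Fill ArgList with enough register-sized integers to cover TySize bits,
// followed by one smaller integer for any remaining bits.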
3286 void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
3287                                   SmallVector<llvm::Type*, 8> &ArgList) const {
3288   llvm::IntegerType *IntTy =
3289     llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
3290 
3291   // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
3292   for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
3293     ArgList.push_back(IntTy);
3294 
3295   // If necessary, add one more integer type to ArgList.
3296   unsigned R = TySize % (MinABIStackAlignInBytes * 8);
3297 
3298   if (R)
3299     ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
3300 }
3301 
3302 // In N32/64, an aligned double precision floating point field is passed in
3303 // a register.
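// For example, under N64 "struct { double d; int i; }" is coerced to
// { double, i64 } so that the aligned double is passed in a floating-point
// register.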
3304 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
3305   SmallVector<llvm::Type*, 8> ArgList, IntArgList;
3306 
3307   if (IsO32) {
3308     CoerceToIntArgs(TySize, ArgList);
3309     return llvm::StructType::get(getVMContext(), ArgList);
3310   }
3311 
3312   if (Ty->isComplexType())
3313     return CGT.ConvertType(Ty);
3314 
3315   const RecordType *RT = Ty->getAs<RecordType>();
3316 
3317   // Unions/vectors are passed in integer registers.
3318   if (!RT || !RT->isStructureOrClassType()) {
3319     CoerceToIntArgs(TySize, ArgList);
3320     return llvm::StructType::get(getVMContext(), ArgList);
3321   }
3322 
3323   const RecordDecl *RD = RT->getDecl();
3324   const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
3325   assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
3326 
3327   uint64_t LastOffset = 0;
3328   unsigned idx = 0;
3329   llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
3330 
3331   // Iterate over fields in the struct/class and check if there are any aligned
3332   // double fields.
3333   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3334        i != e; ++i, ++idx) {
3335     const QualType Ty = i->getType();
3336     const BuiltinType *BT = Ty->getAs<BuiltinType>();
3337 
3338     if (!BT || BT->getKind() != BuiltinType::Double)
3339       continue;
3340 
3341     uint64_t Offset = Layout.getFieldOffset(idx);
3342     if (Offset % 64) // Ignore doubles that are not aligned.
3343       continue;
3344 
3345     // Add ((Offset - LastOffset) / 64) args of type i64.
3346     for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
3347       ArgList.push_back(I64);
3348 
3349     // Add double type.
3350     ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
3351     LastOffset = Offset + 64;
3352   }
3353 
3354   CoerceToIntArgs(TySize - LastOffset, IntArgList);
3355   ArgList.append(IntArgList.begin(), IntArgList.end());
3356 
3357   return llvm::StructType::get(getVMContext(), ArgList);
3358 }
3359 
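// If Offset is not a multiple of Align, return a register-sized integer to
// insert as padding so the real argument starts in the correctly aligned
// slot; otherwise return null.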
3360 llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const {
3361   assert((Offset % MinABIStackAlignInBytes) == 0);
3362 
3363   if ((Align - 1) & Offset)
3364     return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
3365 
3366   return 0;
3367 }
3368 
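// Classify one argument and advance Offset, the running byte offset into the
// argument area. The offset before this argument decides whether padding must
// be inserted to keep the argument aligned.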
3369 ABIArgInfo
3370 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
3371   uint64_t OrigOffset = Offset;
3372   uint64_t TySize = getContext().getTypeSize(Ty);
3373   uint64_t Align = getContext().getTypeAlign(Ty) / 8;
3374 
3375   Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
3376                    (uint64_t)StackAlignInBytes);
3377   Offset = llvm::RoundUpToAlignment(Offset, Align);
3378   Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
3379 
3380   if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
3381     // Ignore empty aggregates.
3382     if (TySize == 0)
3383       return ABIArgInfo::getIgnore();
3384 
3385     // Records with non trivial destructors/constructors should not be passed
3386     // by value.
3387     if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) {
3388       Offset = OrigOffset + MinABIStackAlignInBytes;
3389       return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3390     }
3391 
3392     // If we have reached here, aggregates are passed directly by coercing to
3393     // another structure type. Padding is inserted if the offset of the
3394     // aggregate is unaligned.
3395     return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
3396                                  getPaddingType(Align, OrigOffset));
3397   }
3398 
3399   // Treat an enum type as its underlying type.
3400   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3401     Ty = EnumTy->getDecl()->getIntegerType();
3402 
3403   if (Ty->isPromotableIntegerType())
3404     return ABIArgInfo::getExtend();
3405 
3406   return ABIArgInfo::getDirect(0, 0, getPaddingType(Align, OrigOffset));
3407 }
3408 
3409 llvm::Type*
3410 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
3411   const RecordType *RT = RetTy->getAs<RecordType>();
3412   SmallVector<llvm::Type*, 8> RTList;
3413 
3414   if (RT && RT->isStructureOrClassType()) {
3415     const RecordDecl *RD = RT->getDecl();
3416     const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
3417     unsigned FieldCnt = Layout.getFieldCount();
3418 
3419     // N32/64 returns struct/classes in floating point registers if the
3420     // following conditions are met:
3421     // 1. The size of the struct/class is no larger than 128-bit.
3422     // 2. The struct/class has one or two fields all of which are floating
3423     //    point types.
3424     // 3. The offset of the first field is zero (this follows what gcc does).
3425     //
3426     // Any other composite results are returned in integer registers.
3427     //
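    // For example, "struct { float f; double d; }" meets these conditions and
    // is returned as { float, double }.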
3428     if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
3429       RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
3430       for (; b != e; ++b) {
3431         const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
3432 
3433         if (!BT || !BT->isFloatingPoint())
3434           break;
3435 
3436         RTList.push_back(CGT.ConvertType(b->getType()));
3437       }
3438 
3439       if (b == e)
3440         return llvm::StructType::get(getVMContext(), RTList,
3441                                      RD->hasAttr<PackedAttr>());
3442 
3443       RTList.clear();
3444     }
3445   }
3446 
3447   CoerceToIntArgs(Size, RTList);
3448   return llvm::StructType::get(getVMContext(), RTList);
3449 }
3450 
3451 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
3452   uint64_t Size = getContext().getTypeSize(RetTy);
3453 
3454   if (RetTy->isVoidType() || Size == 0)
3455     return ABIArgInfo::getIgnore();
3456 
3457   if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
3458     if (Size <= 128) {
3459       if (RetTy->isAnyComplexType())
3460         return ABIArgInfo::getDirect();
3461 
3462       // O32 returns integer vectors in registers.
3463       if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())
3464         return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
3465 
3466       if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
3467         return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
3468     }
3469 
3470     return ABIArgInfo::getIndirect(0);
3471   }
3472 
3473   // Treat an enum type as its underlying type.
3474   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3475     RetTy = EnumTy->getDecl()->getIntegerType();
3476 
3477   return (RetTy->isPromotableIntegerType() ?
3478           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3479 }
3480 
3481 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
3482   ABIArgInfo &RetInfo = FI.getReturnInfo();
3483   RetInfo = classifyReturnType(FI.getReturnType());
3484 
3485   // Check if a pointer to an aggregate is passed as a hidden argument.
3486   uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
3487 
3488   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3489        it != ie; ++it)
3490     it->info = classifyArgumentType(it->type, Offset);
3491 }
3492 
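// Emit code for va_arg on MIPS. As on ARM, the va_list is a single pointer:
// round it up to the type's alignment when that exceeds the minimum stack
// alignment, then advance it by the argument size rounded up to the
// (adjusted) alignment.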
3493 llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3494                                     CodeGenFunction &CGF) const {
3495   llvm::Type *BP = CGF.Int8PtrTy;
3496   llvm::Type *BPP = CGF.Int8PtrPtrTy;
3497 
3498   CGBuilderTy &Builder = CGF.Builder;
3499   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
3500   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3501   int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
3502   llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3503   llvm::Value *AddrTyped;
3504   unsigned PtrWidth = getContext().getTargetInfo().getPointerWidth(0);
3505   llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
3506 
3507   if (TypeAlign > MinABIStackAlignInBytes) {
3508     llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
3509     llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
3510     llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
3511     llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
3512     llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
3513     AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
3514   }
3515   else
3516     AddrTyped = Builder.CreateBitCast(Addr, PTy);
3517 
3518   llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
3519   TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
3520   uint64_t Offset =
3521     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
3522   llvm::Value *NextAddr =
3523     Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
3524                       "ap.next");
3525   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3526 
3527   return AddrTyped;
3528 }
3529 
3530 bool
3531 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3532                                                llvm::Value *Address) const {
3533   // This information comes from gcc's implementation, which seems to
3534   // be about as canonical as it gets.
3535 
3536   // Everything on MIPS is 4 bytes.  Double-precision FP registers
3537   // are aliased to pairs of single-precision FP registers.
3538   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
3539 
3540   // 0-31 are the general purpose registers, $0 - $31.
3541   // 32-63 are the floating-point registers, $f0 - $f31.
3542   // 64 and 65 are the multiply/divide registers, $hi and $lo.
3543   // 66 is the (notional, I think) register for signal-handler return.
3544   AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
3545 
3546   // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
3547   // They are one bit wide and ignored here.
3548 
3549   // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
3550   // (coprocessor 1 is the FP unit)
3551   // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
3552   // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
3553   // 176-181 are the DSP accumulator registers.
3554   AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
3555   return false;
3556 }
3557 
3558 //===----------------------------------------------------------------------===//
3559 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
3560 // Currently subclassed only to implement custom OpenCL C function attribute
3561 // handling.
3562 //===----------------------------------------------------------------------===//
3563 
3564 namespace {
3565 
3566 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
3567 public:
3568   TCETargetCodeGenInfo(CodeGenTypes &CGT)
3569     : DefaultTargetCodeGenInfo(CGT) {}
3570 
3571   virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
3572                                    CodeGen::CodeGenModule &M) const;
3573 };
3574 
3575 void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
3576                                                llvm::GlobalValue *GV,
3577                                                CodeGen::CodeGenModule &M) const {
3578   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
3579   if (!FD) return;
3580 
3581   llvm::Function *F = cast<llvm::Function>(GV);
3582 
3583   if (M.getLangOpts().OpenCL) {
3584     if (FD->hasAttr<OpenCLKernelAttr>()) {
3585       // OpenCL C Kernel functions are not subject to inlining
3586       F->addFnAttr(llvm::Attribute::NoInline);
3587 
3588       if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
3589 
3590         // Convert the reqd_work_group_size() attributes to metadata.
3591         llvm::LLVMContext &Context = F->getContext();
3592         llvm::NamedMDNode *OpenCLMetadata =
3593             M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");
3594 
3595         SmallVector<llvm::Value*, 5> Operands;
3596         Operands.push_back(F);
3597 
3598         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
3599                              llvm::APInt(32,
3600                              FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
3601         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
3602                              llvm::APInt(32,
3603                                FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim())));
3604         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
3605                              llvm::APInt(32,
3606                                FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim())));
3607 
3608         // Add a boolean constant operand for "required" (true) or "hint" (false)
3609         // for implementing the work_group_size_hint attr later. Currently
3610         // always true as the hint is not yet implemented.
3611         Operands.push_back(llvm::ConstantInt::getTrue(Context));
3612         OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
3613       }
3614     }
3615   }
3616 }
3617 
3618 }
3619 
3620 //===----------------------------------------------------------------------===//
3621 // Hexagon ABI Implementation
3622 //===----------------------------------------------------------------------===//
3623 
3624 namespace {
3625 
3626 class HexagonABIInfo : public ABIInfo {
3627 
3628 
3629 public:
3630   HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
3631 
3632 private:
3633 
3634   ABIArgInfo classifyReturnType(QualType RetTy) const;
3635   ABIArgInfo classifyArgumentType(QualType RetTy) const;
3636 
3637   virtual void computeInfo(CGFunctionInfo &FI) const;
3638 
3639   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3640                                  CodeGenFunction &CGF) const;
3641 };
3642 
3643 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
3644 public:
3645   HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
3646     :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
3647 
3648   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
3649     return 29;
3650   }
3651 };
3652 
3653 }
3654 
3655 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
3656   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3657   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3658        it != ie; ++it)
3659     it->info = classifyArgumentType(it->type);
3660 }
3661 
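// Hexagon passes small aggregates (up to 8 bytes) directly in the smallest
// integer type that holds them; larger aggregates are passed indirectly
// byval.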
3662 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
3663   if (!isAggregateTypeForABI(Ty)) {
3664     // Treat an enum type as its underlying type.
3665     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3666       Ty = EnumTy->getDecl()->getIntegerType();
3667 
3668     return (Ty->isPromotableIntegerType() ?
3669             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3670   }
3671 
3672   // Ignore empty records.
3673   if (isEmptyRecord(getContext(), Ty, true))
3674     return ABIArgInfo::getIgnore();
3675 
3676   // Structures with either a non-trivial destructor or a non-trivial
3677   // copy constructor are always indirect.
3678   if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
3679     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3680 
3681   uint64_t Size = getContext().getTypeSize(Ty);
3682   if (Size > 64)
3683     return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
3684   // Pass in the smallest viable integer type.
3685   else if (Size > 32)
3686     return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
3687   else if (Size > 16)
3688     return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
3689   else if (Size > 8)
3690     return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
3691   else
3692     return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
3693 }
3694 
3695 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
3696   if (RetTy->isVoidType())
3697     return ABIArgInfo::getIgnore();
3698 
3699   // Large vector types should be returned via memory.
3700   if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
3701     return ABIArgInfo::getIndirect(0);
3702 
3703   if (!isAggregateTypeForABI(RetTy)) {
3704     // Treat an enum type as its underlying type.
3705     if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3706       RetTy = EnumTy->getDecl()->getIntegerType();
3707 
3708     return (RetTy->isPromotableIntegerType() ?
3709             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3710   }
3711 
3712   // Structures with either a non-trivial destructor or a non-trivial
3713   // copy constructor are always indirect.
3714   if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
3715     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3716 
3717   if (isEmptyRecord(getContext(), RetTy, true))
3718     return ABIArgInfo::getIgnore();
3719 
3720   // Aggregates <= 8 bytes are returned in registers; other aggregates
3721   // are returned indirectly.
3722   uint64_t Size = getContext().getTypeSize(RetTy);
3723   if (Size <= 64) {
3724     // Return in the smallest viable integer type.
3725     if (Size <= 8)
3726       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
3727     if (Size <= 16)
3728       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
3729     if (Size <= 32)
3730       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
3731     return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
3732   }
3733 
3734   return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
3735 }
3736 
3737 llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3738                                        CodeGenFunction &CGF) const {
3739   // FIXME: Need to handle alignment
3740   llvm::Type *BPP = CGF.Int8PtrPtrTy;
3741 
3742   CGBuilderTy &Builder = CGF.Builder;
3743   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
3744                                                        "ap");
3745   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3746   llvm::Type *PTy =
3747     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3748   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
3749 
3750   uint64_t Offset =
3751     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
3752   llvm::Value *NextAddr =
3753     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
3754                       "ap.next");
3755   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3756 
3757   return AddrTyped;
3758 }
3759 
3760 
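// Construct the TargetCodeGenInfo for the current target triple the first
// time it is needed and cache it for later queries.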
3761 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
3762   if (TheTargetCodeGenInfo)
3763     return *TheTargetCodeGenInfo;
3764 
3765   const llvm::Triple &Triple = getContext().getTargetInfo().getTriple();
3766   switch (Triple.getArch()) {
3767   default:
3768     return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
3769 
3770   case llvm::Triple::mips:
3771   case llvm::Triple::mipsel:
3772     return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
3773 
3774   case llvm::Triple::mips64:
3775   case llvm::Triple::mips64el:
3776     return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
3777 
3778   case llvm::Triple::arm:
3779   case llvm::Triple::thumb:
3780     {
3781       ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
3782 
3783       if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0)
3784         Kind = ARMABIInfo::APCS;
3785       else if (CodeGenOpts.FloatABI == "hard")
3786         Kind = ARMABIInfo::AAPCS_VFP;
3787 
3788       return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
3789     }
3790 
3791   case llvm::Triple::ppc:
3792     return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
3793   case llvm::Triple::ppc64:
3794     return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
3795 
3796   case llvm::Triple::nvptx:
3797   case llvm::Triple::nvptx64:
3798     return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));
3799 
3800   case llvm::Triple::mblaze:
3801     return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));
3802 
3803   case llvm::Triple::msp430:
3804     return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
3805 
3806   case llvm::Triple::tce:
3807     return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));
3808 
3809   case llvm::Triple::x86: {
3810     bool DisableMMX = strcmp(getContext().getTargetInfo().getABI(), "no-mmx") == 0;
3811 
3812     if (Triple.isOSDarwin())
3813       return *(TheTargetCodeGenInfo =
3814                new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX, false,
3815                                            CodeGenOpts.NumRegisterParameters));
3816 
3817     switch (Triple.getOS()) {
3818     case llvm::Triple::Cygwin:
3819     case llvm::Triple::MinGW32:
3820     case llvm::Triple::AuroraUX:
3821     case llvm::Triple::DragonFly:
3822     case llvm::Triple::FreeBSD:
3823     case llvm::Triple::OpenBSD:
3824     case llvm::Triple::Bitrig:
3825       return *(TheTargetCodeGenInfo =
3826                new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX,
3827                                            false,
3828                                            CodeGenOpts.NumRegisterParameters));
3829 
3830     case llvm::Triple::Win32:
3831       return *(TheTargetCodeGenInfo =
3832                new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, true,
3833                                            CodeGenOpts.NumRegisterParameters));
3834 
3835     default:
3836       return *(TheTargetCodeGenInfo =
3837                new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX,
3838                                            false,
3839                                            CodeGenOpts.NumRegisterParameters));
3840     }
3841   }
3842 
3843   case llvm::Triple::x86_64: {
3844     bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0;
3845 
3846     switch (Triple.getOS()) {
3847     case llvm::Triple::Win32:
3848     case llvm::Triple::MinGW32:
3849     case llvm::Triple::Cygwin:
3850       return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
3851     default:
3852       return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
3853                                                                   HasAVX));
3854     }
3855   }
3856   case llvm::Triple::hexagon:
3857     return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
3858   }
3859 }
3860