1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliancy.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "TargetInfo.h"
16 #include "ABIInfo.h"
17 #include "CodeGenFunction.h"
18 #include "clang/AST/RecordLayout.h"
19 #include "clang/Frontend/CodeGenOptions.h"
20 #include "llvm/Type.h"
21 #include "llvm/Target/TargetData.h"
22 #include "llvm/ADT/Triple.h"
23 #include "llvm/Support/raw_ostream.h"
24 using namespace clang;
25 using namespace CodeGen;
26 
27 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
28                                llvm::Value *Array,
29                                llvm::Value *Value,
30                                unsigned FirstIndex,
31                                unsigned LastIndex) {
32   // Alternatively, we could emit this as a loop in the source.
33   for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
34     llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
35     Builder.CreateStore(Value, Cell);
36   }
37 }
38 
39 static bool isAggregateTypeForABI(QualType T) {
40   return CodeGenFunction::hasAggregateLLVMType(T) ||
41          T->isMemberFunctionPointerType();
42 }
43 
44 ABIInfo::~ABIInfo() {}
45 
46 ASTContext &ABIInfo::getContext() const {
47   return CGT.getContext();
48 }
49 
50 llvm::LLVMContext &ABIInfo::getVMContext() const {
51   return CGT.getLLVMContext();
52 }
53 
54 const llvm::TargetData &ABIInfo::getTargetData() const {
55   return CGT.getTargetData();
56 }
57 
58 
59 void ABIArgInfo::dump() const {
60   llvm::raw_ostream &OS = llvm::errs();
61   OS << "(ABIArgInfo Kind=";
62   switch (TheKind) {
63   case Direct:
64     OS << "Direct Type=";
65     if (const llvm::Type *Ty = getCoerceToType())
66       Ty->print(OS);
67     else
68       OS << "null";
69     break;
70   case Extend:
71     OS << "Extend";
72     break;
73   case Ignore:
74     OS << "Ignore";
75     break;
76   case Indirect:
77     OS << "Indirect Align=" << getIndirectAlign()
78        << " Byal=" << getIndirectByVal()
79        << " Realign=" << getIndirectRealign();
80     break;
81   case Expand:
82     OS << "Expand";
83     break;
84   }
85   OS << ")\n";
86 }
87 
88 TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
89 
90 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
91 
92 /// isEmptyField - Return true iff a the field is "empty", that is it
93 /// is an unnamed bit-field or an (array of) empty record(s).
94 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
95                          bool AllowArrays) {
96   if (FD->isUnnamedBitfield())
97     return true;
98 
99   QualType FT = FD->getType();
100 
101     // Constant arrays of empty records count as empty, strip them off.
102   if (AllowArrays)
103     while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
104       FT = AT->getElementType();
105 
106   const RecordType *RT = FT->getAs<RecordType>();
107   if (!RT)
108     return false;
109 
110   // C++ record fields are never empty, at least in the Itanium ABI.
111   //
112   // FIXME: We should use a predicate for whether this behavior is true in the
113   // current ABI.
114   if (isa<CXXRecordDecl>(RT->getDecl()))
115     return false;
116 
117   return isEmptyRecord(Context, FT, AllowArrays);
118 }
119 
120 /// isEmptyRecord - Return true iff a structure contains only empty
121 /// fields. Note that a structure with a flexible array member is not
122 /// considered empty.
123 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
124   const RecordType *RT = T->getAs<RecordType>();
125   if (!RT)
126     return 0;
127   const RecordDecl *RD = RT->getDecl();
128   if (RD->hasFlexibleArrayMember())
129     return false;
130 
131   // If this is a C++ record, check the bases first.
132   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
133     for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
134            e = CXXRD->bases_end(); i != e; ++i)
135       if (!isEmptyRecord(Context, i->getType(), true))
136         return false;
137 
138   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
139          i != e; ++i)
140     if (!isEmptyField(Context, *i, AllowArrays))
141       return false;
142   return true;
143 }
144 
145 /// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
146 /// a non-trivial destructor or a non-trivial copy constructor.
147 static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
148   const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
149   if (!RD)
150     return false;
151 
152   return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
153 }
154 
155 /// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
156 /// a record type with either a non-trivial destructor or a non-trivial copy
157 /// constructor.
158 static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
159   const RecordType *RT = T->getAs<RecordType>();
160   if (!RT)
161     return false;
162 
163   return hasNonTrivialDestructorOrCopyConstructor(RT);
164 }
165 
166 /// isSingleElementStruct - Determine if a structure is a "single
167 /// element struct", i.e. it has exactly one non-empty field or
168 /// exactly one field which is itself a single element
169 /// struct. Structures with flexible array members are never
170 /// considered single element structs.
171 ///
172 /// \return The field declaration for the single non-empty field, if
173 /// it exists.
174 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
175   const RecordType *RT = T->getAsStructureType();
176   if (!RT)
177     return 0;
178 
179   const RecordDecl *RD = RT->getDecl();
180   if (RD->hasFlexibleArrayMember())
181     return 0;
182 
183   const Type *Found = 0;
184 
185   // If this is a C++ record, check the bases first.
186   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
187     for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
188            e = CXXRD->bases_end(); i != e; ++i) {
189       // Ignore empty records.
190       if (isEmptyRecord(Context, i->getType(), true))
191         continue;
192 
193       // If we already found an element then this isn't a single-element struct.
194       if (Found)
195         return 0;
196 
197       // If this is non-empty and not a single element struct, the composite
198       // cannot be a single element struct.
199       Found = isSingleElementStruct(i->getType(), Context);
200       if (!Found)
201         return 0;
202     }
203   }
204 
205   // Check for single element.
206   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
207          i != e; ++i) {
208     const FieldDecl *FD = *i;
209     QualType FT = FD->getType();
210 
211     // Ignore empty fields.
212     if (isEmptyField(Context, FD, true))
213       continue;
214 
215     // If we already found an element then this isn't a single-element
216     // struct.
217     if (Found)
218       return 0;
219 
220     // Treat single element arrays as the element.
221     while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
222       if (AT->getSize().getZExtValue() != 1)
223         break;
224       FT = AT->getElementType();
225     }
226 
227     if (!isAggregateTypeForABI(FT)) {
228       Found = FT.getTypePtr();
229     } else {
230       Found = isSingleElementStruct(FT, Context);
231       if (!Found)
232         return 0;
233     }
234   }
235 
236   return Found;
237 }
238 
239 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
240   if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
241       !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
242       !Ty->isBlockPointerType())
243     return false;
244 
245   uint64_t Size = Context.getTypeSize(Ty);
246   return Size == 32 || Size == 64;
247 }
248 
249 /// canExpandIndirectArgument - Test whether an argument type which is to be
250 /// passed indirectly (on the stack) would have the equivalent layout if it was
251 /// expanded into separate arguments. If so, we prefer to do the latter to avoid
252 /// inhibiting optimizations.
253 ///
254 // FIXME: This predicate is missing many cases, currently it just follows
255 // llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
256 // should probably make this smarter, or better yet make the LLVM backend
257 // capable of handling it.
258 static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
259   // We can only expand structure types.
260   const RecordType *RT = Ty->getAs<RecordType>();
261   if (!RT)
262     return false;
263 
264   // We can only expand (C) structures.
265   //
266   // FIXME: This needs to be generalized to handle classes as well.
267   const RecordDecl *RD = RT->getDecl();
268   if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
269     return false;
270 
271   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
272          i != e; ++i) {
273     const FieldDecl *FD = *i;
274 
275     if (!is32Or64BitBasicType(FD->getType(), Context))
276       return false;
277 
278     // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
279     // how to expand them yet, and the predicate for telling if a bitfield still
280     // counts as "basic" is more complicated than what we were doing previously.
281     if (FD->isBitField())
282       return false;
283   }
284 
285   return true;
286 }
287 
288 namespace {
289 /// DefaultABIInfo - The default implementation for ABI specific
290 /// details. This implementation provides information which results in
291 /// self-consistent and sensible LLVM IR generation, but does not
292 /// conform to any particular ABI.
293 class DefaultABIInfo : public ABIInfo {
294 public:
295   DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
296 
297   ABIArgInfo classifyReturnType(QualType RetTy) const;
298   ABIArgInfo classifyArgumentType(QualType RetTy) const;
299 
300   virtual void computeInfo(CGFunctionInfo &FI) const {
301     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
302     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
303          it != ie; ++it)
304       it->info = classifyArgumentType(it->type);
305   }
306 
307   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
308                                  CodeGenFunction &CGF) const;
309 };
310 
311 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
312 public:
313   DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
314     : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
315 };
316 
317 llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
318                                        CodeGenFunction &CGF) const {
319   return 0;
320 }
321 
322 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
323   if (isAggregateTypeForABI(Ty))
324     return ABIArgInfo::getIndirect(0);
325 
326   // Treat an enum type as its underlying type.
327   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
328     Ty = EnumTy->getDecl()->getIntegerType();
329 
330   return (Ty->isPromotableIntegerType() ?
331           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
332 }
333 
334 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
335   if (RetTy->isVoidType())
336     return ABIArgInfo::getIgnore();
337 
338   if (isAggregateTypeForABI(RetTy))
339     return ABIArgInfo::getIndirect(0);
340 
341   // Treat an enum type as its underlying type.
342   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
343     RetTy = EnumTy->getDecl()->getIntegerType();
344 
345   return (RetTy->isPromotableIntegerType() ?
346           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
347 }
348 
349 /// UseX86_MMXType - Return true if this is an MMX type that should use the special
350 /// x86_mmx type.
351 bool UseX86_MMXType(const llvm::Type *IRType) {
352   // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
353   // special x86_mmx type.
354   return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
355     cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
356     IRType->getScalarSizeInBits() != 64;
357 }
358 
359 static const llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
360                                                 llvm::StringRef Constraint,
361                                                 const llvm::Type* Ty) {
362   if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
363     return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
364   return Ty;
365 }
366 
367 //===----------------------------------------------------------------------===//
368 // X86-32 ABI Implementation
369 //===----------------------------------------------------------------------===//
370 
371 /// X86_32ABIInfo - The X86-32 ABI information.
372 class X86_32ABIInfo : public ABIInfo {
373   static const unsigned MinABIStackAlignInBytes = 4;
374 
375   bool IsDarwinVectorABI;
376   bool IsSmallStructInRegABI;
377 
378   static bool isRegisterSize(unsigned Size) {
379     return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
380   }
381 
382   static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);
383 
384   /// getIndirectResult - Give a source type \arg Ty, return a suitable result
385   /// such that the argument will be passed in memory.
386   ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;
387 
388   /// \brief Return the alignment to use for the given type on the stack.
389   unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
390 
391 public:
392 
393   ABIArgInfo classifyReturnType(QualType RetTy) const;
394   ABIArgInfo classifyArgumentType(QualType RetTy) const;
395 
396   virtual void computeInfo(CGFunctionInfo &FI) const {
397     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
398     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
399          it != ie; ++it)
400       it->info = classifyArgumentType(it->type);
401   }
402 
403   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
404                                  CodeGenFunction &CGF) const;
405 
406   X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
407     : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p) {}
408 };
409 
410 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
411 public:
412   X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
413     :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p)) {}
414 
415   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
416                            CodeGen::CodeGenModule &CGM) const;
417 
418   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
419     // Darwin uses different dwarf register numbers for EH.
420     if (CGM.isTargetDarwin()) return 5;
421 
422     return 4;
423   }
424 
425   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
426                                llvm::Value *Address) const;
427 
428   const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
429                                         llvm::StringRef Constraint,
430                                         const llvm::Type* Ty) const {
431     return X86AdjustInlineAsmType(CGF, Constraint, Ty);
432   }
433 
434 };
435 
436 }
437 
438 /// shouldReturnTypeInRegister - Determine if the given type should be
439 /// passed in a register (for the Darwin ABI).
440 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
441                                                ASTContext &Context) {
442   uint64_t Size = Context.getTypeSize(Ty);
443 
444   // Type must be register sized.
445   if (!isRegisterSize(Size))
446     return false;
447 
448   if (Ty->isVectorType()) {
449     // 64- and 128- bit vectors inside structures are not returned in
450     // registers.
451     if (Size == 64 || Size == 128)
452       return false;
453 
454     return true;
455   }
456 
457   // If this is a builtin, pointer, enum, complex type, member pointer, or
458   // member function pointer it is ok.
459   if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
460       Ty->isAnyComplexType() || Ty->isEnumeralType() ||
461       Ty->isBlockPointerType() || Ty->isMemberPointerType())
462     return true;
463 
464   // Arrays are treated like records.
465   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
466     return shouldReturnTypeInRegister(AT->getElementType(), Context);
467 
468   // Otherwise, it must be a record type.
469   const RecordType *RT = Ty->getAs<RecordType>();
470   if (!RT) return false;
471 
472   // FIXME: Traverse bases here too.
473 
474   // Structure types are passed in register if all fields would be
475   // passed in a register.
476   for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
477          e = RT->getDecl()->field_end(); i != e; ++i) {
478     const FieldDecl *FD = *i;
479 
480     // Empty fields are ignored.
481     if (isEmptyField(Context, FD, true))
482       continue;
483 
484     // Check fields recursively.
485     if (!shouldReturnTypeInRegister(FD->getType(), Context))
486       return false;
487   }
488 
489   return true;
490 }
491 
492 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const {
493   if (RetTy->isVoidType())
494     return ABIArgInfo::getIgnore();
495 
496   if (const VectorType *VT = RetTy->getAs<VectorType>()) {
497     // On Darwin, some vectors are returned in registers.
498     if (IsDarwinVectorABI) {
499       uint64_t Size = getContext().getTypeSize(RetTy);
500 
501       // 128-bit vectors are a special case; they are returned in
502       // registers and we need to make sure to pick a type the LLVM
503       // backend will like.
504       if (Size == 128)
505         return ABIArgInfo::getDirect(llvm::VectorType::get(
506                   llvm::Type::getInt64Ty(getVMContext()), 2));
507 
508       // Always return in register if it fits in a general purpose
509       // register, or if it is 64 bits and has a single element.
510       if ((Size == 8 || Size == 16 || Size == 32) ||
511           (Size == 64 && VT->getNumElements() == 1))
512         return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
513                                                             Size));
514 
515       return ABIArgInfo::getIndirect(0);
516     }
517 
518     return ABIArgInfo::getDirect();
519   }
520 
521   if (isAggregateTypeForABI(RetTy)) {
522     if (const RecordType *RT = RetTy->getAs<RecordType>()) {
523       // Structures with either a non-trivial destructor or a non-trivial
524       // copy constructor are always indirect.
525       if (hasNonTrivialDestructorOrCopyConstructor(RT))
526         return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
527 
528       // Structures with flexible arrays are always indirect.
529       if (RT->getDecl()->hasFlexibleArrayMember())
530         return ABIArgInfo::getIndirect(0);
531     }
532 
533     // If specified, structs and unions are always indirect.
534     if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
535       return ABIArgInfo::getIndirect(0);
536 
537     // Classify "single element" structs as their element type.
538     if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) {
539       if (const BuiltinType *BT = SeltTy->getAs<BuiltinType>()) {
540         if (BT->isIntegerType()) {
541           // We need to use the size of the structure, padding
542           // bit-fields can adjust that to be larger than the single
543           // element type.
544           uint64_t Size = getContext().getTypeSize(RetTy);
545           return ABIArgInfo::getDirect(
546             llvm::IntegerType::get(getVMContext(), (unsigned)Size));
547         }
548 
549         if (BT->getKind() == BuiltinType::Float) {
550           assert(getContext().getTypeSize(RetTy) ==
551                  getContext().getTypeSize(SeltTy) &&
552                  "Unexpect single element structure size!");
553           return ABIArgInfo::getDirect(llvm::Type::getFloatTy(getVMContext()));
554         }
555 
556         if (BT->getKind() == BuiltinType::Double) {
557           assert(getContext().getTypeSize(RetTy) ==
558                  getContext().getTypeSize(SeltTy) &&
559                  "Unexpect single element structure size!");
560           return ABIArgInfo::getDirect(llvm::Type::getDoubleTy(getVMContext()));
561         }
562       } else if (SeltTy->isPointerType()) {
563         // FIXME: It would be really nice if this could come out as the proper
564         // pointer type.
565         const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext());
566         return ABIArgInfo::getDirect(PtrTy);
567       } else if (SeltTy->isVectorType()) {
568         // 64- and 128-bit vectors are never returned in a
569         // register when inside a structure.
570         uint64_t Size = getContext().getTypeSize(RetTy);
571         if (Size == 64 || Size == 128)
572           return ABIArgInfo::getIndirect(0);
573 
574         return classifyReturnType(QualType(SeltTy, 0));
575       }
576     }
577 
578     // Small structures which are register sized are generally returned
579     // in a register.
580     if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext())) {
581       uint64_t Size = getContext().getTypeSize(RetTy);
582       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
583     }
584 
585     return ABIArgInfo::getIndirect(0);
586   }
587 
588   // Treat an enum type as its underlying type.
589   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
590     RetTy = EnumTy->getDecl()->getIntegerType();
591 
592   return (RetTy->isPromotableIntegerType() ?
593           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
594 }
595 
596 static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
597   const RecordType *RT = Ty->getAs<RecordType>();
598   if (!RT)
599     return 0;
600   const RecordDecl *RD = RT->getDecl();
601 
602   // If this is a C++ record, check the bases first.
603   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
604     for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
605            e = CXXRD->bases_end(); i != e; ++i)
606       if (!isRecordWithSSEVectorType(Context, i->getType()))
607         return false;
608 
609   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
610        i != e; ++i) {
611     QualType FT = i->getType();
612 
613     if (FT->getAs<VectorType>() && Context.getTypeSize(Ty) == 128)
614       return true;
615 
616     if (isRecordWithSSEVectorType(Context, FT))
617       return true;
618   }
619 
620   return false;
621 }
622 
623 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
624                                                  unsigned Align) const {
625   // Otherwise, if the alignment is less than or equal to the minimum ABI
626   // alignment, just use the default; the backend will handle this.
627   if (Align <= MinABIStackAlignInBytes)
628     return 0; // Use default alignment.
629 
630   // On non-Darwin, the stack type alignment is always 4.
631   if (!IsDarwinVectorABI) {
632     // Set explicit alignment, since we may need to realign the top.
633     return MinABIStackAlignInBytes;
634   }
635 
636   // Otherwise, if the type contains an SSE vector type, the alignment is 16.
637   if (isRecordWithSSEVectorType(getContext(), Ty))
638     return 16;
639 
640   return MinABIStackAlignInBytes;
641 }
642 
643 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
644   if (!ByVal)
645     return ABIArgInfo::getIndirect(0, false);
646 
647   // Compute the byval alignment.
648   unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
649   unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
650   if (StackAlign == 0)
651     return ABIArgInfo::getIndirect(0);
652 
653   // If the stack alignment is less than the type alignment, realign the
654   // argument.
655   if (StackAlign < TypeAlign)
656     return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
657                                    /*Realign=*/true);
658 
659   return ABIArgInfo::getIndirect(StackAlign);
660 }
661 
662 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
663   // FIXME: Set alignment on indirect arguments.
664   if (isAggregateTypeForABI(Ty)) {
665     // Structures with flexible arrays are always indirect.
666     if (const RecordType *RT = Ty->getAs<RecordType>()) {
667       // Structures with either a non-trivial destructor or a non-trivial
668       // copy constructor are always indirect.
669       if (hasNonTrivialDestructorOrCopyConstructor(RT))
670         return getIndirectResult(Ty, /*ByVal=*/false);
671 
672       if (RT->getDecl()->hasFlexibleArrayMember())
673         return getIndirectResult(Ty);
674     }
675 
676     // Ignore empty structs.
677     if (Ty->isStructureType() && getContext().getTypeSize(Ty) == 0)
678       return ABIArgInfo::getIgnore();
679 
680     // Expand small (<= 128-bit) record types when we know that the stack layout
681     // of those arguments will match the struct. This is important because the
682     // LLVM backend isn't smart enough to remove byval, which inhibits many
683     // optimizations.
684     if (getContext().getTypeSize(Ty) <= 4*32 &&
685         canExpandIndirectArgument(Ty, getContext()))
686       return ABIArgInfo::getExpand();
687 
688     return getIndirectResult(Ty);
689   }
690 
691   if (const VectorType *VT = Ty->getAs<VectorType>()) {
692     // On Darwin, some vectors are passed in memory, we handle this by passing
693     // it as an i8/i16/i32/i64.
694     if (IsDarwinVectorABI) {
695       uint64_t Size = getContext().getTypeSize(Ty);
696       if ((Size == 8 || Size == 16 || Size == 32) ||
697           (Size == 64 && VT->getNumElements() == 1))
698         return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
699                                                             Size));
700     }
701 
702     const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
703     if (UseX86_MMXType(IRType)) {
704       ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
705       AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
706       return AAI;
707     }
708 
709     return ABIArgInfo::getDirect();
710   }
711 
712 
713   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
714     Ty = EnumTy->getDecl()->getIntegerType();
715 
716   return (Ty->isPromotableIntegerType() ?
717           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
718 }
719 
720 llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
721                                       CodeGenFunction &CGF) const {
722   const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
723   const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
724 
725   CGBuilderTy &Builder = CGF.Builder;
726   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
727                                                        "ap");
728   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
729   llvm::Type *PTy =
730     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
731   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
732 
733   uint64_t Offset =
734     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
735   llvm::Value *NextAddr =
736     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
737                       "ap.next");
738   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
739 
740   return AddrTyped;
741 }
742 
743 void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
744                                                   llvm::GlobalValue *GV,
745                                             CodeGen::CodeGenModule &CGM) const {
746   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
747     if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
748       // Get the LLVM function.
749       llvm::Function *Fn = cast<llvm::Function>(GV);
750 
751       // Now add the 'alignstack' attribute with a value of 16.
752       Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16));
753     }
754   }
755 }
756 
757 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
758                                                CodeGen::CodeGenFunction &CGF,
759                                                llvm::Value *Address) const {
760   CodeGen::CGBuilderTy &Builder = CGF.Builder;
761   llvm::LLVMContext &Context = CGF.getLLVMContext();
762 
763   const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
764   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
765 
766   // 0-7 are the eight integer registers;  the order is different
767   //   on Darwin (for EH), but the range is the same.
768   // 8 is %eip.
769   AssignToArrayRange(Builder, Address, Four8, 0, 8);
770 
771   if (CGF.CGM.isTargetDarwin()) {
772     // 12-16 are st(0..4).  Not sure why we stop at 4.
773     // These have size 16, which is sizeof(long double) on
774     // platforms with 8-byte alignment for that type.
775     llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
776     AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
777 
778   } else {
779     // 9 is %eflags, which doesn't get a size on Darwin for some
780     // reason.
781     Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));
782 
783     // 11-16 are st(0..5).  Not sure why we stop at 5.
784     // These have size 12, which is sizeof(long double) on
785     // platforms with 4-byte alignment for that type.
786     llvm::Value *Twelve8 = llvm::ConstantInt::get(i8, 12);
787     AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
788   }
789 
790   return false;
791 }
792 
793 //===----------------------------------------------------------------------===//
794 // X86-64 ABI Implementation
795 //===----------------------------------------------------------------------===//
796 
797 
798 namespace {
799 /// X86_64ABIInfo - The X86_64 ABI information.
800 class X86_64ABIInfo : public ABIInfo {
801   enum Class {
802     Integer = 0,
803     SSE,
804     SSEUp,
805     X87,
806     X87Up,
807     ComplexX87,
808     NoClass,
809     Memory
810   };
811 
812   /// merge - Implement the X86_64 ABI merging algorithm.
813   ///
814   /// Merge an accumulating classification \arg Accum with a field
815   /// classification \arg Field.
816   ///
817   /// \param Accum - The accumulating classification. This should
818   /// always be either NoClass or the result of a previous merge
819   /// call. In addition, this should never be Memory (the caller
820   /// should just return Memory for the aggregate).
821   static Class merge(Class Accum, Class Field);
822 
823   /// classify - Determine the x86_64 register classes in which the
824   /// given type T should be passed.
825   ///
826   /// \param Lo - The classification for the parts of the type
827   /// residing in the low word of the containing object.
828   ///
829   /// \param Hi - The classification for the parts of the type
830   /// residing in the high word of the containing object.
831   ///
832   /// \param OffsetBase - The bit offset of this type in the
833   /// containing object.  Some parameters are classified different
834   /// depending on whether they straddle an eightbyte boundary.
835   ///
836   /// If a word is unused its result will be NoClass; if a type should
837   /// be passed in Memory then at least the classification of \arg Lo
838   /// will be Memory.
839   ///
840   /// The \arg Lo class will be NoClass iff the argument is ignored.
841   ///
842   /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
843   /// also be ComplexX87.
844   void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;
845 
846   const llvm::Type *Get16ByteVectorType(QualType Ty) const;
847   const llvm::Type *GetSSETypeAtOffset(const llvm::Type *IRType,
848                                        unsigned IROffset, QualType SourceTy,
849                                        unsigned SourceOffset) const;
850   const llvm::Type *GetINTEGERTypeAtOffset(const llvm::Type *IRType,
851                                            unsigned IROffset, QualType SourceTy,
852                                            unsigned SourceOffset) const;
853 
854   /// getIndirectResult - Give a source type \arg Ty, return a suitable result
855   /// such that the argument will be returned in memory.
856   ABIArgInfo getIndirectReturnResult(QualType Ty) const;
857 
858   /// getIndirectResult - Give a source type \arg Ty, return a suitable result
859   /// such that the argument will be passed in memory.
860   ABIArgInfo getIndirectResult(QualType Ty) const;
861 
862   ABIArgInfo classifyReturnType(QualType RetTy) const;
863 
864   ABIArgInfo classifyArgumentType(QualType Ty,
865                                   unsigned &neededInt,
866                                   unsigned &neededSSE) const;
867 
868   /// The 0.98 ABI revision clarified a lot of ambiguities,
869   /// unfortunately in ways that were not always consistent with
870   /// certain previous compilers.  In particular, platforms which
871   /// required strict binary compatibility with older versions of GCC
872   /// may need to exempt themselves.
873   bool honorsRevision0_98() const {
874     return !getContext().Target.getTriple().isOSDarwin();
875   }
876 
877 public:
878   X86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
879 
880   virtual void computeInfo(CGFunctionInfo &FI) const;
881 
882   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
883                                  CodeGenFunction &CGF) const;
884 };
885 
886 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
887 class WinX86_64ABIInfo : public ABIInfo {
888 
889   ABIArgInfo classify(QualType Ty) const;
890 
891 public:
892   WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
893 
894   virtual void computeInfo(CGFunctionInfo &FI) const;
895 
896   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
897                                  CodeGenFunction &CGF) const;
898 };
899 
900 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
901 public:
902   X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
903     : TargetCodeGenInfo(new X86_64ABIInfo(CGT)) {}
904 
905   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
906     return 7;
907   }
908 
909   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
910                                llvm::Value *Address) const {
911     CodeGen::CGBuilderTy &Builder = CGF.Builder;
912     llvm::LLVMContext &Context = CGF.getLLVMContext();
913 
914     const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
915     llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
916 
917     // 0-15 are the 16 integer registers.
918     // 16 is %rip.
919     AssignToArrayRange(Builder, Address, Eight8, 0, 16);
920 
921     return false;
922   }
923 
924   const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
925                                         llvm::StringRef Constraint,
926                                         const llvm::Type* Ty) const {
927     return X86AdjustInlineAsmType(CGF, Constraint, Ty);
928   }
929 
930 };
931 
932 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
933 public:
934   WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
935     : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
936 
937   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
938     return 7;
939   }
940 
941   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
942                                llvm::Value *Address) const {
943     CodeGen::CGBuilderTy &Builder = CGF.Builder;
944     llvm::LLVMContext &Context = CGF.getLLVMContext();
945 
946     const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
947     llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
948 
949     // 0-15 are the 16 integer registers.
950     // 16 is %rip.
951     AssignToArrayRange(Builder, Address, Eight8, 0, 16);
952 
953     return false;
954   }
955 };
956 
957 }
958 
959 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
960   // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
961   // classified recursively so that always two fields are
962   // considered. The resulting class is calculated according to
963   // the classes of the fields in the eightbyte:
964   //
965   // (a) If both classes are equal, this is the resulting class.
966   //
967   // (b) If one of the classes is NO_CLASS, the resulting class is
968   // the other class.
969   //
970   // (c) If one of the classes is MEMORY, the result is the MEMORY
971   // class.
972   //
973   // (d) If one of the classes is INTEGER, the result is the
974   // INTEGER.
975   //
976   // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
977   // MEMORY is used as class.
978   //
979   // (f) Otherwise class SSE is used.
980 
981   // Accum should never be memory (we should have returned) or
982   // ComplexX87 (because this cannot be passed in a structure).
983   assert((Accum != Memory && Accum != ComplexX87) &&
984          "Invalid accumulated classification during merge.");
985   if (Accum == Field || Field == NoClass)
986     return Accum;
987   if (Field == Memory)
988     return Memory;
989   if (Accum == NoClass)
990     return Field;
991   if (Accum == Integer || Field == Integer)
992     return Integer;
993   if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
994       Accum == X87 || Accum == X87Up)
995     return Memory;
996   return SSE;
997 }
998 
999 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
1000                              Class &Lo, Class &Hi) const {
1001   // FIXME: This code can be simplified by introducing a simple value class for
1002   // Class pairs with appropriate constructor methods for the various
1003   // situations.
1004 
1005   // FIXME: Some of the split computations are wrong; unaligned vectors
1006   // shouldn't be passed in registers for example, so there is no chance they
1007   // can straddle an eightbyte. Verify & simplify.
1008 
1009   Lo = Hi = NoClass;
1010 
1011   Class &Current = OffsetBase < 64 ? Lo : Hi;
1012   Current = Memory;
1013 
1014   if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
1015     BuiltinType::Kind k = BT->getKind();
1016 
1017     if (k == BuiltinType::Void) {
1018       Current = NoClass;
1019     } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
1020       Lo = Integer;
1021       Hi = Integer;
1022     } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
1023       Current = Integer;
1024     } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
1025       Current = SSE;
1026     } else if (k == BuiltinType::LongDouble) {
1027       Lo = X87;
1028       Hi = X87Up;
1029     }
1030     // FIXME: _Decimal32 and _Decimal64 are SSE.
1031     // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
1032     return;
1033   }
1034 
1035   if (const EnumType *ET = Ty->getAs<EnumType>()) {
1036     // Classify the underlying integer type.
1037     classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
1038     return;
1039   }
1040 
1041   if (Ty->hasPointerRepresentation()) {
1042     Current = Integer;
1043     return;
1044   }
1045 
1046   if (Ty->isMemberPointerType()) {
1047     if (Ty->isMemberFunctionPointerType())
1048       Lo = Hi = Integer;
1049     else
1050       Current = Integer;
1051     return;
1052   }
1053 
1054   if (const VectorType *VT = Ty->getAs<VectorType>()) {
1055     uint64_t Size = getContext().getTypeSize(VT);
1056     if (Size == 32) {
1057       // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
1058       // float> as integer.
1059       Current = Integer;
1060 
1061       // If this type crosses an eightbyte boundary, it should be
1062       // split.
1063       uint64_t EB_Real = (OffsetBase) / 64;
1064       uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
1065       if (EB_Real != EB_Imag)
1066         Hi = Lo;
1067     } else if (Size == 64) {
1068       // gcc passes <1 x double> in memory. :(
1069       if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
1070         return;
1071 
1072       // gcc passes <1 x long long> as INTEGER.
1073       if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
1074           VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
1075           VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
1076           VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
1077         Current = Integer;
1078       else
1079         Current = SSE;
1080 
1081       // If this type crosses an eightbyte boundary, it should be
1082       // split.
1083       if (OffsetBase && OffsetBase != 64)
1084         Hi = Lo;
1085     } else if (Size == 128) {
1086       Lo = SSE;
1087       Hi = SSEUp;
1088     }
1089     return;
1090   }
1091 
1092   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
1093     QualType ET = getContext().getCanonicalType(CT->getElementType());
1094 
1095     uint64_t Size = getContext().getTypeSize(Ty);
1096     if (ET->isIntegralOrEnumerationType()) {
1097       if (Size <= 64)
1098         Current = Integer;
1099       else if (Size <= 128)
1100         Lo = Hi = Integer;
1101     } else if (ET == getContext().FloatTy)
1102       Current = SSE;
1103     else if (ET == getContext().DoubleTy)
1104       Lo = Hi = SSE;
1105     else if (ET == getContext().LongDoubleTy)
1106       Current = ComplexX87;
1107 
1108     // If this complex type crosses an eightbyte boundary then it
1109     // should be split.
1110     uint64_t EB_Real = (OffsetBase) / 64;
1111     uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
1112     if (Hi == NoClass && EB_Real != EB_Imag)
1113       Hi = Lo;
1114 
1115     return;
1116   }
1117 
1118   if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
1119     // Arrays are treated like structures.
1120 
1121     uint64_t Size = getContext().getTypeSize(Ty);
1122 
1123     // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1124     // than two eightbytes, ..., it has class MEMORY.
1125     if (Size > 128)
1126       return;
1127 
1128     // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
1129     // fields, it has class MEMORY.
1130     //
1131     // Only need to check alignment of array base.
1132     if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
1133       return;
1134 
1135     // Otherwise implement simplified merge. We could be smarter about
1136     // this, but it isn't worth it and would be harder to verify.
1137     Current = NoClass;
1138     uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
1139     uint64_t ArraySize = AT->getSize().getZExtValue();
1140     for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
1141       Class FieldLo, FieldHi;
1142       classify(AT->getElementType(), Offset, FieldLo, FieldHi);
1143       Lo = merge(Lo, FieldLo);
1144       Hi = merge(Hi, FieldHi);
1145       if (Lo == Memory || Hi == Memory)
1146         break;
1147     }
1148 
1149     // Do post merger cleanup (see below). Only case we worry about is Memory.
1150     if (Hi == Memory)
1151       Lo = Memory;
1152     assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
1153     return;
1154   }
1155 
1156   if (const RecordType *RT = Ty->getAs<RecordType>()) {
1157     uint64_t Size = getContext().getTypeSize(Ty);
1158 
1159     // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1160     // than two eightbytes, ..., it has class MEMORY.
1161     if (Size > 128)
1162       return;
1163 
1164     // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
1165     // copy constructor or a non-trivial destructor, it is passed by invisible
1166     // reference.
1167     if (hasNonTrivialDestructorOrCopyConstructor(RT))
1168       return;
1169 
1170     const RecordDecl *RD = RT->getDecl();
1171 
1172     // Assume variable sized types are passed in memory.
1173     if (RD->hasFlexibleArrayMember())
1174       return;
1175 
1176     const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
1177 
1178     // Reset Lo class, this will be recomputed.
1179     Current = NoClass;
1180 
1181     // If this is a C++ record, classify the bases first.
1182     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1183       for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
1184              e = CXXRD->bases_end(); i != e; ++i) {
1185         assert(!i->isVirtual() && !i->getType()->isDependentType() &&
1186                "Unexpected base class!");
1187         const CXXRecordDecl *Base =
1188           cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
1189 
1190         // Classify this field.
1191         //
1192         // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
1193         // single eightbyte, each is classified separately. Each eightbyte gets
1194         // initialized to class NO_CLASS.
1195         Class FieldLo, FieldHi;
1196         uint64_t Offset = OffsetBase + Layout.getBaseClassOffsetInBits(Base);
1197         classify(i->getType(), Offset, FieldLo, FieldHi);
1198         Lo = merge(Lo, FieldLo);
1199         Hi = merge(Hi, FieldHi);
1200         if (Lo == Memory || Hi == Memory)
1201           break;
1202       }
1203     }
1204 
1205     // Classify the fields one at a time, merging the results.
1206     unsigned idx = 0;
1207     for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1208            i != e; ++i, ++idx) {
1209       uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1210       bool BitField = i->isBitField();
1211 
1212       // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
1213       // fields, it has class MEMORY.
1214       //
1215       // Note, skip this test for bit-fields, see below.
1216       if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
1217         Lo = Memory;
1218         return;
1219       }
1220 
1221       // Classify this field.
1222       //
1223       // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
1224       // exceeds a single eightbyte, each is classified
1225       // separately. Each eightbyte gets initialized to class
1226       // NO_CLASS.
1227       Class FieldLo, FieldHi;
1228 
1229       // Bit-fields require special handling, they do not force the
1230       // structure to be passed in memory even if unaligned, and
1231       // therefore they can straddle an eightbyte.
1232       if (BitField) {
1233         // Ignore padding bit-fields.
1234         if (i->isUnnamedBitfield())
1235           continue;
1236 
1237         uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1238         uint64_t Size =
1239           i->getBitWidth()->EvaluateAsInt(getContext()).getZExtValue();
1240 
1241         uint64_t EB_Lo = Offset / 64;
1242         uint64_t EB_Hi = (Offset + Size - 1) / 64;
1243         FieldLo = FieldHi = NoClass;
1244         if (EB_Lo) {
1245           assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
1246           FieldLo = NoClass;
1247           FieldHi = Integer;
1248         } else {
1249           FieldLo = Integer;
1250           FieldHi = EB_Hi ? Integer : NoClass;
1251         }
1252       } else
1253         classify(i->getType(), Offset, FieldLo, FieldHi);
1254       Lo = merge(Lo, FieldLo);
1255       Hi = merge(Hi, FieldHi);
1256       if (Lo == Memory || Hi == Memory)
1257         break;
1258     }
1259 
1260     // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
1261     //
1262     // (a) If one of the classes is MEMORY, the whole argument is
1263     // passed in memory.
1264     //
1265     // (b) If X87UP is not preceded by X87, the whole argument is
1266     // passed in memory.
1267     //
1268     // (c) If the size of the aggregate exceeds two eightbytes and the first
1269     // eight-byte isn’t SSE or any other eightbyte isn’t SSEUP, the whole
1270     // argument is passed in memory.
1271     //
1272     // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
1273     //
1274     // Some of these are enforced by the merging logic.  Others can arise
1275     // only with unions; for example:
1276     //   union { _Complex double; unsigned; }
1277     //
1278     // Note that clauses (b) and (c) were added in 0.98.
1279     if (Hi == Memory)
1280       Lo = Memory;
1281     if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
1282       Lo = Memory;
1283     if (Hi == SSEUp && Lo != SSE)
1284       Hi = SSE;
1285   }
1286 }
1287 
1288 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
1289   // If this is a scalar LLVM value then assume LLVM will pass it in the right
1290   // place naturally.
1291   if (!isAggregateTypeForABI(Ty)) {
1292     // Treat an enum type as its underlying type.
1293     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1294       Ty = EnumTy->getDecl()->getIntegerType();
1295 
1296     return (Ty->isPromotableIntegerType() ?
1297             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1298   }
1299 
1300   return ABIArgInfo::getIndirect(0);
1301 }
1302 
1303 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
1304   // If this is a scalar LLVM value then assume LLVM will pass it in the right
1305   // place naturally.
1306   if (!isAggregateTypeForABI(Ty)) {
1307     // Treat an enum type as its underlying type.
1308     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1309       Ty = EnumTy->getDecl()->getIntegerType();
1310 
1311     return (Ty->isPromotableIntegerType() ?
1312             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1313   }
1314 
1315   if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
1316     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
1317 
1318   // Compute the byval alignment. We trust the back-end to honor the
1319   // minimum ABI alignment for byval, to make cleaner IR.
1320   const unsigned MinABIAlign = 8;
1321   unsigned Align = getContext().getTypeAlign(Ty) / 8;
1322   if (Align > MinABIAlign)
1323     return ABIArgInfo::getIndirect(Align);
1324   return ABIArgInfo::getIndirect(0);
1325 }
1326 
1327 /// Get16ByteVectorType - The ABI specifies that a value should be passed in an
1328 /// full vector XMM register.  Pick an LLVM IR type that will be passed as a
1329 /// vector register.
1330 const llvm::Type *X86_64ABIInfo::Get16ByteVectorType(QualType Ty) const {
1331   const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
1332 
1333   // Wrapper structs that just contain vectors are passed just like vectors,
1334   // strip them off if present.
1335   const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
1336   while (STy && STy->getNumElements() == 1) {
1337     IRType = STy->getElementType(0);
1338     STy = dyn_cast<llvm::StructType>(IRType);
1339   }
1340 
1341   // If the preferred type is a 16-byte vector, prefer to pass it.
1342   if (const llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
1343     const llvm::Type *EltTy = VT->getElementType();
1344     if (VT->getBitWidth() == 128 &&
1345         (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
1346          EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
1347          EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
1348          EltTy->isIntegerTy(128)))
1349       return VT;
1350   }
1351 
1352   return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
1353 }
1354 
1355 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
1356 /// is known to either be off the end of the specified type or being in
1357 /// alignment padding.  The user type specified is known to be at most 128 bits
1358 /// in size, and have passed through X86_64ABIInfo::classify with a successful
1359 /// classification that put one of the two halves in the INTEGER class.
1360 ///
1361 /// It is conservatively correct to return false.
1362 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
1363                                   unsigned EndBit, ASTContext &Context) {
1364   // If the bytes being queried are off the end of the type, there is no user
1365   // data hiding here.  This handles analysis of builtins, vectors and other
1366   // types that don't contain interesting padding.
1367   unsigned TySize = (unsigned)Context.getTypeSize(Ty);
1368   if (TySize <= StartBit)
1369     return true;
1370 
1371   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
1372     unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
1373     unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
1374 
1375     // Check each element to see if the element overlaps with the queried range.
1376     for (unsigned i = 0; i != NumElts; ++i) {
1377       // If the element is after the span we care about, then we're done..
1378       unsigned EltOffset = i*EltSize;
1379       if (EltOffset >= EndBit) break;
1380 
1381       unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
1382       if (!BitsContainNoUserData(AT->getElementType(), EltStart,
1383                                  EndBit-EltOffset, Context))
1384         return false;
1385     }
1386     // If it overlaps no elements, then it is safe to process as padding.
1387     return true;
1388   }
1389 
1390   if (const RecordType *RT = Ty->getAs<RecordType>()) {
1391     const RecordDecl *RD = RT->getDecl();
1392     const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
1393 
1394     // If this is a C++ record, check the bases first.
1395     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1396       for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
1397            e = CXXRD->bases_end(); i != e; ++i) {
1398         assert(!i->isVirtual() && !i->getType()->isDependentType() &&
1399                "Unexpected base class!");
1400         const CXXRecordDecl *Base =
1401           cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
1402 
1403         // If the base is after the span we care about, ignore it.
1404         unsigned BaseOffset = (unsigned)Layout.getBaseClassOffsetInBits(Base);
1405         if (BaseOffset >= EndBit) continue;
1406 
1407         unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
1408         if (!BitsContainNoUserData(i->getType(), BaseStart,
1409                                    EndBit-BaseOffset, Context))
1410           return false;
1411       }
1412     }
1413 
1414     // Verify that no field has data that overlaps the region of interest.  Yes
1415     // this could be sped up a lot by being smarter about queried fields,
1416     // however we're only looking at structs up to 16 bytes, so we don't care
1417     // much.
1418     unsigned idx = 0;
1419     for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1420          i != e; ++i, ++idx) {
1421       unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
1422 
1423       // If we found a field after the region we care about, then we're done.
1424       if (FieldOffset >= EndBit) break;
1425 
1426       unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
1427       if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
1428                                  Context))
1429         return false;
1430     }
1431 
1432     // If nothing in this record overlapped the area of interest, then we're
1433     // clean.
1434     return true;
1435   }
1436 
1437   return false;
1438 }
1439 
1440 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
1441 /// float member at the specified offset.  For example, {int,{float}} has a
1442 /// float at offset 4.  It is conservatively correct for this routine to return
1443 /// false.
1444 static bool ContainsFloatAtOffset(const llvm::Type *IRType, unsigned IROffset,
1445                                   const llvm::TargetData &TD) {
1446   // Base case if we find a float.
1447   if (IROffset == 0 && IRType->isFloatTy())
1448     return true;
1449 
1450   // If this is a struct, recurse into the field at the specified offset.
1451   if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
1452     const llvm::StructLayout *SL = TD.getStructLayout(STy);
1453     unsigned Elt = SL->getElementContainingOffset(IROffset);
1454     IROffset -= SL->getElementOffset(Elt);
1455     return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
1456   }
1457 
1458   // If this is an array, recurse into the field at the specified offset.
1459   if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
1460     const llvm::Type *EltTy = ATy->getElementType();
1461     unsigned EltSize = TD.getTypeAllocSize(EltTy);
1462     IROffset -= IROffset/EltSize*EltSize;
1463     return ContainsFloatAtOffset(EltTy, IROffset, TD);
1464   }
1465 
1466   return false;
1467 }
1468 
1469 
1470 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
1471 /// low 8 bytes of an XMM register, corresponding to the SSE class.
1472 const llvm::Type *X86_64ABIInfo::
1473 GetSSETypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
1474                    QualType SourceTy, unsigned SourceOffset) const {
1475   // The only three choices we have are either double, <2 x float>, or float. We
1476   // pass as float if the last 4 bytes is just padding.  This happens for
1477   // structs that contain 3 floats.
1478   if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
1479                             SourceOffset*8+64, getContext()))
1480     return llvm::Type::getFloatTy(getVMContext());
1481 
1482   // We want to pass as <2 x float> if the LLVM IR type contains a float at
1483   // offset+0 and offset+4.  Walk the LLVM IR type to find out if this is the
1484   // case.
1485   if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) &&
1486       ContainsFloatAtOffset(IRType, IROffset+4, getTargetData()))
1487     return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
1488 
1489   return llvm::Type::getDoubleTy(getVMContext());
1490 }
1491 
1492 
1493 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
1494 /// an 8-byte GPR.  This means that we either have a scalar or we are talking
1495 /// about the high or low part of an up-to-16-byte struct.  This routine picks
1496 /// the best LLVM IR type to represent this, which may be i64 or may be anything
1497 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
1498 /// etc).
1499 ///
1500 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
1501 /// the source type.  IROffset is an offset in bytes into the LLVM IR type that
1502 /// the 8-byte value references.  PrefType may be null.
1503 ///
1504 /// SourceTy is the source level type for the entire argument.  SourceOffset is
1505 /// an offset into this that we're processing (which is always either 0 or 8).
1506 ///
1507 const llvm::Type *X86_64ABIInfo::
1508 GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
1509                        QualType SourceTy, unsigned SourceOffset) const {
1510   // If we're dealing with an un-offset LLVM IR type, then it means that we're
1511   // returning an 8-byte unit starting with it.  See if we can safely use it.
1512   if (IROffset == 0) {
1513     // Pointers and int64's always fill the 8-byte unit.
1514     if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64))
1515       return IRType;
1516 
1517     // If we have a 1/2/4-byte integer, we can use it only if the rest of the
1518     // goodness in the source type is just tail padding.  This is allowed to
1519     // kick in for struct {double,int} on the int, but not on
1520     // struct{double,int,int} because we wouldn't return the second int.  We
1521     // have to do this analysis on the source type because we can't depend on
1522     // unions being lowered a specific way etc.
1523     if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
1524         IRType->isIntegerTy(32)) {
1525       unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth();
1526 
1527       if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
1528                                 SourceOffset*8+64, getContext()))
1529         return IRType;
1530     }
1531   }
1532 
1533   if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
1534     // If this is a struct, recurse into the field at the specified offset.
1535     const llvm::StructLayout *SL = getTargetData().getStructLayout(STy);
1536     if (IROffset < SL->getSizeInBytes()) {
1537       unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
1538       IROffset -= SL->getElementOffset(FieldIdx);
1539 
1540       return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
1541                                     SourceTy, SourceOffset);
1542     }
1543   }
1544 
1545   if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
1546     const llvm::Type *EltTy = ATy->getElementType();
1547     unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
1548     unsigned EltOffset = IROffset/EltSize*EltSize;
1549     return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
1550                                   SourceOffset);
1551   }
1552 
1553   // Okay, we don't have any better idea of what to pass, so we pass this in an
1554   // integer register that isn't too big to fit the rest of the struct.
1555   unsigned TySizeInBytes =
1556     (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
1557 
1558   assert(TySizeInBytes != SourceOffset && "Empty field?");
1559 
1560   // It is always safe to classify this as an integer type up to i64 that
1561   // isn't larger than the structure.
1562   return llvm::IntegerType::get(getVMContext(),
1563                                 std::min(TySizeInBytes-SourceOffset, 8U)*8);
1564 }
1565 
1566 
1567 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
1568 /// be used as elements of a two register pair to pass or return, return a
1569 /// first class aggregate to represent them.  For example, if the low part of
1570 /// a by-value argument should be passed as i32* and the high part as float,
1571 /// return {i32*, float}.
1572 static const llvm::Type *
1573 GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
1574                            const llvm::TargetData &TD) {
1575   // In order to correctly satisfy the ABI, we need to the high part to start
1576   // at offset 8.  If the high and low parts we inferred are both 4-byte types
1577   // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
1578   // the second element at offset 8.  Check for this:
1579   unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
1580   unsigned HiAlign = TD.getABITypeAlignment(Hi);
1581   unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
1582   assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
1583 
1584   // To handle this, we have to increase the size of the low part so that the
1585   // second element will start at an 8 byte offset.  We can't increase the size
1586   // of the second element because it might make us access off the end of the
1587   // struct.
1588   if (HiStart != 8) {
1589     // There are only two sorts of types the ABI generation code can produce for
1590     // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
1591     // Promote these to a larger type.
1592     if (Lo->isFloatTy())
1593       Lo = llvm::Type::getDoubleTy(Lo->getContext());
1594     else {
1595       assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
1596       Lo = llvm::Type::getInt64Ty(Lo->getContext());
1597     }
1598   }
1599 
1600   const llvm::StructType *Result =
1601     llvm::StructType::get(Lo->getContext(), Lo, Hi, NULL);
1602 
1603 
1604   // Verify that the second element is at an 8-byte offset.
1605   assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
1606          "Invalid x86-64 argument pair!");
1607   return Result;
1608 }
1609 
1610 ABIArgInfo X86_64ABIInfo::
1611 classifyReturnType(QualType RetTy) const {
1612   // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
1613   // classification algorithm.
1614   X86_64ABIInfo::Class Lo, Hi;
1615   classify(RetTy, 0, Lo, Hi);
1616 
1617   // Check some invariants.
1618   assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
1619   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
1620 
1621   const llvm::Type *ResType = 0;
1622   switch (Lo) {
1623   case NoClass:
1624     if (Hi == NoClass)
1625       return ABIArgInfo::getIgnore();
1626     // If the low part is just padding, it takes no register, leave ResType
1627     // null.
1628     assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
1629            "Unknown missing lo part");
1630     break;
1631 
1632   case SSEUp:
1633   case X87Up:
1634     assert(0 && "Invalid classification for lo word.");
1635 
1636     // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
1637     // hidden argument.
1638   case Memory:
1639     return getIndirectReturnResult(RetTy);
1640 
1641     // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
1642     // available register of the sequence %rax, %rdx is used.
1643   case Integer:
1644     ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0,
1645                                      RetTy, 0);
1646 
1647     // If we have a sign or zero extended integer, make sure to return Extend
1648     // so that the parameter gets the right LLVM IR attributes.
1649     if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
1650       // Treat an enum type as its underlying type.
1651       if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1652         RetTy = EnumTy->getDecl()->getIntegerType();
1653 
1654       if (RetTy->isIntegralOrEnumerationType() &&
1655           RetTy->isPromotableIntegerType())
1656         return ABIArgInfo::getExtend();
1657     }
1658     break;
1659 
1660     // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
1661     // available SSE register of the sequence %xmm0, %xmm1 is used.
1662   case SSE:
1663     ResType = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0, RetTy, 0);
1664     break;
1665 
1666     // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
1667     // returned on the X87 stack in %st0 as 80-bit x87 number.
1668   case X87:
1669     ResType = llvm::Type::getX86_FP80Ty(getVMContext());
1670     break;
1671 
1672     // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
1673     // part of the value is returned in %st0 and the imaginary part in
1674     // %st1.
1675   case ComplexX87:
1676     assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
1677     ResType = llvm::StructType::get(getVMContext(),
1678                                     llvm::Type::getX86_FP80Ty(getVMContext()),
1679                                     llvm::Type::getX86_FP80Ty(getVMContext()),
1680                                     NULL);
1681     break;
1682   }
1683 
1684   const llvm::Type *HighPart = 0;
1685   switch (Hi) {
1686     // Memory was handled previously and X87 should
1687     // never occur as a hi class.
1688   case Memory:
1689   case X87:
1690     assert(0 && "Invalid classification for hi word.");
1691 
1692   case ComplexX87: // Previously handled.
1693   case NoClass:
1694     break;
1695 
1696   case Integer:
1697     HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy),
1698                                       8, RetTy, 8);
1699     if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
1700       return ABIArgInfo::getDirect(HighPart, 8);
1701     break;
1702   case SSE:
1703     HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 8, RetTy, 8);
1704     if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
1705       return ABIArgInfo::getDirect(HighPart, 8);
1706     break;
1707 
1708     // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
1709     // is passed in the upper half of the last used SSE register.
1710     //
1711     // SSEUP should always be preceded by SSE, just widen.
1712   case SSEUp:
1713     assert(Lo == SSE && "Unexpected SSEUp classification.");
1714     ResType = Get16ByteVectorType(RetTy);
1715     break;
1716 
1717     // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
1718     // returned together with the previous X87 value in %st0.
1719   case X87Up:
1720     // If X87Up is preceded by X87, we don't need to do
1721     // anything. However, in some cases with unions it may not be
1722     // preceded by X87. In such situations we follow gcc and pass the
1723     // extra bits in an SSE reg.
1724     if (Lo != X87) {
1725       HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy),
1726                                     8, RetTy, 8);
1727       if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
1728         return ABIArgInfo::getDirect(HighPart, 8);
1729     }
1730     break;
1731   }
1732 
1733   // If a high part was specified, merge it together with the low part.  It is
1734   // known to pass in the high eightbyte of the result.  We do this by forming a
1735   // first class struct aggregate with the high and low part: {low, high}
1736   if (HighPart)
1737     ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
1738 
1739   return ABIArgInfo::getDirect(ResType);
1740 }
1741 
1742 ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
1743                                                unsigned &neededSSE) const {
1744   X86_64ABIInfo::Class Lo, Hi;
1745   classify(Ty, 0, Lo, Hi);
1746 
1747   // Check some invariants.
1748   // FIXME: Enforce these by construction.
1749   assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
1750   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
1751 
1752   neededInt = 0;
1753   neededSSE = 0;
1754   const llvm::Type *ResType = 0;
1755   switch (Lo) {
1756   case NoClass:
1757     if (Hi == NoClass)
1758       return ABIArgInfo::getIgnore();
1759     // If the low part is just padding, it takes no register, leave ResType
1760     // null.
1761     assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
1762            "Unknown missing lo part");
1763     break;
1764 
1765     // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
1766     // on the stack.
1767   case Memory:
1768 
1769     // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
1770     // COMPLEX_X87, it is passed in memory.
1771   case X87:
1772   case ComplexX87:
1773     return getIndirectResult(Ty);
1774 
1775   case SSEUp:
1776   case X87Up:
1777     assert(0 && "Invalid classification for lo word.");
1778 
1779     // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
1780     // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
1781     // and %r9 is used.
1782   case Integer:
1783     ++neededInt;
1784 
1785     // Pick an 8-byte type based on the preferred type.
1786     ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 0, Ty, 0);
1787 
1788     // If we have a sign or zero extended integer, make sure to return Extend
1789     // so that the parameter gets the right LLVM IR attributes.
1790     if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
1791       // Treat an enum type as its underlying type.
1792       if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1793         Ty = EnumTy->getDecl()->getIntegerType();
1794 
1795       if (Ty->isIntegralOrEnumerationType() &&
1796           Ty->isPromotableIntegerType())
1797         return ABIArgInfo::getExtend();
1798     }
1799 
1800     break;
1801 
1802     // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
1803     // available SSE register is used, the registers are taken in the
1804     // order from %xmm0 to %xmm7.
1805   case SSE: {
1806     const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
1807     if (Hi != NoClass || !UseX86_MMXType(IRType))
1808       ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
1809     else
1810       // This is an MMX type. Treat it as such.
1811       ResType = llvm::Type::getX86_MMXTy(getVMContext());
1812 
1813     ++neededSSE;
1814     break;
1815   }
1816   }
1817 
1818   const llvm::Type *HighPart = 0;
1819   switch (Hi) {
1820     // Memory was handled previously, ComplexX87 and X87 should
1821     // never occur as hi classes, and X87Up must be preceded by X87,
1822     // which is passed in memory.
1823   case Memory:
1824   case X87:
1825   case ComplexX87:
1826     assert(0 && "Invalid classification for hi word.");
1827     break;
1828 
1829   case NoClass: break;
1830 
1831   case Integer:
1832     ++neededInt;
1833     // Pick an 8-byte type based on the preferred type.
1834     HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
1835 
1836     if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
1837       return ABIArgInfo::getDirect(HighPart, 8);
1838     break;
1839 
1840     // X87Up generally doesn't occur here (long double is passed in
1841     // memory), except in situations involving unions.
1842   case X87Up:
1843   case SSE:
1844     HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
1845 
1846     if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
1847       return ABIArgInfo::getDirect(HighPart, 8);
1848 
1849     ++neededSSE;
1850     break;
1851 
1852     // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
1853     // eightbyte is passed in the upper half of the last used SSE
1854     // register.  This only happens when 128-bit vectors are passed.
1855   case SSEUp:
1856     assert(Lo == SSE && "Unexpected SSEUp classification");
1857     ResType = Get16ByteVectorType(Ty);
1858     break;
1859   }
1860 
1861   // If a high part was specified, merge it together with the low part.  It is
1862   // known to pass in the high eightbyte of the result.  We do this by forming a
1863   // first class struct aggregate with the high and low part: {low, high}
1864   if (HighPart)
1865     ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
1866 
1867   return ABIArgInfo::getDirect(ResType);
1868 }
1869 
1870 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
1871 
1872   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
1873 
1874   // Keep track of the number of assigned registers.
1875   unsigned freeIntRegs = 6, freeSSERegs = 8;
1876 
1877   // If the return value is indirect, then the hidden argument is consuming one
1878   // integer register.
1879   if (FI.getReturnInfo().isIndirect())
1880     --freeIntRegs;
1881 
1882   // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
1883   // get assigned (in left-to-right order) for passing as follows...
1884   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
1885        it != ie; ++it) {
1886     unsigned neededInt, neededSSE;
1887     it->info = classifyArgumentType(it->type, neededInt, neededSSE);
1888 
1889     // AMD64-ABI 3.2.3p3: If there are no registers available for any
1890     // eightbyte of an argument, the whole argument is passed on the
1891     // stack. If registers have already been assigned for some
1892     // eightbytes of such an argument, the assignments get reverted.
1893     if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
1894       freeIntRegs -= neededInt;
1895       freeSSERegs -= neededSSE;
1896     } else {
1897       it->info = getIndirectResult(it->type);
1898     }
1899   }
1900 }
1901 
1902 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
1903                                         QualType Ty,
1904                                         CodeGenFunction &CGF) {
1905   llvm::Value *overflow_arg_area_p =
1906     CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
1907   llvm::Value *overflow_arg_area =
1908     CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
1909 
1910   // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
1911   // byte boundary if alignment needed by type exceeds 8 byte boundary.
1912   uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
1913   if (Align > 8) {
1914     // Note that we follow the ABI & gcc here, even though the type
1915     // could in theory have an alignment greater than 16. This case
1916     // shouldn't ever matter in practice.
1917 
1918     // overflow_arg_area = (overflow_arg_area + 15) & ~15;
1919     llvm::Value *Offset =
1920       llvm::ConstantInt::get(CGF.Int32Ty, 15);
1921     overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
1922     llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
1923                                                     CGF.Int64Ty);
1924     llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~15LL);
1925     overflow_arg_area =
1926       CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
1927                                  overflow_arg_area->getType(),
1928                                  "overflow_arg_area.align");
1929   }
1930 
1931   // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
1932   const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
1933   llvm::Value *Res =
1934     CGF.Builder.CreateBitCast(overflow_arg_area,
1935                               llvm::PointerType::getUnqual(LTy));
1936 
1937   // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
1938   // l->overflow_arg_area + sizeof(type).
1939   // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
1940   // an 8 byte boundary.
1941 
1942   uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
1943   llvm::Value *Offset =
1944       llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7)  & ~7);
1945   overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
1946                                             "overflow_arg_area.next");
1947   CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
1948 
1949   // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
1950   return Res;
1951 }
1952 
1953 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1954                                       CodeGenFunction &CGF) const {
1955   llvm::LLVMContext &VMContext = CGF.getLLVMContext();
1956 
1957   // Assume that va_list type is correct; should be pointer to LLVM type:
1958   // struct {
1959   //   i32 gp_offset;
1960   //   i32 fp_offset;
1961   //   i8* overflow_arg_area;
1962   //   i8* reg_save_area;
1963   // };
1964   unsigned neededInt, neededSSE;
1965 
1966   Ty = CGF.getContext().getCanonicalType(Ty);
1967   ABIArgInfo AI = classifyArgumentType(Ty, neededInt, neededSSE);
1968 
1969   // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
1970   // in the registers. If not go to step 7.
1971   if (!neededInt && !neededSSE)
1972     return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
1973 
1974   // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
1975   // general purpose registers needed to pass type and num_fp to hold
1976   // the number of floating point registers needed.
1977 
1978   // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
1979   // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
1980   // l->fp_offset > 304 - num_fp * 16 go to step 7.
1981   //
1982   // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
1983   // register save space).
1984 
1985   llvm::Value *InRegs = 0;
1986   llvm::Value *gp_offset_p = 0, *gp_offset = 0;
1987   llvm::Value *fp_offset_p = 0, *fp_offset = 0;
1988   if (neededInt) {
1989     gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
1990     gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
1991     InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
1992     InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
1993   }
1994 
1995   if (neededSSE) {
1996     fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
1997     fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
1998     llvm::Value *FitsInFP =
1999       llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
2000     FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
2001     InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
2002   }
2003 
2004   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
2005   llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
2006   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
2007   CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
2008 
2009   // Emit code to load the value if it was passed in registers.
2010 
2011   CGF.EmitBlock(InRegBlock);
2012 
2013   // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
2014   // an offset of l->gp_offset and/or l->fp_offset. This may require
2015   // copying to a temporary location in case the parameter is passed
2016   // in different register classes or requires an alignment greater
2017   // than 8 for general purpose registers and 16 for XMM registers.
2018   //
2019   // FIXME: This really results in shameful code when we end up needing to
2020   // collect arguments from different places; often what should result in a
2021   // simple assembling of a structure from scattered addresses has many more
2022   // loads than necessary. Can we clean this up?
2023   const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
2024   llvm::Value *RegAddr =
2025     CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
2026                            "reg_save_area");
2027   if (neededInt && neededSSE) {
2028     // FIXME: Cleanup.
2029     assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
2030     const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
2031     llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
2032     assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
2033     const llvm::Type *TyLo = ST->getElementType(0);
2034     const llvm::Type *TyHi = ST->getElementType(1);
2035     assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
2036            "Unexpected ABI info for mixed regs");
2037     const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
2038     const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
2039     llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2040     llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2041     llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
2042     llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
2043     llvm::Value *V =
2044       CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
2045     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2046     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
2047     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2048 
2049     RegAddr = CGF.Builder.CreateBitCast(Tmp,
2050                                         llvm::PointerType::getUnqual(LTy));
2051   } else if (neededInt) {
2052     RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2053     RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2054                                         llvm::PointerType::getUnqual(LTy));
2055   } else if (neededSSE == 1) {
2056     RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2057     RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2058                                         llvm::PointerType::getUnqual(LTy));
2059   } else {
2060     assert(neededSSE == 2 && "Invalid number of needed registers!");
2061     // SSE registers are spaced 16 bytes apart in the register save
2062     // area, we need to collect the two eightbytes together.
2063     llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2064     llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
2065     const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
2066     const llvm::Type *DblPtrTy =
2067       llvm::PointerType::getUnqual(DoubleTy);
2068     const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
2069                                                        DoubleTy, NULL);
2070     llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
2071     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
2072                                                          DblPtrTy));
2073     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2074     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
2075                                                          DblPtrTy));
2076     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2077     RegAddr = CGF.Builder.CreateBitCast(Tmp,
2078                                         llvm::PointerType::getUnqual(LTy));
2079   }
2080 
2081   // AMD64-ABI 3.5.7p5: Step 5. Set:
2082   // l->gp_offset = l->gp_offset + num_gp * 8
2083   // l->fp_offset = l->fp_offset + num_fp * 16.
2084   if (neededInt) {
2085     llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
2086     CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
2087                             gp_offset_p);
2088   }
2089   if (neededSSE) {
2090     llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
2091     CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
2092                             fp_offset_p);
2093   }
2094   CGF.EmitBranch(ContBlock);
2095 
2096   // Emit code to load the value if it was passed in memory.
2097 
2098   CGF.EmitBlock(InMemBlock);
2099   llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2100 
2101   // Return the appropriate result.
2102 
2103   CGF.EmitBlock(ContBlock);
2104   llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
2105                                                  "vaarg.addr");
2106   ResAddr->addIncoming(RegAddr, InRegBlock);
2107   ResAddr->addIncoming(MemAddr, InMemBlock);
2108   return ResAddr;
2109 }
2110 
2111 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const {
2112 
2113   if (Ty->isVoidType())
2114     return ABIArgInfo::getIgnore();
2115 
2116   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2117     Ty = EnumTy->getDecl()->getIntegerType();
2118 
2119   uint64_t Size = getContext().getTypeSize(Ty);
2120 
2121   if (const RecordType *RT = Ty->getAs<RecordType>()) {
2122     if (hasNonTrivialDestructorOrCopyConstructor(RT) ||
2123         RT->getDecl()->hasFlexibleArrayMember())
2124       return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2125 
2126     // FIXME: mingw-w64-gcc emits 128-bit struct as i128
2127     if (Size == 128 &&
2128         getContext().Target.getTriple().getOS() == llvm::Triple::MinGW32)
2129       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2130                                                           Size));
2131 
2132     // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
2133     // not 1, 2, 4, or 8 bytes, must be passed by reference."
2134     if (Size <= 64 &&
2135         (Size & (Size - 1)) == 0)
2136       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2137                                                           Size));
2138 
2139     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2140   }
2141 
2142   if (Ty->isPromotableIntegerType())
2143     return ABIArgInfo::getExtend();
2144 
2145   return ABIArgInfo::getDirect();
2146 }
2147 
2148 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2149 
2150   QualType RetTy = FI.getReturnType();
2151   FI.getReturnInfo() = classify(RetTy);
2152 
2153   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2154        it != ie; ++it)
2155     it->info = classify(it->type);
2156 }
2157 
2158 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2159                                       CodeGenFunction &CGF) const {
2160   const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
2161   const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
2162 
2163   CGBuilderTy &Builder = CGF.Builder;
2164   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
2165                                                        "ap");
2166   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2167   llvm::Type *PTy =
2168     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2169   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
2170 
2171   uint64_t Offset =
2172     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
2173   llvm::Value *NextAddr =
2174     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
2175                       "ap.next");
2176   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2177 
2178   return AddrTyped;
2179 }
2180 
2181 // PowerPC-32
2182 
2183 namespace {
2184 class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2185 public:
2186   PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2187 
2188   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2189     // This is recovered from gcc output.
2190     return 1; // r1 is the dedicated stack pointer
2191   }
2192 
2193   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2194                                llvm::Value *Address) const;
2195 };
2196 
2197 }
2198 
2199 bool
2200 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2201                                                 llvm::Value *Address) const {
2202   // This is calculated from the LLVM and GCC tables and verified
2203   // against gcc output.  AFAIK all ABIs use the same encoding.
2204 
2205   CodeGen::CGBuilderTy &Builder = CGF.Builder;
2206   llvm::LLVMContext &Context = CGF.getLLVMContext();
2207 
2208   const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
2209   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2210   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
2211   llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
2212 
2213   // 0-31: r0-31, the 4-byte general-purpose registers
2214   AssignToArrayRange(Builder, Address, Four8, 0, 31);
2215 
2216   // 32-63: fp0-31, the 8-byte floating-point registers
2217   AssignToArrayRange(Builder, Address, Eight8, 32, 63);
2218 
2219   // 64-76 are various 4-byte special-purpose registers:
2220   // 64: mq
2221   // 65: lr
2222   // 66: ctr
2223   // 67: ap
2224   // 68-75 cr0-7
2225   // 76: xer
2226   AssignToArrayRange(Builder, Address, Four8, 64, 76);
2227 
2228   // 77-108: v0-31, the 16-byte vector registers
2229   AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
2230 
2231   // 109: vrsave
2232   // 110: vscr
2233   // 111: spe_acc
2234   // 112: spefscr
2235   // 113: sfp
2236   AssignToArrayRange(Builder, Address, Four8, 109, 113);
2237 
2238   return false;
2239 }
2240 
2241 
2242 //===----------------------------------------------------------------------===//
2243 // ARM ABI Implementation
2244 //===----------------------------------------------------------------------===//
2245 
2246 namespace {
2247 
2248 class ARMABIInfo : public ABIInfo {
2249 public:
2250   enum ABIKind {
2251     APCS = 0,
2252     AAPCS = 1,
2253     AAPCS_VFP
2254   };
2255 
2256 private:
2257   ABIKind Kind;
2258 
2259 public:
2260   ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {}
2261 
2262 private:
2263   ABIKind getABIKind() const { return Kind; }
2264 
2265   ABIArgInfo classifyReturnType(QualType RetTy) const;
2266   ABIArgInfo classifyArgumentType(QualType RetTy) const;
2267 
2268   virtual void computeInfo(CGFunctionInfo &FI) const;
2269 
2270   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2271                                  CodeGenFunction &CGF) const;
2272 };
2273 
2274 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
2275 public:
2276   ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
2277     :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
2278 
2279   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2280     return 13;
2281   }
2282 };
2283 
2284 }
2285 
2286 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
2287   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2288   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2289        it != ie; ++it)
2290     it->info = classifyArgumentType(it->type);
2291 
2292   // Always honor user-specified calling convention.
2293   if (FI.getCallingConvention() != llvm::CallingConv::C)
2294     return;
2295 
2296   // Calling convention as default by an ABI.
2297   llvm::CallingConv::ID DefaultCC;
2298   llvm::StringRef Env = getContext().Target.getTriple().getEnvironmentName();
2299   if (Env == "gnueabi" || Env == "eabi")
2300     DefaultCC = llvm::CallingConv::ARM_AAPCS;
2301   else
2302     DefaultCC = llvm::CallingConv::ARM_APCS;
2303 
2304   // If user did not ask for specific calling convention explicitly (e.g. via
2305   // pcs attribute), set effective calling convention if it's different than ABI
2306   // default.
2307   switch (getABIKind()) {
2308   case APCS:
2309     if (DefaultCC != llvm::CallingConv::ARM_APCS)
2310       FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
2311     break;
2312   case AAPCS:
2313     if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
2314       FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
2315     break;
2316   case AAPCS_VFP:
2317     if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP)
2318       FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
2319     break;
2320   }
2321 }
2322 
2323 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
2324   if (!isAggregateTypeForABI(Ty)) {
2325     // Treat an enum type as its underlying type.
2326     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2327       Ty = EnumTy->getDecl()->getIntegerType();
2328 
2329     return (Ty->isPromotableIntegerType() ?
2330             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2331   }
2332 
2333   // Ignore empty records.
2334   if (isEmptyRecord(getContext(), Ty, true))
2335     return ABIArgInfo::getIgnore();
2336 
2337   // Structures with either a non-trivial destructor or a non-trivial
2338   // copy constructor are always indirect.
2339   if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
2340     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2341 
2342   // Otherwise, pass by coercing to a structure of the appropriate size.
2343   //
2344   // FIXME: This doesn't handle alignment > 64 bits.
2345   const llvm::Type* ElemTy;
2346   unsigned SizeRegs;
2347   if (getContext().getTypeSizeInChars(Ty) <= CharUnits::fromQuantity(64)) {
2348     ElemTy = llvm::Type::getInt32Ty(getVMContext());
2349     SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
2350   } else if (getABIKind() == ARMABIInfo::APCS) {
2351     // Initial ARM ByVal support is APCS-only.
2352     return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
2353   } else {
2354     // FIXME: This is kind of nasty... but there isn't much choice
2355     // because most of the ARM calling conventions don't yet support
2356     // byval.
2357     ElemTy = llvm::Type::getInt64Ty(getVMContext());
2358     SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
2359   }
2360 
2361   const llvm::Type *STy =
2362     llvm::StructType::get(getVMContext(),
2363                           llvm::ArrayType::get(ElemTy, SizeRegs), NULL, NULL);
2364   return ABIArgInfo::getDirect(STy);
2365 }
2366 
2367 static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
2368                               llvm::LLVMContext &VMContext) {
2369   // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
2370   // is called integer-like if its size is less than or equal to one word, and
2371   // the offset of each of its addressable sub-fields is zero.
2372 
2373   uint64_t Size = Context.getTypeSize(Ty);
2374 
2375   // Check that the type fits in a word.
2376   if (Size > 32)
2377     return false;
2378 
2379   // FIXME: Handle vector types!
2380   if (Ty->isVectorType())
2381     return false;
2382 
2383   // Float types are never treated as "integer like".
2384   if (Ty->isRealFloatingType())
2385     return false;
2386 
2387   // If this is a builtin or pointer type then it is ok.
2388   if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
2389     return true;
2390 
2391   // Small complex integer types are "integer like".
2392   if (const ComplexType *CT = Ty->getAs<ComplexType>())
2393     return isIntegerLikeType(CT->getElementType(), Context, VMContext);
2394 
2395   // Single element and zero sized arrays should be allowed, by the definition
2396   // above, but they are not.
2397 
2398   // Otherwise, it must be a record type.
2399   const RecordType *RT = Ty->getAs<RecordType>();
2400   if (!RT) return false;
2401 
2402   // Ignore records with flexible arrays.
2403   const RecordDecl *RD = RT->getDecl();
2404   if (RD->hasFlexibleArrayMember())
2405     return false;
2406 
2407   // Check that all sub-fields are at offset 0, and are themselves "integer
2408   // like".
2409   const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
2410 
2411   bool HadField = false;
2412   unsigned idx = 0;
2413   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2414        i != e; ++i, ++idx) {
2415     const FieldDecl *FD = *i;
2416 
2417     // Bit-fields are not addressable, we only need to verify they are "integer
2418     // like". We still have to disallow a subsequent non-bitfield, for example:
2419     //   struct { int : 0; int x }
2420     // is non-integer like according to gcc.
2421     if (FD->isBitField()) {
2422       if (!RD->isUnion())
2423         HadField = true;
2424 
2425       if (!isIntegerLikeType(FD->getType(), Context, VMContext))
2426         return false;
2427 
2428       continue;
2429     }
2430 
2431     // Check if this field is at offset 0.
2432     if (Layout.getFieldOffset(idx) != 0)
2433       return false;
2434 
2435     if (!isIntegerLikeType(FD->getType(), Context, VMContext))
2436       return false;
2437 
2438     // Only allow at most one field in a structure. This doesn't match the
2439     // wording above, but follows gcc in situations with a field following an
2440     // empty structure.
2441     if (!RD->isUnion()) {
2442       if (HadField)
2443         return false;
2444 
2445       HadField = true;
2446     }
2447   }
2448 
2449   return true;
2450 }
2451 
2452 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
2453   if (RetTy->isVoidType())
2454     return ABIArgInfo::getIgnore();
2455 
2456   // Large vector types should be returned via memory.
2457   if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
2458     return ABIArgInfo::getIndirect(0);
2459 
2460   if (!isAggregateTypeForABI(RetTy)) {
2461     // Treat an enum type as its underlying type.
2462     if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
2463       RetTy = EnumTy->getDecl()->getIntegerType();
2464 
2465     return (RetTy->isPromotableIntegerType() ?
2466             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2467   }
2468 
2469   // Structures with either a non-trivial destructor or a non-trivial
2470   // copy constructor are always indirect.
2471   if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
2472     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2473 
2474   // Are we following APCS?
2475   if (getABIKind() == APCS) {
2476     if (isEmptyRecord(getContext(), RetTy, false))
2477       return ABIArgInfo::getIgnore();
2478 
2479     // Complex types are all returned as packed integers.
2480     //
2481     // FIXME: Consider using 2 x vector types if the back end handles them
2482     // correctly.
2483     if (RetTy->isAnyComplexType())
2484       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2485                                               getContext().getTypeSize(RetTy)));
2486 
2487     // Integer like structures are returned in r0.
2488     if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
2489       // Return in the smallest viable integer type.
2490       uint64_t Size = getContext().getTypeSize(RetTy);
2491       if (Size <= 8)
2492         return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
2493       if (Size <= 16)
2494         return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
2495       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
2496     }
2497 
2498     // Otherwise return in memory.
2499     return ABIArgInfo::getIndirect(0);
2500   }
2501 
2502   // Otherwise this is an AAPCS variant.
2503 
2504   if (isEmptyRecord(getContext(), RetTy, true))
2505     return ABIArgInfo::getIgnore();
2506 
2507   // Aggregates <= 4 bytes are returned in r0; other aggregates
2508   // are returned indirectly.
2509   uint64_t Size = getContext().getTypeSize(RetTy);
2510   if (Size <= 32) {
2511     // Return in the smallest viable integer type.
2512     if (Size <= 8)
2513       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
2514     if (Size <= 16)
2515       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
2516     return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
2517   }
2518 
2519   return ABIArgInfo::getIndirect(0);
2520 }
2521 
2522 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2523                                    CodeGenFunction &CGF) const {
2524   // FIXME: Need to handle alignment
2525   const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
2526   const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
2527 
2528   CGBuilderTy &Builder = CGF.Builder;
2529   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
2530                                                        "ap");
2531   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2532   llvm::Type *PTy =
2533     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2534   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
2535 
2536   uint64_t Offset =
2537     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
2538   llvm::Value *NextAddr =
2539     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
2540                       "ap.next");
2541   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2542 
2543   return AddrTyped;
2544 }
2545 
2546 //===----------------------------------------------------------------------===//
2547 // PTX ABI Implementation
2548 //===----------------------------------------------------------------------===//
2549 
2550 namespace {
2551 
2552 class PTXABIInfo : public ABIInfo {
2553 public:
2554   PTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
2555 
2556   ABIArgInfo classifyReturnType(QualType RetTy) const;
2557   ABIArgInfo classifyArgumentType(QualType Ty) const;
2558 
2559   virtual void computeInfo(CGFunctionInfo &FI) const;
2560   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2561                                  CodeGenFunction &CFG) const;
2562 };
2563 
2564 class PTXTargetCodeGenInfo : public TargetCodeGenInfo {
2565 public:
2566   PTXTargetCodeGenInfo(CodeGenTypes &CGT)
2567     : TargetCodeGenInfo(new PTXABIInfo(CGT)) {}
2568 };
2569 
2570 ABIArgInfo PTXABIInfo::classifyReturnType(QualType RetTy) const {
2571   if (RetTy->isVoidType())
2572     return ABIArgInfo::getIgnore();
2573   if (isAggregateTypeForABI(RetTy))
2574     return ABIArgInfo::getIndirect(0);
2575   return ABIArgInfo::getDirect();
2576 }
2577 
2578 ABIArgInfo PTXABIInfo::classifyArgumentType(QualType Ty) const {
2579   if (isAggregateTypeForABI(Ty))
2580     return ABIArgInfo::getIndirect(0);
2581 
2582   return ABIArgInfo::getDirect();
2583 }
2584 
2585 void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
2586   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2587   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2588        it != ie; ++it)
2589     it->info = classifyArgumentType(it->type);
2590 
2591   // Always honor user-specified calling convention.
2592   if (FI.getCallingConvention() != llvm::CallingConv::C)
2593     return;
2594 
2595   // Calling convention as default by an ABI.
2596   llvm::CallingConv::ID DefaultCC;
2597   llvm::StringRef Env = getContext().Target.getTriple().getEnvironmentName();
2598   if (Env == "device")
2599     DefaultCC = llvm::CallingConv::PTX_Device;
2600   else
2601     DefaultCC = llvm::CallingConv::PTX_Kernel;
2602 
2603   FI.setEffectiveCallingConvention(DefaultCC);
2604 }
2605 
2606 llvm::Value *PTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2607                                    CodeGenFunction &CFG) const {
2608   llvm_unreachable("PTX does not support varargs");
2609   return 0;
2610 }
2611 
2612 }
2613 
2614 //===----------------------------------------------------------------------===//
2615 // SystemZ ABI Implementation
2616 //===----------------------------------------------------------------------===//
2617 
2618 namespace {
2619 
2620 class SystemZABIInfo : public ABIInfo {
2621 public:
2622   SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
2623 
2624   bool isPromotableIntegerType(QualType Ty) const;
2625 
2626   ABIArgInfo classifyReturnType(QualType RetTy) const;
2627   ABIArgInfo classifyArgumentType(QualType RetTy) const;
2628 
2629   virtual void computeInfo(CGFunctionInfo &FI) const {
2630     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2631     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2632          it != ie; ++it)
2633       it->info = classifyArgumentType(it->type);
2634   }
2635 
2636   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2637                                  CodeGenFunction &CGF) const;
2638 };
2639 
2640 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
2641 public:
2642   SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
2643     : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
2644 };
2645 
2646 }
2647 
2648 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
2649   // SystemZ ABI requires all 8, 16 and 32 bit quantities to be extended.
2650   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
2651     switch (BT->getKind()) {
2652     case BuiltinType::Bool:
2653     case BuiltinType::Char_S:
2654     case BuiltinType::Char_U:
2655     case BuiltinType::SChar:
2656     case BuiltinType::UChar:
2657     case BuiltinType::Short:
2658     case BuiltinType::UShort:
2659     case BuiltinType::Int:
2660     case BuiltinType::UInt:
2661       return true;
2662     default:
2663       return false;
2664     }
2665   return false;
2666 }
2667 
2668 llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2669                                        CodeGenFunction &CGF) const {
2670   // FIXME: Implement
2671   return 0;
2672 }
2673 
2674 
2675 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
2676   if (RetTy->isVoidType())
2677     return ABIArgInfo::getIgnore();
2678   if (isAggregateTypeForABI(RetTy))
2679     return ABIArgInfo::getIndirect(0);
2680 
2681   return (isPromotableIntegerType(RetTy) ?
2682           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2683 }
2684 
2685 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
2686   if (isAggregateTypeForABI(Ty))
2687     return ABIArgInfo::getIndirect(0);
2688 
2689   return (isPromotableIntegerType(Ty) ?
2690           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2691 }
2692 
2693 //===----------------------------------------------------------------------===//
2694 // MBlaze ABI Implementation
2695 //===----------------------------------------------------------------------===//
2696 
2697 namespace {
2698 
2699 class MBlazeABIInfo : public ABIInfo {
2700 public:
2701   MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
2702 
2703   bool isPromotableIntegerType(QualType Ty) const;
2704 
2705   ABIArgInfo classifyReturnType(QualType RetTy) const;
2706   ABIArgInfo classifyArgumentType(QualType RetTy) const;
2707 
2708   virtual void computeInfo(CGFunctionInfo &FI) const {
2709     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2710     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2711          it != ie; ++it)
2712       it->info = classifyArgumentType(it->type);
2713   }
2714 
2715   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2716                                  CodeGenFunction &CGF) const;
2717 };
2718 
2719 class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo {
2720 public:
2721   MBlazeTargetCodeGenInfo(CodeGenTypes &CGT)
2722     : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {}
2723   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2724                            CodeGen::CodeGenModule &M) const;
2725 };
2726 
2727 }
2728 
2729 bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const {
2730   // MBlaze ABI requires all 8 and 16 bit quantities to be extended.
2731   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
2732     switch (BT->getKind()) {
2733     case BuiltinType::Bool:
2734     case BuiltinType::Char_S:
2735     case BuiltinType::Char_U:
2736     case BuiltinType::SChar:
2737     case BuiltinType::UChar:
2738     case BuiltinType::Short:
2739     case BuiltinType::UShort:
2740       return true;
2741     default:
2742       return false;
2743     }
2744   return false;
2745 }
2746 
2747 llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2748                                       CodeGenFunction &CGF) const {
2749   // FIXME: Implement
2750   return 0;
2751 }
2752 
2753 
2754 ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const {
2755   if (RetTy->isVoidType())
2756     return ABIArgInfo::getIgnore();
2757   if (isAggregateTypeForABI(RetTy))
2758     return ABIArgInfo::getIndirect(0);
2759 
2760   return (isPromotableIntegerType(RetTy) ?
2761           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2762 }
2763 
2764 ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const {
2765   if (isAggregateTypeForABI(Ty))
2766     return ABIArgInfo::getIndirect(0);
2767 
2768   return (isPromotableIntegerType(Ty) ?
2769           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2770 }
2771 
2772 void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
2773                                                   llvm::GlobalValue *GV,
2774                                                   CodeGen::CodeGenModule &M)
2775                                                   const {
2776   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
2777   if (!FD) return;
2778 
2779   llvm::CallingConv::ID CC = llvm::CallingConv::C;
2780   if (FD->hasAttr<MBlazeInterruptHandlerAttr>())
2781     CC = llvm::CallingConv::MBLAZE_INTR;
2782   else if (FD->hasAttr<MBlazeSaveVolatilesAttr>())
2783     CC = llvm::CallingConv::MBLAZE_SVOL;
2784 
2785   if (CC != llvm::CallingConv::C) {
2786       // Handle 'interrupt_handler' attribute:
2787       llvm::Function *F = cast<llvm::Function>(GV);
2788 
2789       // Step 1: Set ISR calling convention.
2790       F->setCallingConv(CC);
2791 
2792       // Step 2: Add attributes goodness.
2793       F->addFnAttr(llvm::Attribute::NoInline);
2794   }
2795 
2796   // Step 3: Emit _interrupt_handler alias.
2797   if (CC == llvm::CallingConv::MBLAZE_INTR)
2798     new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
2799                           "_interrupt_handler", GV, &M.getModule());
2800 }
2801 
2802 
2803 //===----------------------------------------------------------------------===//
2804 // MSP430 ABI Implementation
2805 //===----------------------------------------------------------------------===//
2806 
2807 namespace {
2808 
2809 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
2810 public:
2811   MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
2812     : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
2813   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2814                            CodeGen::CodeGenModule &M) const;
2815 };
2816 
2817 }
2818 
2819 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
2820                                                   llvm::GlobalValue *GV,
2821                                              CodeGen::CodeGenModule &M) const {
2822   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
2823     if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
2824       // Handle 'interrupt' attribute:
2825       llvm::Function *F = cast<llvm::Function>(GV);
2826 
2827       // Step 1: Set ISR calling convention.
2828       F->setCallingConv(llvm::CallingConv::MSP430_INTR);
2829 
2830       // Step 2: Add attributes goodness.
2831       F->addFnAttr(llvm::Attribute::NoInline);
2832 
2833       // Step 3: Emit ISR vector alias.
2834       unsigned Num = attr->getNumber() + 0xffe0;
2835       new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
2836                             "vector_" + llvm::Twine::utohexstr(Num),
2837                             GV, &M.getModule());
2838     }
2839   }
2840 }
2841 
2842 //===----------------------------------------------------------------------===//
2843 // MIPS ABI Implementation.  This works for both little-endian and
2844 // big-endian variants.
2845 //===----------------------------------------------------------------------===//
2846 
2847 namespace {
2848 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
2849 public:
2850   MIPSTargetCodeGenInfo(CodeGenTypes &CGT)
2851     : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
2852 
2853   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
2854     return 29;
2855   }
2856 
2857   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2858                                llvm::Value *Address) const;
2859 };
2860 }
2861 
2862 bool
2863 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2864                                                llvm::Value *Address) const {
2865   // This information comes from gcc's implementation, which seems to
2866   // as canonical as it gets.
2867 
2868   CodeGen::CGBuilderTy &Builder = CGF.Builder;
2869   llvm::LLVMContext &Context = CGF.getLLVMContext();
2870 
2871   // Everything on MIPS is 4 bytes.  Double-precision FP registers
2872   // are aliased to pairs of single-precision FP registers.
2873   const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
2874   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2875 
2876   // 0-31 are the general purpose registers, $0 - $31.
2877   // 32-63 are the floating-point registers, $f0 - $f31.
2878   // 64 and 65 are the multiply/divide registers, $hi and $lo.
2879   // 66 is the (notional, I think) register for signal-handler return.
2880   AssignToArrayRange(Builder, Address, Four8, 0, 65);
2881 
2882   // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
2883   // They are one bit wide and ignored here.
2884 
2885   // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
2886   // (coprocessor 1 is the FP unit)
2887   // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
2888   // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
2889   // 176-181 are the DSP accumulator registers.
2890   AssignToArrayRange(Builder, Address, Four8, 80, 181);
2891 
2892   return false;
2893 }
2894 
2895 
2896 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
2897   if (TheTargetCodeGenInfo)
2898     return *TheTargetCodeGenInfo;
2899 
2900   // For now we just cache the TargetCodeGenInfo in CodeGenModule and don't
2901   // free it.
2902 
2903   const llvm::Triple &Triple = getContext().Target.getTriple();
2904   switch (Triple.getArch()) {
2905   default:
2906     return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
2907 
2908   case llvm::Triple::mips:
2909   case llvm::Triple::mipsel:
2910     return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types));
2911 
2912   case llvm::Triple::arm:
2913   case llvm::Triple::thumb:
2914     {
2915       ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
2916 
2917       if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0)
2918         Kind = ARMABIInfo::APCS;
2919       else if (CodeGenOpts.FloatABI == "hard")
2920         Kind = ARMABIInfo::AAPCS_VFP;
2921 
2922       return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
2923     }
2924 
2925   case llvm::Triple::ppc:
2926     return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
2927 
2928   case llvm::Triple::ptx32:
2929   case llvm::Triple::ptx64:
2930     return *(TheTargetCodeGenInfo = new PTXTargetCodeGenInfo(Types));
2931 
2932   case llvm::Triple::systemz:
2933     return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
2934 
2935   case llvm::Triple::mblaze:
2936     return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));
2937 
2938   case llvm::Triple::msp430:
2939     return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
2940 
2941   case llvm::Triple::x86:
2942     if (Triple.isOSDarwin())
2943       return *(TheTargetCodeGenInfo =
2944                new X86_32TargetCodeGenInfo(Types, true, true));
2945 
2946     switch (Triple.getOS()) {
2947     case llvm::Triple::Cygwin:
2948     case llvm::Triple::MinGW32:
2949     case llvm::Triple::AuroraUX:
2950     case llvm::Triple::DragonFly:
2951     case llvm::Triple::FreeBSD:
2952     case llvm::Triple::OpenBSD:
2953     case llvm::Triple::NetBSD:
2954       return *(TheTargetCodeGenInfo =
2955                new X86_32TargetCodeGenInfo(Types, false, true));
2956 
2957     default:
2958       return *(TheTargetCodeGenInfo =
2959                new X86_32TargetCodeGenInfo(Types, false, false));
2960     }
2961 
2962   case llvm::Triple::x86_64:
2963     switch (Triple.getOS()) {
2964     case llvm::Triple::Win32:
2965     case llvm::Triple::MinGW32:
2966     case llvm::Triple::Cygwin:
2967       return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
2968     default:
2969       return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types));
2970     }
2971   }
2972 }
2973