1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliancy.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "TargetInfo.h"
16 #include "ABIInfo.h"
17 #include "CodeGenFunction.h"
18 #include "clang/AST/RecordLayout.h"
19 #include "llvm/Type.h"
20 #include "llvm/Target/TargetData.h"
21 #include "llvm/ADT/Triple.h"
22 #include "llvm/Support/raw_ostream.h"
23 using namespace clang;
24 using namespace CodeGen;
25 
26 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
27                                llvm::Value *Array,
28                                llvm::Value *Value,
29                                unsigned FirstIndex,
30                                unsigned LastIndex) {
31   // Alternatively, we could emit this as a loop in the source.
32   for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
33     llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
34     Builder.CreateStore(Value, Cell);
35   }
36 }
37 
38 static bool isAggregateTypeForABI(QualType T) {
39   return CodeGenFunction::hasAggregateLLVMType(T) ||
40          T->isMemberFunctionPointerType();
41 }
42 
43 ABIInfo::~ABIInfo() {}
44 
45 ASTContext &ABIInfo::getContext() const {
46   return CGT.getContext();
47 }
48 
49 llvm::LLVMContext &ABIInfo::getVMContext() const {
50   return CGT.getLLVMContext();
51 }
52 
53 const llvm::TargetData &ABIInfo::getTargetData() const {
54   return CGT.getTargetData();
55 }
56 
57 
58 void ABIArgInfo::dump() const {
59   llvm::raw_ostream &OS = llvm::errs();
60   OS << "(ABIArgInfo Kind=";
61   switch (TheKind) {
62   case Direct:
63     OS << "Direct Type=";
64     if (const llvm::Type *Ty = getCoerceToType())
65       Ty->print(OS);
66     else
67       OS << "null";
68     break;
69   case Extend:
70     OS << "Extend";
71     break;
72   case Ignore:
73     OS << "Ignore";
74     break;
75   case Indirect:
76     OS << "Indirect Align=" << getIndirectAlign()
77        << " Byal=" << getIndirectByVal()
78        << " Realign=" << getIndirectRealign();
79     break;
80   case Expand:
81     OS << "Expand";
82     break;
83   }
84   OS << ")\n";
85 }
86 
87 TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
88 
89 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
90 
91 /// isEmptyField - Return true iff a the field is "empty", that is it
92 /// is an unnamed bit-field or an (array of) empty record(s).
93 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
94                          bool AllowArrays) {
95   if (FD->isUnnamedBitfield())
96     return true;
97 
98   QualType FT = FD->getType();
99 
100     // Constant arrays of empty records count as empty, strip them off.
101   if (AllowArrays)
102     while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
103       FT = AT->getElementType();
104 
105   const RecordType *RT = FT->getAs<RecordType>();
106   if (!RT)
107     return false;
108 
109   // C++ record fields are never empty, at least in the Itanium ABI.
110   //
111   // FIXME: We should use a predicate for whether this behavior is true in the
112   // current ABI.
113   if (isa<CXXRecordDecl>(RT->getDecl()))
114     return false;
115 
116   return isEmptyRecord(Context, FT, AllowArrays);
117 }
118 
119 /// isEmptyRecord - Return true iff a structure contains only empty
120 /// fields. Note that a structure with a flexible array member is not
121 /// considered empty.
122 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
123   const RecordType *RT = T->getAs<RecordType>();
124   if (!RT)
125     return 0;
126   const RecordDecl *RD = RT->getDecl();
127   if (RD->hasFlexibleArrayMember())
128     return false;
129 
130   // If this is a C++ record, check the bases first.
131   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
132     for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
133            e = CXXRD->bases_end(); i != e; ++i)
134       if (!isEmptyRecord(Context, i->getType(), true))
135         return false;
136 
137   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
138          i != e; ++i)
139     if (!isEmptyField(Context, *i, AllowArrays))
140       return false;
141   return true;
142 }
143 
144 /// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
145 /// a non-trivial destructor or a non-trivial copy constructor.
146 static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
147   const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
148   if (!RD)
149     return false;
150 
151   return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
152 }
153 
154 /// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
155 /// a record type with either a non-trivial destructor or a non-trivial copy
156 /// constructor.
157 static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
158   const RecordType *RT = T->getAs<RecordType>();
159   if (!RT)
160     return false;
161 
162   return hasNonTrivialDestructorOrCopyConstructor(RT);
163 }
164 
165 /// isSingleElementStruct - Determine if a structure is a "single
166 /// element struct", i.e. it has exactly one non-empty field or
167 /// exactly one field which is itself a single element
168 /// struct. Structures with flexible array members are never
169 /// considered single element structs.
170 ///
171 /// \return The field declaration for the single non-empty field, if
172 /// it exists.
173 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
174   const RecordType *RT = T->getAsStructureType();
175   if (!RT)
176     return 0;
177 
178   const RecordDecl *RD = RT->getDecl();
179   if (RD->hasFlexibleArrayMember())
180     return 0;
181 
182   const Type *Found = 0;
183 
184   // If this is a C++ record, check the bases first.
185   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
186     for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
187            e = CXXRD->bases_end(); i != e; ++i) {
188       // Ignore empty records.
189       if (isEmptyRecord(Context, i->getType(), true))
190         continue;
191 
192       // If we already found an element then this isn't a single-element struct.
193       if (Found)
194         return 0;
195 
196       // If this is non-empty and not a single element struct, the composite
197       // cannot be a single element struct.
198       Found = isSingleElementStruct(i->getType(), Context);
199       if (!Found)
200         return 0;
201     }
202   }
203 
204   // Check for single element.
205   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
206          i != e; ++i) {
207     const FieldDecl *FD = *i;
208     QualType FT = FD->getType();
209 
210     // Ignore empty fields.
211     if (isEmptyField(Context, FD, true))
212       continue;
213 
214     // If we already found an element then this isn't a single-element
215     // struct.
216     if (Found)
217       return 0;
218 
219     // Treat single element arrays as the element.
220     while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
221       if (AT->getSize().getZExtValue() != 1)
222         break;
223       FT = AT->getElementType();
224     }
225 
226     if (!isAggregateTypeForABI(FT)) {
227       Found = FT.getTypePtr();
228     } else {
229       Found = isSingleElementStruct(FT, Context);
230       if (!Found)
231         return 0;
232     }
233   }
234 
235   return Found;
236 }
237 
238 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
239   if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
240       !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
241       !Ty->isBlockPointerType())
242     return false;
243 
244   uint64_t Size = Context.getTypeSize(Ty);
245   return Size == 32 || Size == 64;
246 }
247 
248 /// canExpandIndirectArgument - Test whether an argument type which is to be
249 /// passed indirectly (on the stack) would have the equivalent layout if it was
250 /// expanded into separate arguments. If so, we prefer to do the latter to avoid
251 /// inhibiting optimizations.
252 ///
253 // FIXME: This predicate is missing many cases, currently it just follows
254 // llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
255 // should probably make this smarter, or better yet make the LLVM backend
256 // capable of handling it.
257 static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
258   // We can only expand structure types.
259   const RecordType *RT = Ty->getAs<RecordType>();
260   if (!RT)
261     return false;
262 
263   // We can only expand (C) structures.
264   //
265   // FIXME: This needs to be generalized to handle classes as well.
266   const RecordDecl *RD = RT->getDecl();
267   if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
268     return false;
269 
270   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
271          i != e; ++i) {
272     const FieldDecl *FD = *i;
273 
274     if (!is32Or64BitBasicType(FD->getType(), Context))
275       return false;
276 
277     // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
278     // how to expand them yet, and the predicate for telling if a bitfield still
279     // counts as "basic" is more complicated than what we were doing previously.
280     if (FD->isBitField())
281       return false;
282   }
283 
284   return true;
285 }
286 
287 namespace {
288 /// DefaultABIInfo - The default implementation for ABI specific
289 /// details. This implementation provides information which results in
290 /// self-consistent and sensible LLVM IR generation, but does not
291 /// conform to any particular ABI.
292 class DefaultABIInfo : public ABIInfo {
293 public:
294   DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
295 
296   ABIArgInfo classifyReturnType(QualType RetTy) const;
297   ABIArgInfo classifyArgumentType(QualType RetTy) const;
298 
299   virtual void computeInfo(CGFunctionInfo &FI) const {
300     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
301     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
302          it != ie; ++it)
303       it->info = classifyArgumentType(it->type);
304   }
305 
306   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
307                                  CodeGenFunction &CGF) const;
308 };
309 
310 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
311 public:
312   DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
313     : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
314 };
315 
316 llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
317                                        CodeGenFunction &CGF) const {
318   return 0;
319 }
320 
321 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
322   if (isAggregateTypeForABI(Ty))
323     return ABIArgInfo::getIndirect(0);
324 
325   // Treat an enum type as its underlying type.
326   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
327     Ty = EnumTy->getDecl()->getIntegerType();
328 
329   return (Ty->isPromotableIntegerType() ?
330           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
331 }
332 
333 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
334   if (RetTy->isVoidType())
335     return ABIArgInfo::getIgnore();
336 
337   if (isAggregateTypeForABI(RetTy))
338     return ABIArgInfo::getIndirect(0);
339 
340   // Treat an enum type as its underlying type.
341   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
342     RetTy = EnumTy->getDecl()->getIntegerType();
343 
344   return (RetTy->isPromotableIntegerType() ?
345           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
346 }
347 
348 /// UseX86_MMXType - Return true if this is an MMX type that should use the special
349 /// x86_mmx type.
350 bool UseX86_MMXType(const llvm::Type *IRType) {
351   // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
352   // special x86_mmx type.
353   return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
354     cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
355     IRType->getScalarSizeInBits() != 64;
356 }
357 
358 //===----------------------------------------------------------------------===//
359 // X86-32 ABI Implementation
360 //===----------------------------------------------------------------------===//
361 
362 /// X86_32ABIInfo - The X86-32 ABI information.
363 class X86_32ABIInfo : public ABIInfo {
364   static const unsigned MinABIStackAlignInBytes = 4;
365 
366   bool IsDarwinVectorABI;
367   bool IsSmallStructInRegABI;
368 
369   static bool isRegisterSize(unsigned Size) {
370     return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
371   }
372 
373   static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);
374 
375   /// getIndirectResult - Give a source type \arg Ty, return a suitable result
376   /// such that the argument will be passed in memory.
377   ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;
378 
379   /// \brief Return the alignment to use for the given type on the stack.
380   unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
381 
382 public:
383 
384   ABIArgInfo classifyReturnType(QualType RetTy) const;
385   ABIArgInfo classifyArgumentType(QualType RetTy) const;
386 
387   virtual void computeInfo(CGFunctionInfo &FI) const {
388     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
389     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
390          it != ie; ++it)
391       it->info = classifyArgumentType(it->type);
392   }
393 
394   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
395                                  CodeGenFunction &CGF) const;
396 
397   X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
398     : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p) {}
399 };
400 
401 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
402 public:
403   X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
404     :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p)) {}
405 
406   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
407                            CodeGen::CodeGenModule &CGM) const;
408 
409   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
410     // Darwin uses different dwarf register numbers for EH.
411     if (CGM.isTargetDarwin()) return 5;
412 
413     return 4;
414   }
415 
416   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
417                                llvm::Value *Address) const;
418 };
419 
420 }
421 
422 /// shouldReturnTypeInRegister - Determine if the given type should be
423 /// passed in a register (for the Darwin ABI).
424 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
425                                                ASTContext &Context) {
426   uint64_t Size = Context.getTypeSize(Ty);
427 
428   // Type must be register sized.
429   if (!isRegisterSize(Size))
430     return false;
431 
432   if (Ty->isVectorType()) {
433     // 64- and 128- bit vectors inside structures are not returned in
434     // registers.
435     if (Size == 64 || Size == 128)
436       return false;
437 
438     return true;
439   }
440 
441   // If this is a builtin, pointer, enum, complex type, member pointer, or
442   // member function pointer it is ok.
443   if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
444       Ty->isAnyComplexType() || Ty->isEnumeralType() ||
445       Ty->isBlockPointerType() || Ty->isMemberPointerType())
446     return true;
447 
448   // Arrays are treated like records.
449   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
450     return shouldReturnTypeInRegister(AT->getElementType(), Context);
451 
452   // Otherwise, it must be a record type.
453   const RecordType *RT = Ty->getAs<RecordType>();
454   if (!RT) return false;
455 
456   // FIXME: Traverse bases here too.
457 
458   // Structure types are passed in register if all fields would be
459   // passed in a register.
460   for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
461          e = RT->getDecl()->field_end(); i != e; ++i) {
462     const FieldDecl *FD = *i;
463 
464     // Empty fields are ignored.
465     if (isEmptyField(Context, FD, true))
466       continue;
467 
468     // Check fields recursively.
469     if (!shouldReturnTypeInRegister(FD->getType(), Context))
470       return false;
471   }
472 
473   return true;
474 }
475 
476 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const {
477   if (RetTy->isVoidType())
478     return ABIArgInfo::getIgnore();
479 
480   if (const VectorType *VT = RetTy->getAs<VectorType>()) {
481     // On Darwin, some vectors are returned in registers.
482     if (IsDarwinVectorABI) {
483       uint64_t Size = getContext().getTypeSize(RetTy);
484 
485       // 128-bit vectors are a special case; they are returned in
486       // registers and we need to make sure to pick a type the LLVM
487       // backend will like.
488       if (Size == 128)
489         return ABIArgInfo::getDirect(llvm::VectorType::get(
490                   llvm::Type::getInt64Ty(getVMContext()), 2));
491 
492       // Always return in register if it fits in a general purpose
493       // register, or if it is 64 bits and has a single element.
494       if ((Size == 8 || Size == 16 || Size == 32) ||
495           (Size == 64 && VT->getNumElements() == 1))
496         return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
497                                                             Size));
498 
499       return ABIArgInfo::getIndirect(0);
500     }
501 
502     return ABIArgInfo::getDirect();
503   }
504 
505   if (isAggregateTypeForABI(RetTy)) {
506     if (const RecordType *RT = RetTy->getAs<RecordType>()) {
507       // Structures with either a non-trivial destructor or a non-trivial
508       // copy constructor are always indirect.
509       if (hasNonTrivialDestructorOrCopyConstructor(RT))
510         return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
511 
512       // Structures with flexible arrays are always indirect.
513       if (RT->getDecl()->hasFlexibleArrayMember())
514         return ABIArgInfo::getIndirect(0);
515     }
516 
517     // If specified, structs and unions are always indirect.
518     if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
519       return ABIArgInfo::getIndirect(0);
520 
521     // Classify "single element" structs as their element type.
522     if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) {
523       if (const BuiltinType *BT = SeltTy->getAs<BuiltinType>()) {
524         if (BT->isIntegerType()) {
525           // We need to use the size of the structure, padding
526           // bit-fields can adjust that to be larger than the single
527           // element type.
528           uint64_t Size = getContext().getTypeSize(RetTy);
529           return ABIArgInfo::getDirect(
530             llvm::IntegerType::get(getVMContext(), (unsigned)Size));
531         }
532 
533         if (BT->getKind() == BuiltinType::Float) {
534           assert(getContext().getTypeSize(RetTy) ==
535                  getContext().getTypeSize(SeltTy) &&
536                  "Unexpect single element structure size!");
537           return ABIArgInfo::getDirect(llvm::Type::getFloatTy(getVMContext()));
538         }
539 
540         if (BT->getKind() == BuiltinType::Double) {
541           assert(getContext().getTypeSize(RetTy) ==
542                  getContext().getTypeSize(SeltTy) &&
543                  "Unexpect single element structure size!");
544           return ABIArgInfo::getDirect(llvm::Type::getDoubleTy(getVMContext()));
545         }
546       } else if (SeltTy->isPointerType()) {
547         // FIXME: It would be really nice if this could come out as the proper
548         // pointer type.
549         const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext());
550         return ABIArgInfo::getDirect(PtrTy);
551       } else if (SeltTy->isVectorType()) {
552         // 64- and 128-bit vectors are never returned in a
553         // register when inside a structure.
554         uint64_t Size = getContext().getTypeSize(RetTy);
555         if (Size == 64 || Size == 128)
556           return ABIArgInfo::getIndirect(0);
557 
558         return classifyReturnType(QualType(SeltTy, 0));
559       }
560     }
561 
562     // Small structures which are register sized are generally returned
563     // in a register.
564     if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext())) {
565       uint64_t Size = getContext().getTypeSize(RetTy);
566       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
567     }
568 
569     return ABIArgInfo::getIndirect(0);
570   }
571 
572   // Treat an enum type as its underlying type.
573   if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
574     RetTy = EnumTy->getDecl()->getIntegerType();
575 
576   return (RetTy->isPromotableIntegerType() ?
577           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
578 }
579 
580 static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
581   const RecordType *RT = Ty->getAs<RecordType>();
582   if (!RT)
583     return 0;
584   const RecordDecl *RD = RT->getDecl();
585 
586   // If this is a C++ record, check the bases first.
587   if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
588     for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
589            e = CXXRD->bases_end(); i != e; ++i)
590       if (!isRecordWithSSEVectorType(Context, i->getType()))
591         return false;
592 
593   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
594        i != e; ++i) {
595     QualType FT = i->getType();
596 
597     if (FT->getAs<VectorType>() && Context.getTypeSize(Ty) == 128)
598       return true;
599 
600     if (isRecordWithSSEVectorType(Context, FT))
601       return true;
602   }
603 
604   return false;
605 }
606 
607 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
608                                                  unsigned Align) const {
609   // Otherwise, if the alignment is less than or equal to the minimum ABI
610   // alignment, just use the default; the backend will handle this.
611   if (Align <= MinABIStackAlignInBytes)
612     return 0; // Use default alignment.
613 
614   // On non-Darwin, the stack type alignment is always 4.
615   if (!IsDarwinVectorABI) {
616     // Set explicit alignment, since we may need to realign the top.
617     return MinABIStackAlignInBytes;
618   }
619 
620   // Otherwise, if the type contains an SSE vector type, the alignment is 16.
621   if (isRecordWithSSEVectorType(getContext(), Ty))
622     return 16;
623 
624   return MinABIStackAlignInBytes;
625 }
626 
627 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
628   if (!ByVal)
629     return ABIArgInfo::getIndirect(0, false);
630 
631   // Compute the byval alignment.
632   unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
633   unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
634   if (StackAlign == 0)
635     return ABIArgInfo::getIndirect(0);
636 
637   // If the stack alignment is less than the type alignment, realign the
638   // argument.
639   if (StackAlign < TypeAlign)
640     return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
641                                    /*Realign=*/true);
642 
643   return ABIArgInfo::getIndirect(StackAlign);
644 }
645 
646 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
647   // FIXME: Set alignment on indirect arguments.
648   if (isAggregateTypeForABI(Ty)) {
649     // Structures with flexible arrays are always indirect.
650     if (const RecordType *RT = Ty->getAs<RecordType>()) {
651       // Structures with either a non-trivial destructor or a non-trivial
652       // copy constructor are always indirect.
653       if (hasNonTrivialDestructorOrCopyConstructor(RT))
654         return getIndirectResult(Ty, /*ByVal=*/false);
655 
656       if (RT->getDecl()->hasFlexibleArrayMember())
657         return getIndirectResult(Ty);
658     }
659 
660     // Ignore empty structs.
661     if (Ty->isStructureType() && getContext().getTypeSize(Ty) == 0)
662       return ABIArgInfo::getIgnore();
663 
664     // Expand small (<= 128-bit) record types when we know that the stack layout
665     // of those arguments will match the struct. This is important because the
666     // LLVM backend isn't smart enough to remove byval, which inhibits many
667     // optimizations.
668     if (getContext().getTypeSize(Ty) <= 4*32 &&
669         canExpandIndirectArgument(Ty, getContext()))
670       return ABIArgInfo::getExpand();
671 
672     return getIndirectResult(Ty);
673   }
674 
675   if (const VectorType *VT = Ty->getAs<VectorType>()) {
676     // On Darwin, some vectors are passed in memory, we handle this by passing
677     // it as an i8/i16/i32/i64.
678     if (IsDarwinVectorABI) {
679       uint64_t Size = getContext().getTypeSize(Ty);
680       if ((Size == 8 || Size == 16 || Size == 32) ||
681           (Size == 64 && VT->getNumElements() == 1))
682         return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
683                                                             Size));
684     }
685 
686     const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
687     if (UseX86_MMXType(IRType)) {
688       ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
689       AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
690       return AAI;
691     }
692 
693     return ABIArgInfo::getDirect();
694   }
695 
696 
697   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
698     Ty = EnumTy->getDecl()->getIntegerType();
699 
700   return (Ty->isPromotableIntegerType() ?
701           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
702 }
703 
704 llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
705                                       CodeGenFunction &CGF) const {
706   const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
707   const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
708 
709   CGBuilderTy &Builder = CGF.Builder;
710   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
711                                                        "ap");
712   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
713   llvm::Type *PTy =
714     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
715   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
716 
717   uint64_t Offset =
718     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
719   llvm::Value *NextAddr =
720     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
721                       "ap.next");
722   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
723 
724   return AddrTyped;
725 }
726 
727 void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
728                                                   llvm::GlobalValue *GV,
729                                             CodeGen::CodeGenModule &CGM) const {
730   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
731     if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
732       // Get the LLVM function.
733       llvm::Function *Fn = cast<llvm::Function>(GV);
734 
735       // Now add the 'alignstack' attribute with a value of 16.
736       Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16));
737     }
738   }
739 }
740 
741 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
742                                                CodeGen::CodeGenFunction &CGF,
743                                                llvm::Value *Address) const {
744   CodeGen::CGBuilderTy &Builder = CGF.Builder;
745   llvm::LLVMContext &Context = CGF.getLLVMContext();
746 
747   const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
748   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
749 
750   // 0-7 are the eight integer registers;  the order is different
751   //   on Darwin (for EH), but the range is the same.
752   // 8 is %eip.
753   AssignToArrayRange(Builder, Address, Four8, 0, 8);
754 
755   if (CGF.CGM.isTargetDarwin()) {
756     // 12-16 are st(0..4).  Not sure why we stop at 4.
757     // These have size 16, which is sizeof(long double) on
758     // platforms with 8-byte alignment for that type.
759     llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
760     AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
761 
762   } else {
763     // 9 is %eflags, which doesn't get a size on Darwin for some
764     // reason.
765     Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));
766 
767     // 11-16 are st(0..5).  Not sure why we stop at 5.
768     // These have size 12, which is sizeof(long double) on
769     // platforms with 4-byte alignment for that type.
770     llvm::Value *Twelve8 = llvm::ConstantInt::get(i8, 12);
771     AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
772   }
773 
774   return false;
775 }
776 
777 //===----------------------------------------------------------------------===//
778 // X86-64 ABI Implementation
779 //===----------------------------------------------------------------------===//
780 
781 
782 namespace {
783 /// X86_64ABIInfo - The X86_64 ABI information.
784 class X86_64ABIInfo : public ABIInfo {
785   enum Class {
786     Integer = 0,
787     SSE,
788     SSEUp,
789     X87,
790     X87Up,
791     ComplexX87,
792     NoClass,
793     Memory
794   };
795 
796   /// merge - Implement the X86_64 ABI merging algorithm.
797   ///
798   /// Merge an accumulating classification \arg Accum with a field
799   /// classification \arg Field.
800   ///
801   /// \param Accum - The accumulating classification. This should
802   /// always be either NoClass or the result of a previous merge
803   /// call. In addition, this should never be Memory (the caller
804   /// should just return Memory for the aggregate).
805   static Class merge(Class Accum, Class Field);
806 
807   /// classify - Determine the x86_64 register classes in which the
808   /// given type T should be passed.
809   ///
810   /// \param Lo - The classification for the parts of the type
811   /// residing in the low word of the containing object.
812   ///
813   /// \param Hi - The classification for the parts of the type
814   /// residing in the high word of the containing object.
815   ///
816   /// \param OffsetBase - The bit offset of this type in the
817   /// containing object.  Some parameters are classified different
818   /// depending on whether they straddle an eightbyte boundary.
819   ///
820   /// If a word is unused its result will be NoClass; if a type should
821   /// be passed in Memory then at least the classification of \arg Lo
822   /// will be Memory.
823   ///
824   /// The \arg Lo class will be NoClass iff the argument is ignored.
825   ///
826   /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
827   /// also be ComplexX87.
828   void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;
829 
830   const llvm::Type *Get16ByteVectorType(QualType Ty) const;
831   const llvm::Type *GetSSETypeAtOffset(const llvm::Type *IRType,
832                                        unsigned IROffset, QualType SourceTy,
833                                        unsigned SourceOffset) const;
834   const llvm::Type *GetINTEGERTypeAtOffset(const llvm::Type *IRType,
835                                            unsigned IROffset, QualType SourceTy,
836                                            unsigned SourceOffset) const;
837 
838   /// getIndirectResult - Give a source type \arg Ty, return a suitable result
839   /// such that the argument will be returned in memory.
840   ABIArgInfo getIndirectReturnResult(QualType Ty) const;
841 
842   /// getIndirectResult - Give a source type \arg Ty, return a suitable result
843   /// such that the argument will be passed in memory.
844   ABIArgInfo getIndirectResult(QualType Ty) const;
845 
846   ABIArgInfo classifyReturnType(QualType RetTy) const;
847 
848   ABIArgInfo classifyArgumentType(QualType Ty,
849                                   unsigned &neededInt,
850                                   unsigned &neededSSE) const;
851 
852 public:
853   X86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
854 
855   virtual void computeInfo(CGFunctionInfo &FI) const;
856 
857   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
858                                  CodeGenFunction &CGF) const;
859 };
860 
861 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
862 class WinX86_64ABIInfo : public X86_64ABIInfo {
863 public:
864   WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : X86_64ABIInfo(CGT) {}
865 
866   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
867                                  CodeGenFunction &CGF) const;
868 };
869 
870 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
871 public:
872   X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
873     : TargetCodeGenInfo(new X86_64ABIInfo(CGT)) {}
874 
875   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
876     return 7;
877   }
878 
879   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
880                                llvm::Value *Address) const {
881     CodeGen::CGBuilderTy &Builder = CGF.Builder;
882     llvm::LLVMContext &Context = CGF.getLLVMContext();
883 
884     const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
885     llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
886 
887     // 0-15 are the 16 integer registers.
888     // 16 is %rip.
889     AssignToArrayRange(Builder, Address, Eight8, 0, 16);
890 
891     return false;
892   }
893 };
894 
895 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
896 public:
897   WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
898     : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
899 
900   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
901     return 7;
902   }
903 
904   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
905                                llvm::Value *Address) const {
906     CodeGen::CGBuilderTy &Builder = CGF.Builder;
907     llvm::LLVMContext &Context = CGF.getLLVMContext();
908 
909     const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
910     llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
911 
912     // 0-15 are the 16 integer registers.
913     // 16 is %rip.
914     AssignToArrayRange(Builder, Address, Eight8, 0, 16);
915 
916     return false;
917   }
918 };
919 
920 }
921 
922 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
923   // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
924   // classified recursively so that always two fields are
925   // considered. The resulting class is calculated according to
926   // the classes of the fields in the eightbyte:
927   //
928   // (a) If both classes are equal, this is the resulting class.
929   //
930   // (b) If one of the classes is NO_CLASS, the resulting class is
931   // the other class.
932   //
933   // (c) If one of the classes is MEMORY, the result is the MEMORY
934   // class.
935   //
936   // (d) If one of the classes is INTEGER, the result is the
937   // INTEGER.
938   //
939   // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
940   // MEMORY is used as class.
941   //
942   // (f) Otherwise class SSE is used.
943 
944   // Accum should never be memory (we should have returned) or
945   // ComplexX87 (because this cannot be passed in a structure).
946   assert((Accum != Memory && Accum != ComplexX87) &&
947          "Invalid accumulated classification during merge.");
948   if (Accum == Field || Field == NoClass)
949     return Accum;
950   if (Field == Memory)
951     return Memory;
952   if (Accum == NoClass)
953     return Field;
954   if (Accum == Integer || Field == Integer)
955     return Integer;
956   if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
957       Accum == X87 || Accum == X87Up)
958     return Memory;
959   return SSE;
960 }
961 
962 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
963                              Class &Lo, Class &Hi) const {
964   // FIXME: This code can be simplified by introducing a simple value class for
965   // Class pairs with appropriate constructor methods for the various
966   // situations.
967 
968   // FIXME: Some of the split computations are wrong; unaligned vectors
969   // shouldn't be passed in registers for example, so there is no chance they
970   // can straddle an eightbyte. Verify & simplify.
971 
972   Lo = Hi = NoClass;
973 
974   Class &Current = OffsetBase < 64 ? Lo : Hi;
975   Current = Memory;
976 
977   if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
978     BuiltinType::Kind k = BT->getKind();
979 
980     if (k == BuiltinType::Void) {
981       Current = NoClass;
982     } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
983       Lo = Integer;
984       Hi = Integer;
985     } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
986       Current = Integer;
987     } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
988       Current = SSE;
989     } else if (k == BuiltinType::LongDouble) {
990       Lo = X87;
991       Hi = X87Up;
992     }
993     // FIXME: _Decimal32 and _Decimal64 are SSE.
994     // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
995     return;
996   }
997 
998   if (const EnumType *ET = Ty->getAs<EnumType>()) {
999     // Classify the underlying integer type.
1000     classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
1001     return;
1002   }
1003 
1004   if (Ty->hasPointerRepresentation()) {
1005     Current = Integer;
1006     return;
1007   }
1008 
1009   if (Ty->isMemberPointerType()) {
1010     if (Ty->isMemberFunctionPointerType())
1011       Lo = Hi = Integer;
1012     else
1013       Current = Integer;
1014     return;
1015   }
1016 
1017   if (const VectorType *VT = Ty->getAs<VectorType>()) {
1018     uint64_t Size = getContext().getTypeSize(VT);
1019     if (Size == 32) {
1020       // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
1021       // float> as integer.
1022       Current = Integer;
1023 
1024       // If this type crosses an eightbyte boundary, it should be
1025       // split.
1026       uint64_t EB_Real = (OffsetBase) / 64;
1027       uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
1028       if (EB_Real != EB_Imag)
1029         Hi = Lo;
1030     } else if (Size == 64) {
1031       // gcc passes <1 x double> in memory. :(
1032       if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
1033         return;
1034 
1035       // gcc passes <1 x long long> as INTEGER.
1036       if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
1037           VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
1038           VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
1039           VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
1040         Current = Integer;
1041       else
1042         Current = SSE;
1043 
1044       // If this type crosses an eightbyte boundary, it should be
1045       // split.
1046       if (OffsetBase && OffsetBase != 64)
1047         Hi = Lo;
1048     } else if (Size == 128) {
1049       Lo = SSE;
1050       Hi = SSEUp;
1051     }
1052     return;
1053   }
1054 
1055   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
1056     QualType ET = getContext().getCanonicalType(CT->getElementType());
1057 
1058     uint64_t Size = getContext().getTypeSize(Ty);
1059     if (ET->isIntegralOrEnumerationType()) {
1060       if (Size <= 64)
1061         Current = Integer;
1062       else if (Size <= 128)
1063         Lo = Hi = Integer;
1064     } else if (ET == getContext().FloatTy)
1065       Current = SSE;
1066     else if (ET == getContext().DoubleTy)
1067       Lo = Hi = SSE;
1068     else if (ET == getContext().LongDoubleTy)
1069       Current = ComplexX87;
1070 
1071     // If this complex type crosses an eightbyte boundary then it
1072     // should be split.
1073     uint64_t EB_Real = (OffsetBase) / 64;
1074     uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
1075     if (Hi == NoClass && EB_Real != EB_Imag)
1076       Hi = Lo;
1077 
1078     return;
1079   }
1080 
1081   if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
1082     // Arrays are treated like structures.
1083 
1084     uint64_t Size = getContext().getTypeSize(Ty);
1085 
1086     // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1087     // than two eightbytes, ..., it has class MEMORY.
1088     if (Size > 128)
1089       return;
1090 
1091     // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
1092     // fields, it has class MEMORY.
1093     //
1094     // Only need to check alignment of array base.
1095     if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
1096       return;
1097 
1098     // Otherwise implement simplified merge. We could be smarter about
1099     // this, but it isn't worth it and would be harder to verify.
1100     Current = NoClass;
1101     uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
1102     uint64_t ArraySize = AT->getSize().getZExtValue();
1103     for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
1104       Class FieldLo, FieldHi;
1105       classify(AT->getElementType(), Offset, FieldLo, FieldHi);
1106       Lo = merge(Lo, FieldLo);
1107       Hi = merge(Hi, FieldHi);
1108       if (Lo == Memory || Hi == Memory)
1109         break;
1110     }
1111 
1112     // Do post merger cleanup (see below). Only case we worry about is Memory.
1113     if (Hi == Memory)
1114       Lo = Memory;
1115     assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
1116     return;
1117   }
1118 
1119   if (const RecordType *RT = Ty->getAs<RecordType>()) {
1120     uint64_t Size = getContext().getTypeSize(Ty);
1121 
1122     // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1123     // than two eightbytes, ..., it has class MEMORY.
1124     if (Size > 128)
1125       return;
1126 
1127     // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
1128     // copy constructor or a non-trivial destructor, it is passed by invisible
1129     // reference.
1130     if (hasNonTrivialDestructorOrCopyConstructor(RT))
1131       return;
1132 
1133     const RecordDecl *RD = RT->getDecl();
1134 
1135     // Assume variable sized types are passed in memory.
1136     if (RD->hasFlexibleArrayMember())
1137       return;
1138 
1139     const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
1140 
1141     // Reset Lo class, this will be recomputed.
1142     Current = NoClass;
1143 
1144     // If this is a C++ record, classify the bases first.
1145     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1146       for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
1147              e = CXXRD->bases_end(); i != e; ++i) {
1148         assert(!i->isVirtual() && !i->getType()->isDependentType() &&
1149                "Unexpected base class!");
1150         const CXXRecordDecl *Base =
1151           cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
1152 
1153         // Classify this field.
1154         //
1155         // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
1156         // single eightbyte, each is classified separately. Each eightbyte gets
1157         // initialized to class NO_CLASS.
1158         Class FieldLo, FieldHi;
1159         uint64_t Offset = OffsetBase + Layout.getBaseClassOffsetInBits(Base);
1160         classify(i->getType(), Offset, FieldLo, FieldHi);
1161         Lo = merge(Lo, FieldLo);
1162         Hi = merge(Hi, FieldHi);
1163         if (Lo == Memory || Hi == Memory)
1164           break;
1165       }
1166     }
1167 
1168     // Classify the fields one at a time, merging the results.
1169     unsigned idx = 0;
1170     for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1171            i != e; ++i, ++idx) {
1172       uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1173       bool BitField = i->isBitField();
1174 
1175       // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
1176       // fields, it has class MEMORY.
1177       //
1178       // Note, skip this test for bit-fields, see below.
1179       if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
1180         Lo = Memory;
1181         return;
1182       }
1183 
1184       // Classify this field.
1185       //
1186       // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
1187       // exceeds a single eightbyte, each is classified
1188       // separately. Each eightbyte gets initialized to class
1189       // NO_CLASS.
1190       Class FieldLo, FieldHi;
1191 
1192       // Bit-fields require special handling, they do not force the
1193       // structure to be passed in memory even if unaligned, and
1194       // therefore they can straddle an eightbyte.
1195       if (BitField) {
1196         // Ignore padding bit-fields.
1197         if (i->isUnnamedBitfield())
1198           continue;
1199 
1200         uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1201         uint64_t Size =
1202           i->getBitWidth()->EvaluateAsInt(getContext()).getZExtValue();
1203 
1204         uint64_t EB_Lo = Offset / 64;
1205         uint64_t EB_Hi = (Offset + Size - 1) / 64;
1206         FieldLo = FieldHi = NoClass;
1207         if (EB_Lo) {
1208           assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
1209           FieldLo = NoClass;
1210           FieldHi = Integer;
1211         } else {
1212           FieldLo = Integer;
1213           FieldHi = EB_Hi ? Integer : NoClass;
1214         }
1215       } else
1216         classify(i->getType(), Offset, FieldLo, FieldHi);
1217       Lo = merge(Lo, FieldLo);
1218       Hi = merge(Hi, FieldHi);
1219       if (Lo == Memory || Hi == Memory)
1220         break;
1221     }
1222 
1223     // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
1224     //
1225     // (a) If one of the classes is MEMORY, the whole argument is
1226     // passed in memory.
1227     //
1228     // (b) If SSEUP is not preceeded by SSE, it is converted to SSE.
1229 
1230     // The first of these conditions is guaranteed by how we implement
1231     // the merge (just bail).
1232     //
1233     // The second condition occurs in the case of unions; for example
1234     // union { _Complex double; unsigned; }.
1235     if (Hi == Memory)
1236       Lo = Memory;
1237     if (Hi == SSEUp && Lo != SSE)
1238       Hi = SSE;
1239   }
1240 }
1241 
1242 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
1243   // If this is a scalar LLVM value then assume LLVM will pass it in the right
1244   // place naturally.
1245   if (!isAggregateTypeForABI(Ty)) {
1246     // Treat an enum type as its underlying type.
1247     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1248       Ty = EnumTy->getDecl()->getIntegerType();
1249 
1250     return (Ty->isPromotableIntegerType() ?
1251             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1252   }
1253 
1254   return ABIArgInfo::getIndirect(0);
1255 }
1256 
1257 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
1258   // If this is a scalar LLVM value then assume LLVM will pass it in the right
1259   // place naturally.
1260   if (!isAggregateTypeForABI(Ty)) {
1261     // Treat an enum type as its underlying type.
1262     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1263       Ty = EnumTy->getDecl()->getIntegerType();
1264 
1265     return (Ty->isPromotableIntegerType() ?
1266             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1267   }
1268 
1269   if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
1270     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
1271 
1272   // Compute the byval alignment. We trust the back-end to honor the
1273   // minimum ABI alignment for byval, to make cleaner IR.
1274   const unsigned MinABIAlign = 8;
1275   unsigned Align = getContext().getTypeAlign(Ty) / 8;
1276   if (Align > MinABIAlign)
1277     return ABIArgInfo::getIndirect(Align);
1278   return ABIArgInfo::getIndirect(0);
1279 }
1280 
1281 /// Get16ByteVectorType - The ABI specifies that a value should be passed in an
1282 /// full vector XMM register.  Pick an LLVM IR type that will be passed as a
1283 /// vector register.
1284 const llvm::Type *X86_64ABIInfo::Get16ByteVectorType(QualType Ty) const {
1285   const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
1286 
1287   // Wrapper structs that just contain vectors are passed just like vectors,
1288   // strip them off if present.
1289   const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
1290   while (STy && STy->getNumElements() == 1) {
1291     IRType = STy->getElementType(0);
1292     STy = dyn_cast<llvm::StructType>(IRType);
1293   }
1294 
1295   // If the preferred type is a 16-byte vector, prefer to pass it.
1296   if (const llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
1297     const llvm::Type *EltTy = VT->getElementType();
1298     if (VT->getBitWidth() == 128 &&
1299         (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
1300          EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
1301          EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
1302          EltTy->isIntegerTy(128)))
1303       return VT;
1304   }
1305 
1306   return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
1307 }
1308 
1309 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
1310 /// is known to either be off the end of the specified type or being in
1311 /// alignment padding.  The user type specified is known to be at most 128 bits
1312 /// in size, and have passed through X86_64ABIInfo::classify with a successful
1313 /// classification that put one of the two halves in the INTEGER class.
1314 ///
1315 /// It is conservatively correct to return false.
1316 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
1317                                   unsigned EndBit, ASTContext &Context) {
1318   // If the bytes being queried are off the end of the type, there is no user
1319   // data hiding here.  This handles analysis of builtins, vectors and other
1320   // types that don't contain interesting padding.
1321   unsigned TySize = (unsigned)Context.getTypeSize(Ty);
1322   if (TySize <= StartBit)
1323     return true;
1324 
1325   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
1326     unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
1327     unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
1328 
1329     // Check each element to see if the element overlaps with the queried range.
1330     for (unsigned i = 0; i != NumElts; ++i) {
1331       // If the element is after the span we care about, then we're done..
1332       unsigned EltOffset = i*EltSize;
1333       if (EltOffset >= EndBit) break;
1334 
1335       unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
1336       if (!BitsContainNoUserData(AT->getElementType(), EltStart,
1337                                  EndBit-EltOffset, Context))
1338         return false;
1339     }
1340     // If it overlaps no elements, then it is safe to process as padding.
1341     return true;
1342   }
1343 
1344   if (const RecordType *RT = Ty->getAs<RecordType>()) {
1345     const RecordDecl *RD = RT->getDecl();
1346     const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
1347 
1348     // If this is a C++ record, check the bases first.
1349     if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1350       for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
1351            e = CXXRD->bases_end(); i != e; ++i) {
1352         assert(!i->isVirtual() && !i->getType()->isDependentType() &&
1353                "Unexpected base class!");
1354         const CXXRecordDecl *Base =
1355           cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
1356 
1357         // If the base is after the span we care about, ignore it.
1358         unsigned BaseOffset = (unsigned)Layout.getBaseClassOffsetInBits(Base);
1359         if (BaseOffset >= EndBit) continue;
1360 
1361         unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
1362         if (!BitsContainNoUserData(i->getType(), BaseStart,
1363                                    EndBit-BaseOffset, Context))
1364           return false;
1365       }
1366     }
1367 
1368     // Verify that no field has data that overlaps the region of interest.  Yes
1369     // this could be sped up a lot by being smarter about queried fields,
1370     // however we're only looking at structs up to 16 bytes, so we don't care
1371     // much.
1372     unsigned idx = 0;
1373     for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1374          i != e; ++i, ++idx) {
1375       unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
1376 
1377       // If we found a field after the region we care about, then we're done.
1378       if (FieldOffset >= EndBit) break;
1379 
1380       unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
1381       if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
1382                                  Context))
1383         return false;
1384     }
1385 
1386     // If nothing in this record overlapped the area of interest, then we're
1387     // clean.
1388     return true;
1389   }
1390 
1391   return false;
1392 }
1393 
1394 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
1395 /// float member at the specified offset.  For example, {int,{float}} has a
1396 /// float at offset 4.  It is conservatively correct for this routine to return
1397 /// false.
1398 static bool ContainsFloatAtOffset(const llvm::Type *IRType, unsigned IROffset,
1399                                   const llvm::TargetData &TD) {
1400   // Base case if we find a float.
1401   if (IROffset == 0 && IRType->isFloatTy())
1402     return true;
1403 
1404   // If this is a struct, recurse into the field at the specified offset.
1405   if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
1406     const llvm::StructLayout *SL = TD.getStructLayout(STy);
1407     unsigned Elt = SL->getElementContainingOffset(IROffset);
1408     IROffset -= SL->getElementOffset(Elt);
1409     return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
1410   }
1411 
1412   // If this is an array, recurse into the field at the specified offset.
1413   if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
1414     const llvm::Type *EltTy = ATy->getElementType();
1415     unsigned EltSize = TD.getTypeAllocSize(EltTy);
1416     IROffset -= IROffset/EltSize*EltSize;
1417     return ContainsFloatAtOffset(EltTy, IROffset, TD);
1418   }
1419 
1420   return false;
1421 }
1422 
1423 
1424 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
1425 /// low 8 bytes of an XMM register, corresponding to the SSE class.
1426 const llvm::Type *X86_64ABIInfo::
1427 GetSSETypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
1428                    QualType SourceTy, unsigned SourceOffset) const {
1429   // The only three choices we have are either double, <2 x float>, or float. We
1430   // pass as float if the last 4 bytes is just padding.  This happens for
1431   // structs that contain 3 floats.
1432   if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
1433                             SourceOffset*8+64, getContext()))
1434     return llvm::Type::getFloatTy(getVMContext());
1435 
1436   // We want to pass as <2 x float> if the LLVM IR type contains a float at
1437   // offset+0 and offset+4.  Walk the LLVM IR type to find out if this is the
1438   // case.
1439   if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) &&
1440       ContainsFloatAtOffset(IRType, IROffset+4, getTargetData()))
1441     return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
1442 
1443   return llvm::Type::getDoubleTy(getVMContext());
1444 }
1445 
1446 
1447 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
1448 /// an 8-byte GPR.  This means that we either have a scalar or we are talking
1449 /// about the high or low part of an up-to-16-byte struct.  This routine picks
1450 /// the best LLVM IR type to represent this, which may be i64 or may be anything
1451 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
1452 /// etc).
1453 ///
1454 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
1455 /// the source type.  IROffset is an offset in bytes into the LLVM IR type that
1456 /// the 8-byte value references.  PrefType may be null.
1457 ///
1458 /// SourceTy is the source level type for the entire argument.  SourceOffset is
1459 /// an offset into this that we're processing (which is always either 0 or 8).
1460 ///
1461 const llvm::Type *X86_64ABIInfo::
1462 GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
1463                        QualType SourceTy, unsigned SourceOffset) const {
1464   // If we're dealing with an un-offset LLVM IR type, then it means that we're
1465   // returning an 8-byte unit starting with it.  See if we can safely use it.
1466   if (IROffset == 0) {
1467     // Pointers and int64's always fill the 8-byte unit.
1468     if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64))
1469       return IRType;
1470 
1471     // If we have a 1/2/4-byte integer, we can use it only if the rest of the
1472     // goodness in the source type is just tail padding.  This is allowed to
1473     // kick in for struct {double,int} on the int, but not on
1474     // struct{double,int,int} because we wouldn't return the second int.  We
1475     // have to do this analysis on the source type because we can't depend on
1476     // unions being lowered a specific way etc.
1477     if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
1478         IRType->isIntegerTy(32)) {
1479       unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth();
1480 
1481       if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
1482                                 SourceOffset*8+64, getContext()))
1483         return IRType;
1484     }
1485   }
1486 
1487   if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
1488     // If this is a struct, recurse into the field at the specified offset.
1489     const llvm::StructLayout *SL = getTargetData().getStructLayout(STy);
1490     if (IROffset < SL->getSizeInBytes()) {
1491       unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
1492       IROffset -= SL->getElementOffset(FieldIdx);
1493 
1494       return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
1495                                     SourceTy, SourceOffset);
1496     }
1497   }
1498 
1499   if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
1500     const llvm::Type *EltTy = ATy->getElementType();
1501     unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
1502     unsigned EltOffset = IROffset/EltSize*EltSize;
1503     return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
1504                                   SourceOffset);
1505   }
1506 
1507   // Okay, we don't have any better idea of what to pass, so we pass this in an
1508   // integer register that isn't too big to fit the rest of the struct.
1509   unsigned TySizeInBytes =
1510     (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
1511 
1512   assert(TySizeInBytes != SourceOffset && "Empty field?");
1513 
1514   // It is always safe to classify this as an integer type up to i64 that
1515   // isn't larger than the structure.
1516   return llvm::IntegerType::get(getVMContext(),
1517                                 std::min(TySizeInBytes-SourceOffset, 8U)*8);
1518 }
1519 
1520 
1521 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
1522 /// be used as elements of a two register pair to pass or return, return a
1523 /// first class aggregate to represent them.  For example, if the low part of
1524 /// a by-value argument should be passed as i32* and the high part as float,
1525 /// return {i32*, float}.
1526 static const llvm::Type *
1527 GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
1528                            const llvm::TargetData &TD) {
1529   // In order to correctly satisfy the ABI, we need to the high part to start
1530   // at offset 8.  If the high and low parts we inferred are both 4-byte types
1531   // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
1532   // the second element at offset 8.  Check for this:
1533   unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
1534   unsigned HiAlign = TD.getABITypeAlignment(Hi);
1535   unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
1536   assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
1537 
1538   // To handle this, we have to increase the size of the low part so that the
1539   // second element will start at an 8 byte offset.  We can't increase the size
1540   // of the second element because it might make us access off the end of the
1541   // struct.
1542   if (HiStart != 8) {
1543     // There are only two sorts of types the ABI generation code can produce for
1544     // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
1545     // Promote these to a larger type.
1546     if (Lo->isFloatTy())
1547       Lo = llvm::Type::getDoubleTy(Lo->getContext());
1548     else {
1549       assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
1550       Lo = llvm::Type::getInt64Ty(Lo->getContext());
1551     }
1552   }
1553 
1554   const llvm::StructType *Result =
1555     llvm::StructType::get(Lo->getContext(), Lo, Hi, NULL);
1556 
1557 
1558   // Verify that the second element is at an 8-byte offset.
1559   assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
1560          "Invalid x86-64 argument pair!");
1561   return Result;
1562 }
1563 
1564 ABIArgInfo X86_64ABIInfo::
1565 classifyReturnType(QualType RetTy) const {
1566   // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
1567   // classification algorithm.
1568   X86_64ABIInfo::Class Lo, Hi;
1569   classify(RetTy, 0, Lo, Hi);
1570 
1571   // Check some invariants.
1572   assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
1573   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
1574 
1575   const llvm::Type *ResType = 0;
1576   switch (Lo) {
1577   case NoClass:
1578     if (Hi == NoClass)
1579       return ABIArgInfo::getIgnore();
1580     // If the low part is just padding, it takes no register, leave ResType
1581     // null.
1582     assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
1583            "Unknown missing lo part");
1584     break;
1585 
1586   case SSEUp:
1587   case X87Up:
1588     assert(0 && "Invalid classification for lo word.");
1589 
1590     // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
1591     // hidden argument.
1592   case Memory:
1593     return getIndirectReturnResult(RetTy);
1594 
1595     // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
1596     // available register of the sequence %rax, %rdx is used.
1597   case Integer:
1598     ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0,
1599                                      RetTy, 0);
1600 
1601     // If we have a sign or zero extended integer, make sure to return Extend
1602     // so that the parameter gets the right LLVM IR attributes.
1603     if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
1604       // Treat an enum type as its underlying type.
1605       if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1606         RetTy = EnumTy->getDecl()->getIntegerType();
1607 
1608       if (RetTy->isIntegralOrEnumerationType() &&
1609           RetTy->isPromotableIntegerType())
1610         return ABIArgInfo::getExtend();
1611     }
1612     break;
1613 
1614     // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
1615     // available SSE register of the sequence %xmm0, %xmm1 is used.
1616   case SSE:
1617     ResType = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0, RetTy, 0);
1618     break;
1619 
1620     // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
1621     // returned on the X87 stack in %st0 as 80-bit x87 number.
1622   case X87:
1623     ResType = llvm::Type::getX86_FP80Ty(getVMContext());
1624     break;
1625 
1626     // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
1627     // part of the value is returned in %st0 and the imaginary part in
1628     // %st1.
1629   case ComplexX87:
1630     assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
1631     ResType = llvm::StructType::get(getVMContext(),
1632                                     llvm::Type::getX86_FP80Ty(getVMContext()),
1633                                     llvm::Type::getX86_FP80Ty(getVMContext()),
1634                                     NULL);
1635     break;
1636   }
1637 
1638   const llvm::Type *HighPart = 0;
1639   switch (Hi) {
1640     // Memory was handled previously and X87 should
1641     // never occur as a hi class.
1642   case Memory:
1643   case X87:
1644     assert(0 && "Invalid classification for hi word.");
1645 
1646   case ComplexX87: // Previously handled.
1647   case NoClass:
1648     break;
1649 
1650   case Integer:
1651     HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy),
1652                                       8, RetTy, 8);
1653     if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
1654       return ABIArgInfo::getDirect(HighPart, 8);
1655     break;
1656   case SSE:
1657     HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 8, RetTy, 8);
1658     if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
1659       return ABIArgInfo::getDirect(HighPart, 8);
1660     break;
1661 
1662     // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
1663     // is passed in the upper half of the last used SSE register.
1664     //
1665     // SSEUP should always be preceeded by SSE, just widen.
1666   case SSEUp:
1667     assert(Lo == SSE && "Unexpected SSEUp classification.");
1668     ResType = Get16ByteVectorType(RetTy);
1669     break;
1670 
1671     // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
1672     // returned together with the previous X87 value in %st0.
1673   case X87Up:
1674     // If X87Up is preceeded by X87, we don't need to do
1675     // anything. However, in some cases with unions it may not be
1676     // preceeded by X87. In such situations we follow gcc and pass the
1677     // extra bits in an SSE reg.
1678     if (Lo != X87) {
1679       HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy),
1680                                     8, RetTy, 8);
1681       if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
1682         return ABIArgInfo::getDirect(HighPart, 8);
1683     }
1684     break;
1685   }
1686 
1687   // If a high part was specified, merge it together with the low part.  It is
1688   // known to pass in the high eightbyte of the result.  We do this by forming a
1689   // first class struct aggregate with the high and low part: {low, high}
1690   if (HighPart)
1691     ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
1692 
1693   return ABIArgInfo::getDirect(ResType);
1694 }
1695 
1696 ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
1697                                                unsigned &neededSSE) const {
1698   X86_64ABIInfo::Class Lo, Hi;
1699   classify(Ty, 0, Lo, Hi);
1700 
1701   // Check some invariants.
1702   // FIXME: Enforce these by construction.
1703   assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
1704   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
1705 
1706   neededInt = 0;
1707   neededSSE = 0;
1708   const llvm::Type *ResType = 0;
1709   switch (Lo) {
1710   case NoClass:
1711     if (Hi == NoClass)
1712       return ABIArgInfo::getIgnore();
1713     // If the low part is just padding, it takes no register, leave ResType
1714     // null.
1715     assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
1716            "Unknown missing lo part");
1717     break;
1718 
1719     // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
1720     // on the stack.
1721   case Memory:
1722 
1723     // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
1724     // COMPLEX_X87, it is passed in memory.
1725   case X87:
1726   case ComplexX87:
1727     return getIndirectResult(Ty);
1728 
1729   case SSEUp:
1730   case X87Up:
1731     assert(0 && "Invalid classification for lo word.");
1732 
1733     // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
1734     // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
1735     // and %r9 is used.
1736   case Integer:
1737     ++neededInt;
1738 
1739     // Pick an 8-byte type based on the preferred type.
1740     ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 0, Ty, 0);
1741 
1742     // If we have a sign or zero extended integer, make sure to return Extend
1743     // so that the parameter gets the right LLVM IR attributes.
1744     if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
1745       // Treat an enum type as its underlying type.
1746       if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1747         Ty = EnumTy->getDecl()->getIntegerType();
1748 
1749       if (Ty->isIntegralOrEnumerationType() &&
1750           Ty->isPromotableIntegerType())
1751         return ABIArgInfo::getExtend();
1752     }
1753 
1754     break;
1755 
1756     // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
1757     // available SSE register is used, the registers are taken in the
1758     // order from %xmm0 to %xmm7.
1759   case SSE: {
1760     const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
1761     if (Hi != NoClass || !UseX86_MMXType(IRType))
1762       ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
1763     else
1764       // This is an MMX type. Treat it as such.
1765       ResType = llvm::Type::getX86_MMXTy(getVMContext());
1766 
1767     ++neededSSE;
1768     break;
1769   }
1770   }
1771 
1772   const llvm::Type *HighPart = 0;
1773   switch (Hi) {
1774     // Memory was handled previously, ComplexX87 and X87 should
1775     // never occur as hi classes, and X87Up must be preceed by X87,
1776     // which is passed in memory.
1777   case Memory:
1778   case X87:
1779   case ComplexX87:
1780     assert(0 && "Invalid classification for hi word.");
1781     break;
1782 
1783   case NoClass: break;
1784 
1785   case Integer:
1786     ++neededInt;
1787     // Pick an 8-byte type based on the preferred type.
1788     HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
1789 
1790     if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
1791       return ABIArgInfo::getDirect(HighPart, 8);
1792     break;
1793 
1794     // X87Up generally doesn't occur here (long double is passed in
1795     // memory), except in situations involving unions.
1796   case X87Up:
1797   case SSE:
1798     HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
1799 
1800     if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
1801       return ABIArgInfo::getDirect(HighPart, 8);
1802 
1803     ++neededSSE;
1804     break;
1805 
1806     // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
1807     // eightbyte is passed in the upper half of the last used SSE
1808     // register.  This only happens when 128-bit vectors are passed.
1809   case SSEUp:
1810     assert(Lo == SSE && "Unexpected SSEUp classification");
1811     ResType = Get16ByteVectorType(Ty);
1812     break;
1813   }
1814 
1815   // If a high part was specified, merge it together with the low part.  It is
1816   // known to pass in the high eightbyte of the result.  We do this by forming a
1817   // first class struct aggregate with the high and low part: {low, high}
1818   if (HighPart)
1819     ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
1820 
1821   return ABIArgInfo::getDirect(ResType);
1822 }
1823 
1824 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
1825 
1826   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
1827 
1828   // Keep track of the number of assigned registers.
1829   unsigned freeIntRegs = 6, freeSSERegs = 8;
1830 
1831   // If the return value is indirect, then the hidden argument is consuming one
1832   // integer register.
1833   if (FI.getReturnInfo().isIndirect())
1834     --freeIntRegs;
1835 
1836   // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
1837   // get assigned (in left-to-right order) for passing as follows...
1838   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
1839        it != ie; ++it) {
1840     unsigned neededInt, neededSSE;
1841     it->info = classifyArgumentType(it->type, neededInt, neededSSE);
1842 
1843     // AMD64-ABI 3.2.3p3: If there are no registers available for any
1844     // eightbyte of an argument, the whole argument is passed on the
1845     // stack. If registers have already been assigned for some
1846     // eightbytes of such an argument, the assignments get reverted.
1847     if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
1848       freeIntRegs -= neededInt;
1849       freeSSERegs -= neededSSE;
1850     } else {
1851       it->info = getIndirectResult(it->type);
1852     }
1853   }
1854 }
1855 
1856 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
1857                                         QualType Ty,
1858                                         CodeGenFunction &CGF) {
1859   llvm::Value *overflow_arg_area_p =
1860     CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
1861   llvm::Value *overflow_arg_area =
1862     CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
1863 
1864   // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
1865   // byte boundary if alignment needed by type exceeds 8 byte boundary.
1866   uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
1867   if (Align > 8) {
1868     // Note that we follow the ABI & gcc here, even though the type
1869     // could in theory have an alignment greater than 16. This case
1870     // shouldn't ever matter in practice.
1871 
1872     // overflow_arg_area = (overflow_arg_area + 15) & ~15;
1873     llvm::Value *Offset =
1874       llvm::ConstantInt::get(CGF.Int32Ty, 15);
1875     overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
1876     llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
1877                                                     CGF.Int64Ty);
1878     llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~15LL);
1879     overflow_arg_area =
1880       CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
1881                                  overflow_arg_area->getType(),
1882                                  "overflow_arg_area.align");
1883   }
1884 
1885   // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
1886   const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
1887   llvm::Value *Res =
1888     CGF.Builder.CreateBitCast(overflow_arg_area,
1889                               llvm::PointerType::getUnqual(LTy));
1890 
1891   // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
1892   // l->overflow_arg_area + sizeof(type).
1893   // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
1894   // an 8 byte boundary.
1895 
1896   uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
1897   llvm::Value *Offset =
1898       llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7)  & ~7);
1899   overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
1900                                             "overflow_arg_area.next");
1901   CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
1902 
1903   // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
1904   return Res;
1905 }
1906 
1907 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1908                                       CodeGenFunction &CGF) const {
1909   llvm::LLVMContext &VMContext = CGF.getLLVMContext();
1910 
1911   // Assume that va_list type is correct; should be pointer to LLVM type:
1912   // struct {
1913   //   i32 gp_offset;
1914   //   i32 fp_offset;
1915   //   i8* overflow_arg_area;
1916   //   i8* reg_save_area;
1917   // };
1918   unsigned neededInt, neededSSE;
1919 
1920   Ty = CGF.getContext().getCanonicalType(Ty);
1921   ABIArgInfo AI = classifyArgumentType(Ty, neededInt, neededSSE);
1922 
1923   // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
1924   // in the registers. If not go to step 7.
1925   if (!neededInt && !neededSSE)
1926     return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
1927 
1928   // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
1929   // general purpose registers needed to pass type and num_fp to hold
1930   // the number of floating point registers needed.
1931 
1932   // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
1933   // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
1934   // l->fp_offset > 304 - num_fp * 16 go to step 7.
1935   //
1936   // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
1937   // register save space).
1938 
1939   llvm::Value *InRegs = 0;
1940   llvm::Value *gp_offset_p = 0, *gp_offset = 0;
1941   llvm::Value *fp_offset_p = 0, *fp_offset = 0;
1942   if (neededInt) {
1943     gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
1944     gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
1945     InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
1946     InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
1947   }
1948 
1949   if (neededSSE) {
1950     fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
1951     fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
1952     llvm::Value *FitsInFP =
1953       llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
1954     FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
1955     InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
1956   }
1957 
1958   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
1959   llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
1960   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
1961   CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
1962 
1963   // Emit code to load the value if it was passed in registers.
1964 
1965   CGF.EmitBlock(InRegBlock);
1966 
1967   // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
1968   // an offset of l->gp_offset and/or l->fp_offset. This may require
1969   // copying to a temporary location in case the parameter is passed
1970   // in different register classes or requires an alignment greater
1971   // than 8 for general purpose registers and 16 for XMM registers.
1972   //
1973   // FIXME: This really results in shameful code when we end up needing to
1974   // collect arguments from different places; often what should result in a
1975   // simple assembling of a structure from scattered addresses has many more
1976   // loads than necessary. Can we clean this up?
1977   const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
1978   llvm::Value *RegAddr =
1979     CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
1980                            "reg_save_area");
1981   if (neededInt && neededSSE) {
1982     // FIXME: Cleanup.
1983     assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
1984     const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
1985     llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
1986     assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
1987     const llvm::Type *TyLo = ST->getElementType(0);
1988     const llvm::Type *TyHi = ST->getElementType(1);
1989     assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
1990            "Unexpected ABI info for mixed regs");
1991     const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
1992     const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
1993     llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
1994     llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
1995     llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
1996     llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
1997     llvm::Value *V =
1998       CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
1999     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2000     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
2001     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2002 
2003     RegAddr = CGF.Builder.CreateBitCast(Tmp,
2004                                         llvm::PointerType::getUnqual(LTy));
2005   } else if (neededInt) {
2006     RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2007     RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2008                                         llvm::PointerType::getUnqual(LTy));
2009   } else if (neededSSE == 1) {
2010     RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2011     RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2012                                         llvm::PointerType::getUnqual(LTy));
2013   } else {
2014     assert(neededSSE == 2 && "Invalid number of needed registers!");
2015     // SSE registers are spaced 16 bytes apart in the register save
2016     // area, we need to collect the two eightbytes together.
2017     llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2018     llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
2019     const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
2020     const llvm::Type *DblPtrTy =
2021       llvm::PointerType::getUnqual(DoubleTy);
2022     const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
2023                                                        DoubleTy, NULL);
2024     llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
2025     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
2026                                                          DblPtrTy));
2027     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2028     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
2029                                                          DblPtrTy));
2030     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2031     RegAddr = CGF.Builder.CreateBitCast(Tmp,
2032                                         llvm::PointerType::getUnqual(LTy));
2033   }
2034 
2035   // AMD64-ABI 3.5.7p5: Step 5. Set:
2036   // l->gp_offset = l->gp_offset + num_gp * 8
2037   // l->fp_offset = l->fp_offset + num_fp * 16.
2038   if (neededInt) {
2039     llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
2040     CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
2041                             gp_offset_p);
2042   }
2043   if (neededSSE) {
2044     llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
2045     CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
2046                             fp_offset_p);
2047   }
2048   CGF.EmitBranch(ContBlock);
2049 
2050   // Emit code to load the value if it was passed in memory.
2051 
2052   CGF.EmitBlock(InMemBlock);
2053   llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2054 
2055   // Return the appropriate result.
2056 
2057   CGF.EmitBlock(ContBlock);
2058   llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
2059                                                  "vaarg.addr");
2060   ResAddr->reserveOperandSpace(2);
2061   ResAddr->addIncoming(RegAddr, InRegBlock);
2062   ResAddr->addIncoming(MemAddr, InMemBlock);
2063   return ResAddr;
2064 }
2065 
2066 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2067                                       CodeGenFunction &CGF) const {
2068   const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
2069   const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
2070 
2071   CGBuilderTy &Builder = CGF.Builder;
2072   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
2073                                                        "ap");
2074   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2075   llvm::Type *PTy =
2076     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2077   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
2078 
2079   uint64_t Offset =
2080     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
2081   llvm::Value *NextAddr =
2082     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
2083                       "ap.next");
2084   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2085 
2086   return AddrTyped;
2087 }
2088 
2089 // PowerPC-32
2090 
2091 namespace {
2092 class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2093 public:
2094   PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2095 
2096   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2097     // This is recovered from gcc output.
2098     return 1; // r1 is the dedicated stack pointer
2099   }
2100 
2101   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2102                                llvm::Value *Address) const;
2103 };
2104 
2105 }
2106 
2107 bool
2108 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2109                                                 llvm::Value *Address) const {
2110   // This is calculated from the LLVM and GCC tables and verified
2111   // against gcc output.  AFAIK all ABIs use the same encoding.
2112 
2113   CodeGen::CGBuilderTy &Builder = CGF.Builder;
2114   llvm::LLVMContext &Context = CGF.getLLVMContext();
2115 
2116   const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
2117   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2118   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
2119   llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
2120 
2121   // 0-31: r0-31, the 4-byte general-purpose registers
2122   AssignToArrayRange(Builder, Address, Four8, 0, 31);
2123 
2124   // 32-63: fp0-31, the 8-byte floating-point registers
2125   AssignToArrayRange(Builder, Address, Eight8, 32, 63);
2126 
2127   // 64-76 are various 4-byte special-purpose registers:
2128   // 64: mq
2129   // 65: lr
2130   // 66: ctr
2131   // 67: ap
2132   // 68-75 cr0-7
2133   // 76: xer
2134   AssignToArrayRange(Builder, Address, Four8, 64, 76);
2135 
2136   // 77-108: v0-31, the 16-byte vector registers
2137   AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
2138 
2139   // 109: vrsave
2140   // 110: vscr
2141   // 111: spe_acc
2142   // 112: spefscr
2143   // 113: sfp
2144   AssignToArrayRange(Builder, Address, Four8, 109, 113);
2145 
2146   return false;
2147 }
2148 
2149 
2150 //===----------------------------------------------------------------------===//
2151 // ARM ABI Implementation
2152 //===----------------------------------------------------------------------===//
2153 
2154 namespace {
2155 
2156 class ARMABIInfo : public ABIInfo {
2157 public:
2158   enum ABIKind {
2159     APCS = 0,
2160     AAPCS = 1,
2161     AAPCS_VFP
2162   };
2163 
2164 private:
2165   ABIKind Kind;
2166 
2167 public:
2168   ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {}
2169 
2170 private:
2171   ABIKind getABIKind() const { return Kind; }
2172 
2173   ABIArgInfo classifyReturnType(QualType RetTy) const;
2174   ABIArgInfo classifyArgumentType(QualType RetTy) const;
2175 
2176   virtual void computeInfo(CGFunctionInfo &FI) const;
2177 
2178   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2179                                  CodeGenFunction &CGF) const;
2180 };
2181 
2182 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
2183 public:
2184   ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
2185     :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
2186 
2187   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2188     return 13;
2189   }
2190 };
2191 
2192 }
2193 
2194 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
2195   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2196   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2197        it != ie; ++it)
2198     it->info = classifyArgumentType(it->type);
2199 
2200   const llvm::Triple &Triple(getContext().Target.getTriple());
2201   llvm::CallingConv::ID DefaultCC;
2202   if (Triple.getEnvironmentName() == "gnueabi" ||
2203       Triple.getEnvironmentName() == "eabi")
2204     DefaultCC = llvm::CallingConv::ARM_AAPCS;
2205   else
2206     DefaultCC = llvm::CallingConv::ARM_APCS;
2207 
2208   switch (getABIKind()) {
2209   case APCS:
2210     if (DefaultCC != llvm::CallingConv::ARM_APCS)
2211       FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
2212     break;
2213 
2214   case AAPCS:
2215     if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
2216       FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
2217     break;
2218 
2219   case AAPCS_VFP:
2220     FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
2221     break;
2222   }
2223 }
2224 
2225 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
2226   if (!isAggregateTypeForABI(Ty)) {
2227     // Treat an enum type as its underlying type.
2228     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2229       Ty = EnumTy->getDecl()->getIntegerType();
2230 
2231     return (Ty->isPromotableIntegerType() ?
2232             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2233   }
2234 
2235   // Ignore empty records.
2236   if (isEmptyRecord(getContext(), Ty, true))
2237     return ABIArgInfo::getIgnore();
2238 
2239   // Structures with either a non-trivial destructor or a non-trivial
2240   // copy constructor are always indirect.
2241   if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
2242     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2243 
2244   // Otherwise, pass by coercing to a structure of the appropriate size.
2245   //
2246   // FIXME: This is kind of nasty... but there isn't much choice because the ARM
2247   // backend doesn't support byval.
2248   // FIXME: This doesn't handle alignment > 64 bits.
2249   const llvm::Type* ElemTy;
2250   unsigned SizeRegs;
2251   if (getContext().getTypeAlign(Ty) > 32) {
2252     ElemTy = llvm::Type::getInt64Ty(getVMContext());
2253     SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
2254   } else {
2255     ElemTy = llvm::Type::getInt32Ty(getVMContext());
2256     SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
2257   }
2258   std::vector<const llvm::Type*> LLVMFields;
2259   LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
2260   const llvm::Type* STy = llvm::StructType::get(getVMContext(), LLVMFields,
2261                                                 true);
2262   return ABIArgInfo::getDirect(STy);
2263 }
2264 
2265 static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
2266                               llvm::LLVMContext &VMContext) {
2267   // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
2268   // is called integer-like if its size is less than or equal to one word, and
2269   // the offset of each of its addressable sub-fields is zero.
2270 
2271   uint64_t Size = Context.getTypeSize(Ty);
2272 
2273   // Check that the type fits in a word.
2274   if (Size > 32)
2275     return false;
2276 
2277   // FIXME: Handle vector types!
2278   if (Ty->isVectorType())
2279     return false;
2280 
2281   // Float types are never treated as "integer like".
2282   if (Ty->isRealFloatingType())
2283     return false;
2284 
2285   // If this is a builtin or pointer type then it is ok.
2286   if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
2287     return true;
2288 
2289   // Small complex integer types are "integer like".
2290   if (const ComplexType *CT = Ty->getAs<ComplexType>())
2291     return isIntegerLikeType(CT->getElementType(), Context, VMContext);
2292 
2293   // Single element and zero sized arrays should be allowed, by the definition
2294   // above, but they are not.
2295 
2296   // Otherwise, it must be a record type.
2297   const RecordType *RT = Ty->getAs<RecordType>();
2298   if (!RT) return false;
2299 
2300   // Ignore records with flexible arrays.
2301   const RecordDecl *RD = RT->getDecl();
2302   if (RD->hasFlexibleArrayMember())
2303     return false;
2304 
2305   // Check that all sub-fields are at offset 0, and are themselves "integer
2306   // like".
2307   const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
2308 
2309   bool HadField = false;
2310   unsigned idx = 0;
2311   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2312        i != e; ++i, ++idx) {
2313     const FieldDecl *FD = *i;
2314 
2315     // Bit-fields are not addressable, we only need to verify they are "integer
2316     // like". We still have to disallow a subsequent non-bitfield, for example:
2317     //   struct { int : 0; int x }
2318     // is non-integer like according to gcc.
2319     if (FD->isBitField()) {
2320       if (!RD->isUnion())
2321         HadField = true;
2322 
2323       if (!isIntegerLikeType(FD->getType(), Context, VMContext))
2324         return false;
2325 
2326       continue;
2327     }
2328 
2329     // Check if this field is at offset 0.
2330     if (Layout.getFieldOffset(idx) != 0)
2331       return false;
2332 
2333     if (!isIntegerLikeType(FD->getType(), Context, VMContext))
2334       return false;
2335 
2336     // Only allow at most one field in a structure. This doesn't match the
2337     // wording above, but follows gcc in situations with a field following an
2338     // empty structure.
2339     if (!RD->isUnion()) {
2340       if (HadField)
2341         return false;
2342 
2343       HadField = true;
2344     }
2345   }
2346 
2347   return true;
2348 }
2349 
2350 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
2351   if (RetTy->isVoidType())
2352     return ABIArgInfo::getIgnore();
2353 
2354   // Large vector types should be returned via memory.
2355   if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
2356     return ABIArgInfo::getIndirect(0);
2357 
2358   if (!isAggregateTypeForABI(RetTy)) {
2359     // Treat an enum type as its underlying type.
2360     if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
2361       RetTy = EnumTy->getDecl()->getIntegerType();
2362 
2363     return (RetTy->isPromotableIntegerType() ?
2364             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2365   }
2366 
2367   // Structures with either a non-trivial destructor or a non-trivial
2368   // copy constructor are always indirect.
2369   if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
2370     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2371 
2372   // Are we following APCS?
2373   if (getABIKind() == APCS) {
2374     if (isEmptyRecord(getContext(), RetTy, false))
2375       return ABIArgInfo::getIgnore();
2376 
2377     // Complex types are all returned as packed integers.
2378     //
2379     // FIXME: Consider using 2 x vector types if the back end handles them
2380     // correctly.
2381     if (RetTy->isAnyComplexType())
2382       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2383                                               getContext().getTypeSize(RetTy)));
2384 
2385     // Integer like structures are returned in r0.
2386     if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
2387       // Return in the smallest viable integer type.
2388       uint64_t Size = getContext().getTypeSize(RetTy);
2389       if (Size <= 8)
2390         return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
2391       if (Size <= 16)
2392         return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
2393       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
2394     }
2395 
2396     // Otherwise return in memory.
2397     return ABIArgInfo::getIndirect(0);
2398   }
2399 
2400   // Otherwise this is an AAPCS variant.
2401 
2402   if (isEmptyRecord(getContext(), RetTy, true))
2403     return ABIArgInfo::getIgnore();
2404 
2405   // Aggregates <= 4 bytes are returned in r0; other aggregates
2406   // are returned indirectly.
2407   uint64_t Size = getContext().getTypeSize(RetTy);
2408   if (Size <= 32) {
2409     // Return in the smallest viable integer type.
2410     if (Size <= 8)
2411       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
2412     if (Size <= 16)
2413       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
2414     return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
2415   }
2416 
2417   return ABIArgInfo::getIndirect(0);
2418 }
2419 
2420 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2421                                    CodeGenFunction &CGF) const {
2422   // FIXME: Need to handle alignment
2423   const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
2424   const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
2425 
2426   CGBuilderTy &Builder = CGF.Builder;
2427   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
2428                                                        "ap");
2429   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2430   llvm::Type *PTy =
2431     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2432   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
2433 
2434   uint64_t Offset =
2435     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
2436   llvm::Value *NextAddr =
2437     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
2438                       "ap.next");
2439   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2440 
2441   return AddrTyped;
2442 }
2443 
2444 //===----------------------------------------------------------------------===//
2445 // SystemZ ABI Implementation
2446 //===----------------------------------------------------------------------===//
2447 
2448 namespace {
2449 
2450 class SystemZABIInfo : public ABIInfo {
2451 public:
2452   SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
2453 
2454   bool isPromotableIntegerType(QualType Ty) const;
2455 
2456   ABIArgInfo classifyReturnType(QualType RetTy) const;
2457   ABIArgInfo classifyArgumentType(QualType RetTy) const;
2458 
2459   virtual void computeInfo(CGFunctionInfo &FI) const {
2460     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2461     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2462          it != ie; ++it)
2463       it->info = classifyArgumentType(it->type);
2464   }
2465 
2466   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2467                                  CodeGenFunction &CGF) const;
2468 };
2469 
2470 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
2471 public:
2472   SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
2473     : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
2474 };
2475 
2476 }
2477 
2478 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
2479   // SystemZ ABI requires all 8, 16 and 32 bit quantities to be extended.
2480   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
2481     switch (BT->getKind()) {
2482     case BuiltinType::Bool:
2483     case BuiltinType::Char_S:
2484     case BuiltinType::Char_U:
2485     case BuiltinType::SChar:
2486     case BuiltinType::UChar:
2487     case BuiltinType::Short:
2488     case BuiltinType::UShort:
2489     case BuiltinType::Int:
2490     case BuiltinType::UInt:
2491       return true;
2492     default:
2493       return false;
2494     }
2495   return false;
2496 }
2497 
2498 llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2499                                        CodeGenFunction &CGF) const {
2500   // FIXME: Implement
2501   return 0;
2502 }
2503 
2504 
2505 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
2506   if (RetTy->isVoidType())
2507     return ABIArgInfo::getIgnore();
2508   if (isAggregateTypeForABI(RetTy))
2509     return ABIArgInfo::getIndirect(0);
2510 
2511   return (isPromotableIntegerType(RetTy) ?
2512           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2513 }
2514 
2515 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
2516   if (isAggregateTypeForABI(Ty))
2517     return ABIArgInfo::getIndirect(0);
2518 
2519   return (isPromotableIntegerType(Ty) ?
2520           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2521 }
2522 
2523 //===----------------------------------------------------------------------===//
2524 // MBlaze ABI Implementation
2525 //===----------------------------------------------------------------------===//
2526 
2527 namespace {
2528 
2529 class MBlazeABIInfo : public ABIInfo {
2530 public:
2531   MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
2532 
2533   bool isPromotableIntegerType(QualType Ty) const;
2534 
2535   ABIArgInfo classifyReturnType(QualType RetTy) const;
2536   ABIArgInfo classifyArgumentType(QualType RetTy) const;
2537 
2538   virtual void computeInfo(CGFunctionInfo &FI) const {
2539     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2540     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2541          it != ie; ++it)
2542       it->info = classifyArgumentType(it->type);
2543   }
2544 
2545   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2546                                  CodeGenFunction &CGF) const;
2547 };
2548 
2549 class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo {
2550 public:
2551   MBlazeTargetCodeGenInfo(CodeGenTypes &CGT)
2552     : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {}
2553   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2554                            CodeGen::CodeGenModule &M) const;
2555 };
2556 
2557 }
2558 
2559 bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const {
2560   // MBlaze ABI requires all 8 and 16 bit quantities to be extended.
2561   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
2562     switch (BT->getKind()) {
2563     case BuiltinType::Bool:
2564     case BuiltinType::Char_S:
2565     case BuiltinType::Char_U:
2566     case BuiltinType::SChar:
2567     case BuiltinType::UChar:
2568     case BuiltinType::Short:
2569     case BuiltinType::UShort:
2570       return true;
2571     default:
2572       return false;
2573     }
2574   return false;
2575 }
2576 
2577 llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2578                                       CodeGenFunction &CGF) const {
2579   // FIXME: Implement
2580   return 0;
2581 }
2582 
2583 
2584 ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const {
2585   if (RetTy->isVoidType())
2586     return ABIArgInfo::getIgnore();
2587   if (isAggregateTypeForABI(RetTy))
2588     return ABIArgInfo::getIndirect(0);
2589 
2590   return (isPromotableIntegerType(RetTy) ?
2591           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2592 }
2593 
2594 ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const {
2595   if (isAggregateTypeForABI(Ty))
2596     return ABIArgInfo::getIndirect(0);
2597 
2598   return (isPromotableIntegerType(Ty) ?
2599           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2600 }
2601 
2602 void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
2603                                                   llvm::GlobalValue *GV,
2604                                                   CodeGen::CodeGenModule &M)
2605                                                   const {
2606   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
2607   if (!FD) return;
2608 
2609   llvm::CallingConv::ID CC = llvm::CallingConv::C;
2610   if (FD->hasAttr<MBlazeInterruptHandlerAttr>())
2611     CC = llvm::CallingConv::MBLAZE_INTR;
2612   else if (FD->hasAttr<MBlazeSaveVolatilesAttr>())
2613     CC = llvm::CallingConv::MBLAZE_SVOL;
2614 
2615   if (CC != llvm::CallingConv::C) {
2616       // Handle 'interrupt_handler' attribute:
2617       llvm::Function *F = cast<llvm::Function>(GV);
2618 
2619       // Step 1: Set ISR calling convention.
2620       F->setCallingConv(CC);
2621 
2622       // Step 2: Add attributes goodness.
2623       F->addFnAttr(llvm::Attribute::NoInline);
2624   }
2625 
2626   // Step 3: Emit _interrupt_handler alias.
2627   if (CC == llvm::CallingConv::MBLAZE_INTR)
2628     new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
2629                           "_interrupt_handler", GV, &M.getModule());
2630 }
2631 
2632 
2633 //===----------------------------------------------------------------------===//
2634 // MSP430 ABI Implementation
2635 //===----------------------------------------------------------------------===//
2636 
2637 namespace {
2638 
2639 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
2640 public:
2641   MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
2642     : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
2643   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2644                            CodeGen::CodeGenModule &M) const;
2645 };
2646 
2647 }
2648 
2649 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
2650                                                   llvm::GlobalValue *GV,
2651                                              CodeGen::CodeGenModule &M) const {
2652   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
2653     if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
2654       // Handle 'interrupt' attribute:
2655       llvm::Function *F = cast<llvm::Function>(GV);
2656 
2657       // Step 1: Set ISR calling convention.
2658       F->setCallingConv(llvm::CallingConv::MSP430_INTR);
2659 
2660       // Step 2: Add attributes goodness.
2661       F->addFnAttr(llvm::Attribute::NoInline);
2662 
2663       // Step 3: Emit ISR vector alias.
2664       unsigned Num = attr->getNumber() + 0xffe0;
2665       new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
2666                             "vector_" + llvm::Twine::utohexstr(Num),
2667                             GV, &M.getModule());
2668     }
2669   }
2670 }
2671 
2672 //===----------------------------------------------------------------------===//
2673 // MIPS ABI Implementation.  This works for both little-endian and
2674 // big-endian variants.
2675 //===----------------------------------------------------------------------===//
2676 
2677 namespace {
2678 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
2679 public:
2680   MIPSTargetCodeGenInfo(CodeGenTypes &CGT)
2681     : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
2682 
2683   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
2684     return 29;
2685   }
2686 
2687   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2688                                llvm::Value *Address) const;
2689 };
2690 }
2691 
2692 bool
2693 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2694                                                llvm::Value *Address) const {
2695   // This information comes from gcc's implementation, which seems to
2696   // as canonical as it gets.
2697 
2698   CodeGen::CGBuilderTy &Builder = CGF.Builder;
2699   llvm::LLVMContext &Context = CGF.getLLVMContext();
2700 
2701   // Everything on MIPS is 4 bytes.  Double-precision FP registers
2702   // are aliased to pairs of single-precision FP registers.
2703   const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
2704   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2705 
2706   // 0-31 are the general purpose registers, $0 - $31.
2707   // 32-63 are the floating-point registers, $f0 - $f31.
2708   // 64 and 65 are the multiply/divide registers, $hi and $lo.
2709   // 66 is the (notional, I think) register for signal-handler return.
2710   AssignToArrayRange(Builder, Address, Four8, 0, 65);
2711 
2712   // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
2713   // They are one bit wide and ignored here.
2714 
2715   // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
2716   // (coprocessor 1 is the FP unit)
2717   // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
2718   // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
2719   // 176-181 are the DSP accumulator registers.
2720   AssignToArrayRange(Builder, Address, Four8, 80, 181);
2721 
2722   return false;
2723 }
2724 
2725 
2726 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
2727   if (TheTargetCodeGenInfo)
2728     return *TheTargetCodeGenInfo;
2729 
2730   // For now we just cache the TargetCodeGenInfo in CodeGenModule and don't
2731   // free it.
2732 
2733   const llvm::Triple &Triple = getContext().Target.getTriple();
2734   switch (Triple.getArch()) {
2735   default:
2736     return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
2737 
2738   case llvm::Triple::mips:
2739   case llvm::Triple::mipsel:
2740     return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types));
2741 
2742   case llvm::Triple::arm:
2743   case llvm::Triple::thumb:
2744     // FIXME: We want to know the float calling convention as well.
2745     if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0)
2746       return *(TheTargetCodeGenInfo =
2747                new ARMTargetCodeGenInfo(Types, ARMABIInfo::APCS));
2748 
2749     return *(TheTargetCodeGenInfo =
2750              new ARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS));
2751 
2752   case llvm::Triple::ppc:
2753     return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
2754 
2755   case llvm::Triple::systemz:
2756     return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
2757 
2758   case llvm::Triple::mblaze:
2759     return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));
2760 
2761   case llvm::Triple::msp430:
2762     return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
2763 
2764   case llvm::Triple::x86:
2765     switch (Triple.getOS()) {
2766     case llvm::Triple::Darwin:
2767       return *(TheTargetCodeGenInfo =
2768                new X86_32TargetCodeGenInfo(Types, true, true));
2769     case llvm::Triple::Cygwin:
2770     case llvm::Triple::MinGW32:
2771     case llvm::Triple::AuroraUX:
2772     case llvm::Triple::DragonFly:
2773     case llvm::Triple::FreeBSD:
2774     case llvm::Triple::OpenBSD:
2775       return *(TheTargetCodeGenInfo =
2776                new X86_32TargetCodeGenInfo(Types, false, true));
2777 
2778     default:
2779       return *(TheTargetCodeGenInfo =
2780                new X86_32TargetCodeGenInfo(Types, false, false));
2781     }
2782 
2783   case llvm::Triple::x86_64:
2784     switch (Triple.getOS()) {
2785     case llvm::Triple::Win32:
2786     case llvm::Triple::MinGW64:
2787     case llvm::Triple::Cygwin:
2788       return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
2789     default:
2790       return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types));
2791     }
2792   }
2793 }
2794