//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Type.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return CodeGenFunction::hasAggregateLLVMType(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::TargetData &ABIInfo::getTargetData() const {
  return CGT.getTargetData();
}

void ABIArgInfo::dump() const {
  llvm::raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (const llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
      FT = AT->getElementType();

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
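///
/// For example, 'struct { int : 0; }' is empty: its only member is an
/// unnamed bit-field.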
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isEmptyRecord(Context, i->getType(), true))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i)
    if (!isEmptyField(Context, *i, AllowArrays))
      return false;
  return true;
}

/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
/// a non-trivial destructor or a non-trivial copy constructor.
static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return false;

  return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
}

/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
/// a record type with either a non-trivial destructor or a non-trivial copy
/// constructor.
static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;

  return hasNonTrivialDestructorOrCopyConstructor(RT);
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i) {
      // Ignore empty records.
      if (isEmptyRecord(Context, i->getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return 0;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(i->getType(), Context);
      if (!Found)
        return 0;
    }
  }

  // Check for single element.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
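    // For example, 'struct { double d[1]; }' is treated as if it were
    // 'struct { double d; }'.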
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  return Found;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
      !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it
/// was expanded into separate arguments. If so, we prefer to do the latter to
/// avoid inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems: we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;
  }

  return true;
}

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// UseX86_MMXType - Return true if this is an MMX type that should use the
/// special x86_mmx type.
bool UseX86_MMXType(const llvm::Type *IRType) {
  // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
  // special x86_mmx type.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static const llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                                llvm::StringRef Constraint,
                                                const llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  return Ty;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);

  /// getIndirectResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

public:

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.isTargetDarwin()) return 5;

    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;

  const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                        llvm::StringRef Constraint,
                                        const llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }
};

}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are returned in a register if all fields would be
  // returned in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }

  return true;
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Classify "single element" structs as their element type.
    if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) {
      if (const BuiltinType *BT = SeltTy->getAs<BuiltinType>()) {
        if (BT->isIntegerType()) {
          // We need to use the size of the structure; padding
          // bit-fields can adjust that to be larger than the single
          // element type.
          uint64_t Size = getContext().getTypeSize(RetTy);
          return ABIArgInfo::getDirect(
            llvm::IntegerType::get(getVMContext(), (unsigned)Size));
        }

        if (BT->getKind() == BuiltinType::Float) {
          assert(getContext().getTypeSize(RetTy) ==
                 getContext().getTypeSize(SeltTy) &&
                 "Unexpected single element structure size!");
          return ABIArgInfo::getDirect(llvm::Type::getFloatTy(getVMContext()));
        }

        if (BT->getKind() == BuiltinType::Double) {
          assert(getContext().getTypeSize(RetTy) ==
                 getContext().getTypeSize(SeltTy) &&
                 "Unexpected single element structure size!");
          return ABIArgInfo::getDirect(llvm::Type::getDoubleTy(getVMContext()));
        }
      } else if (SeltTy->isPointerType()) {
        // FIXME: It would be really nice if this could come out as the proper
        // pointer type.
        const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext());
        return ABIArgInfo::getDirect(PtrTy);
      } else if (SeltTy->isVectorType()) {
        // 64- and 128-bit vectors are never returned in a
        // register when inside a structure.
        uint64_t Size = getContext().getTypeSize(RetTy);
        if (Size == 64 || Size == 128)
          return ABIArgInfo::getIndirect(0);

        return classifyReturnType(QualType(SeltTy, 0));
      }
    }

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (isRecordWithSSEVectorType(Context, i->getType()))
        return true;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    QualType FT = i->getType();

    if (FT->getAs<VectorType>() && Context.getTypeSize(FT) == 128)
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (isRecordWithSSEVectorType(getContext(), Ty))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
  if (!ByVal)
    return ABIArgInfo::getIndirect(0, false);

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(0);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  if (StackAlign < TypeAlign)
    return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
                                   /*Realign=*/true);

  return ABIArgInfo::getIndirect(StackAlign);
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
  // FIXME: Set alignment on indirect arguments.
  if (isAggregateTypeForABI(Ty)) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return getIndirectResult(Ty, /*ByVal=*/false);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty);
    }

    // Ignore empty structs.
    if (Ty->isStructureType() && getContext().getTypeSize(Ty) == 0)
      return ABIArgInfo::getIgnore();

    // Expand small (<= 128-bit) record types when we know that the stack
    // layout of those arguments will match the struct. This is important
    // because the LLVM backend isn't smart enough to remove byval, which
    // inhibits many optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpand();

    return getIndirectResult(Ty);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory; we handle this by passing
    // them as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
    if (UseX86_MMXType(IRType)) {
      ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
      AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
      return AAI;
    }

    return ABIArgInfo::getDirect();
  }

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
                                               CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  llvm::LLVMContext &Context = CGF.getLLVMContext();

  const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.isTargetDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(i8, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
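  ///
  /// For example, 'struct { long a; double b; }' classifies as
  /// (Lo=Integer, Hi=SSE), while '__int128' classifies as
  /// (Lo=Integer, Hi=Integer).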
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;

  const llvm::Type *Get16ByteVectorType(QualType Ty) const;
  const llvm::Type *GetSSETypeAtOffset(const llvm::Type *IRType,
                                       unsigned IROffset, QualType SourceTy,
                                       unsigned SourceOffset) const;
  const llvm::Type *GetINTEGERTypeAtOffset(const llvm::Type *IRType,
                                           unsigned IROffset, QualType SourceTy,
                                           unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the value will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  ABIArgInfo classify(QualType Ty) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new X86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    CodeGen::CGBuilderTy &Builder = CGF.Builder;
    llvm::LLVMContext &Context = CGF.getLLVMContext();

    const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
    llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(Builder, Address, Eight8, 0, 16);

    return false;
  }

  const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                        llvm::StringRef Constraint,
                                        const llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }
};

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    CodeGen::CGBuilderTy &Builder = CGF.Builder;
    llvm::LLVMContext &Context = CGF.getLLVMContext();

    const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
    llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(Builder, Address, Eight8, 0, 16);

    return false;
  }
};

}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}

void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class
  // for Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType())
      Lo = Hi = Integer;
    else
      Current = Integer;
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128) {
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy)
      Current = SSE;
    else if (ET == getContext().DoubleTy)
      Lo = Hi = SSE;
    else if (ET == getContext().LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();
    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // Do post merger cleanup (see below). Only case we worry about is Memory.
    if (Hi == Memory)
      Lo = Memory;
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
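    //
    // For example, a 24-byte 'struct { double x[3]; }' is classified MEMORY
    // here, while 'double[2]' above merges to (Lo=SSE, Hi=SSE).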
    if (Size > 128)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (hasNonTrivialDestructorOrCopyConstructor(RT))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // Classify this base.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset = OffsetBase + Layout.getBaseClassOffsetInBits(Base);
        classify(i->getType(), Offset, FieldLo, FieldHi);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory)
          break;
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
      // fields, it has class MEMORY.
      //
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling: they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size =
          i->getBitWidth()->EvaluateAsInt(getContext()).getZExtValue();

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
    //
    // (a) If one of the classes is MEMORY, the whole argument is
    // passed in memory.
    //
    // (b) If SSEUP is not preceded by SSE, it is converted to SSE.

    // The first of these conditions is guaranteed by how we implement
    // the merge (just bail).
    //
    // The second condition occurs in the case of unions; for example
    // union { _Complex double; unsigned; }.
    if (Hi == Memory)
      Lo = Memory;
    if (Hi == SSEUp && Lo != SSE)
      Hi = SSE;
  }
}

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  return ABIArgInfo::getIndirect(0);
}

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Compute the byval alignment. We trust the back-end to honor the
  // minimum ABI alignment for byval, to make cleaner IR.
  const unsigned MinABIAlign = 8;
  unsigned Align = getContext().getTypeAlign(Ty) / 8;
  if (Align > MinABIAlign)
    return ABIArgInfo::getIndirect(Align);
  return ABIArgInfo::getIndirect(0);
}

/// Get16ByteVectorType - The ABI specifies that a value should be passed in a
/// full vector XMM register. Pick an LLVM IR type that will be passed as a
/// vector register.
const llvm::Type *X86_64ABIInfo::Get16ByteVectorType(QualType Ty) const {
  const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);

  // Wrapper structs that just contain vectors are passed just like vectors;
  // strip them off if present.
  const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
  while (STy && STy->getNumElements() == 1) {
    IRType = STy->getElementType(0);
    STy = dyn_cast<llvm::StructType>(IRType);
  }

  // If the preferred type is a 16-byte vector, prefer to pass it.
  if (const llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)) {
    const llvm::Type *EltTy = VT->getElementType();
    if (VT->getBitWidth() == 128 &&
        (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
         EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
         EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
         EltTy->isIntegerTy(128)))
      return VT;
  }

  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
}

/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or be in
/// alignment padding.
/// The user type specified is known to be at most 128 bits in size, and to
/// have passed through X86_64ABIInfo::classify with a successful
/// classification that put one of the two halves in the INTEGER class.
///
/// It is conservatively correct to return false.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here. This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    // Check each element to see if the element overlaps with the queried
    // range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;

      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = (unsigned)Layout.getBaseClassOffsetInBits(Base);
        if (BaseOffset >= EndBit) continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0;
        if (!BitsContainNoUserData(i->getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest.
    // Yes, this could be sped up a lot by being smarter about queried fields;
    // however, we're only looking at structs up to 16 bytes, so we don't care
    // much.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit) break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset : 0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}

/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
/// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
static bool ContainsFloatAtOffset(const llvm::Type *IRType, unsigned IROffset,
                                  const llvm::TargetData &TD) {
  // Base case if we find a float.
  if (IROffset == 0 && IRType->isFloatTy())
    return true;

  // If this is a struct, recurse into the field at the specified offset.
  if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);
    return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
  }

  // If this is an array, recurse into the element at the specified offset.
  if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    const llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset/EltSize*EltSize;
    return ContainsFloatAtOffset(EltTy, IROffset, TD);
  }

  return false;
}

/// GetSSETypeAtOffset - Return a type that will be passed by the backend in
/// the low 8 bytes of an XMM register, corresponding to the SSE class.
const llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  // The only three choices we have are either double, <2 x float>, or float.
  // We pass as float if the last 4 bytes are just padding. This happens for
  // structs that contain 3 floats.
  if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
                            SourceOffset*8+64, getContext()))
    return llvm::Type::getFloatTy(getVMContext());

  // We want to pass as <2 x float> if the LLVM IR type contains a float at
  // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
  // case.
  if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) &&
      ContainsFloatAtOffset(IRType, IROffset+4, getTargetData()))
    return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);

  return llvm::Type::getDoubleTy(getVMContext());
}

/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed
/// in an 8-byte GPR. This means that we either have a scalar or we are
/// talking about the high or low part of an up-to-16-byte struct. This
/// routine picks the best LLVM IR type to represent this, which may be i64
/// or may be anything else that the backend will pass in a GPR that works
/// better (e.g. i8, %foo*, etc).
///
/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
/// the source type. IROffset is an offset in bytes into the LLVM IR type that
/// the 8-byte value references. PrefType may be null.
///
/// SourceTy is the source level type for the entire argument. SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
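///
/// For example, 'struct { int a; int b; }' comes back as a single i64, while
/// the high eightbyte of 'struct { double d; int i; }' comes back as i32.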
///
const llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it. See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64))
      return IRType;

    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
    // goodness in the source type is just tail padding. This is allowed to
    // kick in for struct {double,int} on the int, but not on
    // struct{double,int,int} because we wouldn't return the second int. We
    // have to do this analysis on the source type because we can't depend on
    // unions being lowered a specific way etc.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32)) {
      unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth();

      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                SourceOffset*8+64, getContext()))
        return IRType;
    }
  }

  if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getTargetData().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    const llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                  SourceOffset);
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in
  // an integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
    (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
}

/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
/// be used as elements of a two register pair to pass or return, return a
/// first class aggregate to represent them. For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
static const llvm::Type *
GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
                           const llvm::TargetData &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
  // at offset 8. If the high and low parts we inferred are both 4-byte types
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
  // the second element at offset 8. Check for this:
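  //
  // For example, Lo=float with Hi=i32 would naively give {float, i32} with
  // the i32 at offset 4; promoting the float to double below yields
  // {double, i32}, which puts the i32 at offset 8 as required.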
Check for this: 1561 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo); 1562 unsigned HiAlign = TD.getABITypeAlignment(Hi); 1563 unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign); 1564 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!"); 1565 1566 // To handle this, we have to increase the size of the low part so that the 1567 // second element will start at an 8 byte offset. We can't increase the size 1568 // of the second element because it might make us access off the end of the 1569 // struct. 1570 if (HiStart != 8) { 1571 // There are only two sorts of types the ABI generation code can produce for 1572 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32. 1573 // Promote these to a larger type. 1574 if (Lo->isFloatTy()) 1575 Lo = llvm::Type::getDoubleTy(Lo->getContext()); 1576 else { 1577 assert(Lo->isIntegerTy() && "Invalid/unknown lo type"); 1578 Lo = llvm::Type::getInt64Ty(Lo->getContext()); 1579 } 1580 } 1581 1582 const llvm::StructType *Result = 1583 llvm::StructType::get(Lo->getContext(), Lo, Hi, NULL); 1584 1585 1586 // Verify that the second element is at an 8-byte offset. 1587 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 && 1588 "Invalid x86-64 argument pair!"); 1589 return Result; 1590 } 1591 1592 ABIArgInfo X86_64ABIInfo:: 1593 classifyReturnType(QualType RetTy) const { 1594 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the 1595 // classification algorithm. 1596 X86_64ABIInfo::Class Lo, Hi; 1597 classify(RetTy, 0, Lo, Hi); 1598 1599 // Check some invariants. 1600 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 1601 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 1602 1603 const llvm::Type *ResType = 0; 1604 switch (Lo) { 1605 case NoClass: 1606 if (Hi == NoClass) 1607 return ABIArgInfo::getIgnore(); 1608 // If the low part is just padding, it takes no register, leave ResType 1609 // null. 1610 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 1611 "Unknown missing lo part"); 1612 break; 1613 1614 case SSEUp: 1615 case X87Up: 1616 assert(0 && "Invalid classification for lo word."); 1617 1618 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via 1619 // hidden argument. 1620 case Memory: 1621 return getIndirectReturnResult(RetTy); 1622 1623 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next 1624 // available register of the sequence %rax, %rdx is used. 1625 case Integer: 1626 ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0, 1627 RetTy, 0); 1628 1629 // If we have a sign or zero extended integer, make sure to return Extend 1630 // so that the parameter gets the right LLVM IR attributes. 1631 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 1632 // Treat an enum type as its underlying type. 1633 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 1634 RetTy = EnumTy->getDecl()->getIntegerType(); 1635 1636 if (RetTy->isIntegralOrEnumerationType() && 1637 RetTy->isPromotableIntegerType()) 1638 return ABIArgInfo::getExtend(); 1639 } 1640 break; 1641 1642 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 1643 // available SSE register of the sequence %xmm0, %xmm1 is used. 1644 case SSE: 1645 ResType = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0, RetTy, 0); 1646 break; 1647 1648 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 1649 // returned on the X87 stack in %st0 as 80-bit x87 number. 
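  // (For instance, a function returning 'long double' is classified X87 and
  // comes back as an x86_fp80 value in %st0, exactly as the case below emits.)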
1650 case X87: 1651 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 1652 break; 1653 1654 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 1655 // part of the value is returned in %st0 and the imaginary part in 1656 // %st1. 1657 case ComplexX87: 1658 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 1659 ResType = llvm::StructType::get(getVMContext(), 1660 llvm::Type::getX86_FP80Ty(getVMContext()), 1661 llvm::Type::getX86_FP80Ty(getVMContext()), 1662 NULL); 1663 break; 1664 } 1665 1666 const llvm::Type *HighPart = 0; 1667 switch (Hi) { 1668 // Memory was handled previously and X87 should 1669 // never occur as a hi class. 1670 case Memory: 1671 case X87: 1672 assert(0 && "Invalid classification for hi word."); 1673 1674 case ComplexX87: // Previously handled. 1675 case NoClass: 1676 break; 1677 1678 case Integer: 1679 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 1680 8, RetTy, 8); 1681 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 1682 return ABIArgInfo::getDirect(HighPart, 8); 1683 break; 1684 case SSE: 1685 HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 8, RetTy, 8); 1686 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 1687 return ABIArgInfo::getDirect(HighPart, 8); 1688 break; 1689 1690 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 1691 // is passed in the upper half of the last used SSE register. 1692 // 1693 // SSEUP should always be preceded by SSE, just widen. 1694 case SSEUp: 1695 assert(Lo == SSE && "Unexpected SSEUp classification."); 1696 ResType = Get16ByteVectorType(RetTy); 1697 break; 1698 1699 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 1700 // returned together with the previous X87 value in %st0. 1701 case X87Up: 1702 // If X87Up is preceded by X87, we don't need to do 1703 // anything. However, in some cases with unions it may not be 1704 // preceded by X87. In such situations we follow gcc and pass the 1705 // extra bits in an SSE reg. 1706 if (Lo != X87) { 1707 HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 1708 8, RetTy, 8); 1709 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 1710 return ABIArgInfo::getDirect(HighPart, 8); 1711 } 1712 break; 1713 } 1714 1715 // If a high part was specified, merge it together with the low part. It is 1716 // known to pass in the high eightbyte of the result. We do this by forming a 1717 // first class struct aggregate with the high and low part: {low, high} 1718 if (HighPart) 1719 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData()); 1720 1721 return ABIArgInfo::getDirect(ResType); 1722 } 1723 1724 ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt, 1725 unsigned &neededSSE) const { 1726 X86_64ABIInfo::Class Lo, Hi; 1727 classify(Ty, 0, Lo, Hi); 1728 1729 // Check some invariants. 1730 // FIXME: Enforce these by construction. 1731 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 1732 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 1733 1734 neededInt = 0; 1735 neededSSE = 0; 1736 const llvm::Type *ResType = 0; 1737 switch (Lo) { 1738 case NoClass: 1739 if (Hi == NoClass) 1740 return ABIArgInfo::getIgnore(); 1741 // If the low part is just padding, it takes no register, leave ResType 1742 // null. 1743 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 1744 "Unknown missing lo part"); 1745 break; 1746 1747 // AMD64-ABI 3.2.3p3: Rule 1. 
If the class is MEMORY, pass the argument 1748 // on the stack. 1749 case Memory: 1750 1751 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 1752 // COMPLEX_X87, it is passed in memory. 1753 case X87: 1754 case ComplexX87: 1755 return getIndirectResult(Ty); 1756 1757 case SSEUp: 1758 case X87Up: 1759 assert(0 && "Invalid classification for lo word."); 1760 1761 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 1762 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 1763 // and %r9 is used. 1764 case Integer: 1765 ++neededInt; 1766 1767 // Pick an 8-byte type based on the preferred type. 1768 ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 0, Ty, 0); 1769 1770 // If we have a sign or zero extended integer, make sure to return Extend 1771 // so that the parameter gets the right LLVM IR attributes. 1772 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 1773 // Treat an enum type as its underlying type. 1774 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1775 Ty = EnumTy->getDecl()->getIntegerType(); 1776 1777 if (Ty->isIntegralOrEnumerationType() && 1778 Ty->isPromotableIntegerType()) 1779 return ABIArgInfo::getExtend(); 1780 } 1781 1782 break; 1783 1784 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 1785 // available SSE register is used, the registers are taken in the 1786 // order from %xmm0 to %xmm7. 1787 case SSE: { 1788 const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty); 1789 if (Hi != NoClass || !UseX86_MMXType(IRType)) 1790 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 1791 else 1792 // This is an MMX type. Treat it as such. 1793 ResType = llvm::Type::getX86_MMXTy(getVMContext()); 1794 1795 ++neededSSE; 1796 break; 1797 } 1798 } 1799 1800 const llvm::Type *HighPart = 0; 1801 switch (Hi) { 1802 // Memory was handled previously, ComplexX87 and X87 should 1803 // never occur as hi classes, and X87Up must be preceded by X87, 1804 // which is passed in memory. 1805 case Memory: 1806 case X87: 1807 case ComplexX87: 1808 assert(0 && "Invalid classification for hi word."); 1809 break; 1810 1811 case NoClass: break; 1812 1813 case Integer: 1814 ++neededInt; 1815 // Pick an 8-byte type based on the preferred type. 1816 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8); 1817 1818 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 1819 return ABIArgInfo::getDirect(HighPart, 8); 1820 break; 1821 1822 // X87Up generally doesn't occur here (long double is passed in 1823 // memory), except in situations involving unions. 1824 case X87Up: 1825 case SSE: 1826 HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8); 1827 1828 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 1829 return ABIArgInfo::getDirect(HighPart, 8); 1830 1831 ++neededSSE; 1832 break; 1833 1834 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 1835 // eightbyte is passed in the upper half of the last used SSE 1836 // register. This only happens when 128-bit vectors are passed. 1837 case SSEUp: 1838 assert(Lo == SSE && "Unexpected SSEUp classification"); 1839 ResType = Get16ByteVectorType(Ty); 1840 break; 1841 } 1842 1843 // If a high part was specified, merge it together with the low part. It is 1844 // known to pass in the high eightbyte of the result. 
We do this by forming a 1845 // first class struct aggregate with the high and low part: {low, high} 1846 if (HighPart) 1847 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData()); 1848 1849 return ABIArgInfo::getDirect(ResType); 1850 } 1851 1852 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 1853 1854 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 1855 1856 // Keep track of the number of assigned registers. 1857 unsigned freeIntRegs = 6, freeSSERegs = 8; 1858 1859 // If the return value is indirect, then the hidden argument is consuming one 1860 // integer register. 1861 if (FI.getReturnInfo().isIndirect()) 1862 --freeIntRegs; 1863 1864 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 1865 // get assigned (in left-to-right order) for passing as follows... 1866 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 1867 it != ie; ++it) { 1868 unsigned neededInt, neededSSE; 1869 it->info = classifyArgumentType(it->type, neededInt, neededSSE); 1870 1871 // AMD64-ABI 3.2.3p3: If there are no registers available for any 1872 // eightbyte of an argument, the whole argument is passed on the 1873 // stack. If registers have already been assigned for some 1874 // eightbytes of such an argument, the assignments get reverted. 1875 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 1876 freeIntRegs -= neededInt; 1877 freeSSERegs -= neededSSE; 1878 } else { 1879 it->info = getIndirectResult(it->type); 1880 } 1881 } 1882 } 1883 1884 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 1885 QualType Ty, 1886 CodeGenFunction &CGF) { 1887 llvm::Value *overflow_arg_area_p = 1888 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 1889 llvm::Value *overflow_arg_area = 1890 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 1891 1892 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 1893 // byte boundary if alignment needed by type exceeds 8 byte boundary. 1894 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 1895 if (Align > 8) { 1896 // Note that we follow the ABI & gcc here, even though the type 1897 // could in theory have an alignment greater than 16. This case 1898 // shouldn't ever matter in practice. 1899 1900 // overflow_arg_area = (overflow_arg_area + 15) & ~15; 1901 llvm::Value *Offset = 1902 llvm::ConstantInt::get(CGF.Int32Ty, 15); 1903 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 1904 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 1905 CGF.Int64Ty); 1906 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~15LL); 1907 overflow_arg_area = 1908 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 1909 overflow_arg_area->getType(), 1910 "overflow_arg_area.align"); 1911 } 1912 1913 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 1914 const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 1915 llvm::Value *Res = 1916 CGF.Builder.CreateBitCast(overflow_arg_area, 1917 llvm::PointerType::getUnqual(LTy)); 1918 1919 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 1920 // l->overflow_arg_area + sizeof(type). 1921 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 1922 // an 8 byte boundary. 
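  // For example, a 12-byte type yields SizeInBytes == 12, which rounds up to
  // 16, so overflow_arg_area advances by 16 bytes.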
1923 
1924   uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
1925   llvm::Value *Offset =
1926     llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
1927   overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
1928                                             "overflow_arg_area.next");
1929   CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
1930 
1931   // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
1932   return Res;
1933 }
1934 
1935 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1936                                       CodeGenFunction &CGF) const {
1937   llvm::LLVMContext &VMContext = CGF.getLLVMContext();
1938 
1939   // Assume that va_list type is correct; should be pointer to LLVM type:
1940   // struct {
1941   //   i32 gp_offset;
1942   //   i32 fp_offset;
1943   //   i8* overflow_arg_area;
1944   //   i8* reg_save_area;
1945   // };
1946   unsigned neededInt, neededSSE;
1947 
1948   Ty = CGF.getContext().getCanonicalType(Ty);
1949   ABIArgInfo AI = classifyArgumentType(Ty, neededInt, neededSSE);
1950 
1951   // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
1952   // in the registers. If not go to step 7.
1953   if (!neededInt && !neededSSE)
1954     return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
1955 
1956   // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
1957   // general purpose registers needed to pass type and num_fp to hold
1958   // the number of floating point registers needed.
1959 
1960   // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
1961   // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
1962   // l->fp_offset > 304 - num_fp * 16 go to step 7.
1963   //
1964   // NOTE: 304 is a typo; there are only (6 * 8 + 8 * 16) = 176 bytes of
1965   // register save space.
1966 
1967   llvm::Value *InRegs = 0;
1968   llvm::Value *gp_offset_p = 0, *gp_offset = 0;
1969   llvm::Value *fp_offset_p = 0, *fp_offset = 0;
1970   if (neededInt) {
1971     gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
1972     gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
1973     InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
1974     InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
1975   }
1976 
1977   if (neededSSE) {
1978     fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
1979     fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
1980     llvm::Value *FitsInFP =
1981       llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
1982     FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
1983     InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
1984   }
1985 
1986   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
1987   llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
1988   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
1989   CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
1990 
1991   // Emit code to load the value if it was passed in registers.
1992 
1993   CGF.EmitBlock(InRegBlock);
1994 
1995   // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
1996   // an offset of l->gp_offset and/or l->fp_offset. This may require
1997   // copying to a temporary location in case the parameter is passed
1998   // in different register classes or requires an alignment greater
1999   // than 8 for general purpose registers and 16 for XMM registers.
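  // For example, struct { double d; int i; } uses one XMM and one GPR
  // eightbyte; the mixed-register path below copies both pieces into a
  // temporary and reloads them as a unit.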
2000   //
2001   // FIXME: This really results in shameful code when we end up needing to
2002   // collect arguments from different places; often what should result in a
2003   // simple assembling of a structure from scattered addresses has many more
2004   // loads than necessary. Can we clean this up?
2005   const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
2006   llvm::Value *RegAddr =
2007     CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
2008                            "reg_save_area");
2009   if (neededInt && neededSSE) {
2010     // FIXME: Cleanup.
2011     assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
2012     const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
2013     llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
2014     assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
2015     const llvm::Type *TyLo = ST->getElementType(0);
2016     const llvm::Type *TyHi = ST->getElementType(1);
2017     assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
2018            "Unexpected ABI info for mixed regs");
2019     const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
2020     const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
2021     llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2022     llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2023     llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
2024     llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
2025     llvm::Value *V =
2026       CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
2027     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2028     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
2029     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2030 
2031     RegAddr = CGF.Builder.CreateBitCast(Tmp,
2032                                         llvm::PointerType::getUnqual(LTy));
2033   } else if (neededInt) {
2034     RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2035     RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2036                                         llvm::PointerType::getUnqual(LTy));
2037   } else if (neededSSE == 1) {
2038     RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2039     RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2040                                         llvm::PointerType::getUnqual(LTy));
2041   } else {
2042     assert(neededSSE == 2 && "Invalid number of needed registers!");
2043     // SSE registers are spaced 16 bytes apart in the register save
2044     // area; we need to collect the two eightbytes together.
2045     llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2046     llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
2047     const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
2048     const llvm::Type *DblPtrTy =
2049       llvm::PointerType::getUnqual(DoubleTy);
2050     const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
2051                                                        DoubleTy, NULL);
2052     llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
2053     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
2054                                                          DblPtrTy));
2055     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2056     V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
2057                                                          DblPtrTy));
2058     CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2059     RegAddr = CGF.Builder.CreateBitCast(Tmp,
2060                                         llvm::PointerType::getUnqual(LTy));
2061   }
2062 
2063   // AMD64-ABI 3.5.7p5: Step 5. Set:
2064   // l->gp_offset = l->gp_offset + num_gp * 8
2065   // l->fp_offset = l->fp_offset + num_fp * 16.
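  // E.g. an argument that consumed one GPR and one XMM register advances
  // gp_offset by 8 and fp_offset by 16.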
2066 if (neededInt) { 2067 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2068 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2069 gp_offset_p); 2070 } 2071 if (neededSSE) { 2072 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2073 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2074 fp_offset_p); 2075 } 2076 CGF.EmitBranch(ContBlock); 2077 2078 // Emit code to load the value if it was passed in memory. 2079 2080 CGF.EmitBlock(InMemBlock); 2081 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2082 2083 // Return the appropriate result. 2084 2085 CGF.EmitBlock(ContBlock); 2086 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2, 2087 "vaarg.addr"); 2088 ResAddr->addIncoming(RegAddr, InRegBlock); 2089 ResAddr->addIncoming(MemAddr, InMemBlock); 2090 return ResAddr; 2091 } 2092 2093 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const { 2094 2095 if (Ty->isVoidType()) 2096 return ABIArgInfo::getIgnore(); 2097 2098 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2099 Ty = EnumTy->getDecl()->getIntegerType(); 2100 2101 uint64_t Size = getContext().getTypeSize(Ty); 2102 2103 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2104 if (hasNonTrivialDestructorOrCopyConstructor(RT) || 2105 RT->getDecl()->hasFlexibleArrayMember()) 2106 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2107 2108 // FIXME: mingw-w64-gcc emits 128-bit struct as i128 2109 if (Size == 128 && 2110 getContext().Target.getTriple().getOS() == llvm::Triple::MinGW32) 2111 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2112 Size)); 2113 2114 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 2115 // not 1, 2, 4, or 8 bytes, must be passed by reference." 
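    // For example, struct { int x; } (4 bytes, a power of two) is passed
    // directly as an i32 below, while struct { char c[3]; } (3 bytes) falls
    // through and is passed indirectly.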
2116 if (Size <= 64 && 2117 (Size & (Size - 1)) == 0) 2118 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2119 Size)); 2120 2121 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2122 } 2123 2124 if (Ty->isPromotableIntegerType()) 2125 return ABIArgInfo::getExtend(); 2126 2127 return ABIArgInfo::getDirect(); 2128 } 2129 2130 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2131 2132 QualType RetTy = FI.getReturnType(); 2133 FI.getReturnInfo() = classify(RetTy); 2134 2135 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2136 it != ie; ++it) 2137 it->info = classify(it->type); 2138 } 2139 2140 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2141 CodeGenFunction &CGF) const { 2142 const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); 2143 const llvm::Type *BPP = llvm::PointerType::getUnqual(BP); 2144 2145 CGBuilderTy &Builder = CGF.Builder; 2146 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 2147 "ap"); 2148 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2149 llvm::Type *PTy = 2150 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2151 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2152 2153 uint64_t Offset = 2154 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8); 2155 llvm::Value *NextAddr = 2156 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2157 "ap.next"); 2158 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2159 2160 return AddrTyped; 2161 } 2162 2163 // PowerPC-32 2164 2165 namespace { 2166 class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2167 public: 2168 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2169 2170 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2171 // This is recovered from gcc output. 2172 return 1; // r1 is the dedicated stack pointer 2173 } 2174 2175 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2176 llvm::Value *Address) const; 2177 }; 2178 2179 } 2180 2181 bool 2182 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2183 llvm::Value *Address) const { 2184 // This is calculated from the LLVM and GCC tables and verified 2185 // against gcc output. AFAIK all ABIs use the same encoding. 
2186 
2187   CodeGen::CGBuilderTy &Builder = CGF.Builder;
2188   llvm::LLVMContext &Context = CGF.getLLVMContext();
2189 
2190   const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
2191   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2192   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
2193   llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
2194 
2195   // 0-31: r0-31, the 4-byte general-purpose registers
2196   AssignToArrayRange(Builder, Address, Four8, 0, 31);
2197 
2198   // 32-63: fp0-31, the 8-byte floating-point registers
2199   AssignToArrayRange(Builder, Address, Eight8, 32, 63);
2200 
2201   // 64-76 are various 4-byte special-purpose registers:
2202   // 64: mq
2203   // 65: lr
2204   // 66: ctr
2205   // 67: ap
2206   // 68-75: cr0-7
2207   // 76: xer
2208   AssignToArrayRange(Builder, Address, Four8, 64, 76);
2209 
2210   // 77-108: v0-31, the 16-byte vector registers
2211   AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
2212 
2213   // 109: vrsave
2214   // 110: vscr
2215   // 111: spe_acc
2216   // 112: spefscr
2217   // 113: sfp
2218   AssignToArrayRange(Builder, Address, Four8, 109, 113);
2219 
2220   return false;
2221 }
2222 
2223 
2224 //===----------------------------------------------------------------------===//
2225 // ARM ABI Implementation
2226 //===----------------------------------------------------------------------===//
2227 
2228 namespace {
2229 
2230 class ARMABIInfo : public ABIInfo {
2231 public:
2232   enum ABIKind {
2233     APCS = 0,
2234     AAPCS = 1,
2235     AAPCS_VFP
2236   };
2237 
2238 private:
2239   ABIKind Kind;
2240 
2241 public:
2242   ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {}
2243 
2244 private:
2245   ABIKind getABIKind() const { return Kind; }
2246 
2247   ABIArgInfo classifyReturnType(QualType RetTy) const;
2248   ABIArgInfo classifyArgumentType(QualType Ty) const;
2249 
2250   virtual void computeInfo(CGFunctionInfo &FI) const;
2251 
2252   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2253                                  CodeGenFunction &CGF) const;
2254 };
2255 
2256 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
2257 public:
2258   ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
2259     :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
2260 
2261   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2262     return 13;
2263   }
2264 };
2265 
2266 }
2267 
2268 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
2269   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2270   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2271        it != ie; ++it)
2272     it->info = classifyArgumentType(it->type);
2273 
2274   // Always honor user-specified calling convention.
2275   if (FI.getCallingConvention() != llvm::CallingConv::C)
2276     return;
2277 
2278   // Determine the calling convention the ABI defaults to.
2279   llvm::CallingConv::ID DefaultCC;
2280   llvm::StringRef Env = getContext().Target.getTriple().getEnvironmentName();
2281   if (Env == "gnueabi" || Env == "eabi")
2282     DefaultCC = llvm::CallingConv::ARM_AAPCS;
2283   else
2284     DefaultCC = llvm::CallingConv::ARM_APCS;
2285 
2286   // If the user did not explicitly request a specific calling convention
2287   // (e.g. via the pcs attribute), set the effective calling convention when
2288   // it differs from the ABI default.
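  // For example, on a "gnueabi" target the default is ARM_AAPCS, so the
  // AAPCS_VFP kind below is set explicitly while plain AAPCS is left alone.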
2289 switch (getABIKind()) { 2290 case APCS: 2291 if (DefaultCC != llvm::CallingConv::ARM_APCS) 2292 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS); 2293 break; 2294 case AAPCS: 2295 if (DefaultCC != llvm::CallingConv::ARM_AAPCS) 2296 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS); 2297 break; 2298 case AAPCS_VFP: 2299 if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP) 2300 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP); 2301 break; 2302 } 2303 } 2304 2305 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const { 2306 if (!isAggregateTypeForABI(Ty)) { 2307 // Treat an enum type as its underlying type. 2308 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2309 Ty = EnumTy->getDecl()->getIntegerType(); 2310 2311 return (Ty->isPromotableIntegerType() ? 2312 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2313 } 2314 2315 // Ignore empty records. 2316 if (isEmptyRecord(getContext(), Ty, true)) 2317 return ABIArgInfo::getIgnore(); 2318 2319 // Structures with either a non-trivial destructor or a non-trivial 2320 // copy constructor are always indirect. 2321 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 2322 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2323 2324 // Otherwise, pass by coercing to a structure of the appropriate size. 2325 // 2326 // FIXME: This is kind of nasty... but there isn't much choice because the ARM 2327 // backend doesn't support byval. 2328 // FIXME: This doesn't handle alignment > 64 bits. 2329 const llvm::Type* ElemTy; 2330 unsigned SizeRegs; 2331 if (getContext().getTypeAlign(Ty) > 32) { 2332 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 2333 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 2334 } else { 2335 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 2336 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 2337 } 2338 std::vector<const llvm::Type*> LLVMFields; 2339 LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs)); 2340 const llvm::Type* STy = llvm::StructType::get(getVMContext(), LLVMFields, 2341 true); 2342 return ABIArgInfo::getDirect(STy); 2343 } 2344 2345 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 2346 llvm::LLVMContext &VMContext) { 2347 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 2348 // is called integer-like if its size is less than or equal to one word, and 2349 // the offset of each of its addressable sub-fields is zero. 2350 2351 uint64_t Size = Context.getTypeSize(Ty); 2352 2353 // Check that the type fits in a word. 2354 if (Size > 32) 2355 return false; 2356 2357 // FIXME: Handle vector types! 2358 if (Ty->isVectorType()) 2359 return false; 2360 2361 // Float types are never treated as "integer like". 2362 if (Ty->isRealFloatingType()) 2363 return false; 2364 2365 // If this is a builtin or pointer type then it is ok. 2366 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 2367 return true; 2368 2369 // Small complex integer types are "integer like". 2370 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 2371 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 2372 2373 // Single element and zero sized arrays should be allowed, by the definition 2374 // above, but they are not. 2375 2376 // Otherwise, it must be a record type. 2377 const RecordType *RT = Ty->getAs<RecordType>(); 2378 if (!RT) return false; 2379 2380 // Ignore records with flexible arrays. 
2381 const RecordDecl *RD = RT->getDecl(); 2382 if (RD->hasFlexibleArrayMember()) 2383 return false; 2384 2385 // Check that all sub-fields are at offset 0, and are themselves "integer 2386 // like". 2387 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 2388 2389 bool HadField = false; 2390 unsigned idx = 0; 2391 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2392 i != e; ++i, ++idx) { 2393 const FieldDecl *FD = *i; 2394 2395 // Bit-fields are not addressable, we only need to verify they are "integer 2396 // like". We still have to disallow a subsequent non-bitfield, for example: 2397 // struct { int : 0; int x } 2398 // is non-integer like according to gcc. 2399 if (FD->isBitField()) { 2400 if (!RD->isUnion()) 2401 HadField = true; 2402 2403 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 2404 return false; 2405 2406 continue; 2407 } 2408 2409 // Check if this field is at offset 0. 2410 if (Layout.getFieldOffset(idx) != 0) 2411 return false; 2412 2413 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 2414 return false; 2415 2416 // Only allow at most one field in a structure. This doesn't match the 2417 // wording above, but follows gcc in situations with a field following an 2418 // empty structure. 2419 if (!RD->isUnion()) { 2420 if (HadField) 2421 return false; 2422 2423 HadField = true; 2424 } 2425 } 2426 2427 return true; 2428 } 2429 2430 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const { 2431 if (RetTy->isVoidType()) 2432 return ABIArgInfo::getIgnore(); 2433 2434 // Large vector types should be returned via memory. 2435 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 2436 return ABIArgInfo::getIndirect(0); 2437 2438 if (!isAggregateTypeForABI(RetTy)) { 2439 // Treat an enum type as its underlying type. 2440 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 2441 RetTy = EnumTy->getDecl()->getIntegerType(); 2442 2443 return (RetTy->isPromotableIntegerType() ? 2444 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2445 } 2446 2447 // Structures with either a non-trivial destructor or a non-trivial 2448 // copy constructor are always indirect. 2449 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 2450 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2451 2452 // Are we following APCS? 2453 if (getABIKind() == APCS) { 2454 if (isEmptyRecord(getContext(), RetTy, false)) 2455 return ABIArgInfo::getIgnore(); 2456 2457 // Complex types are all returned as packed integers. 2458 // 2459 // FIXME: Consider using 2 x vector types if the back end handles them 2460 // correctly. 2461 if (RetTy->isAnyComplexType()) 2462 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2463 getContext().getTypeSize(RetTy))); 2464 2465 // Integer like structures are returned in r0. 2466 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 2467 // Return in the smallest viable integer type. 2468 uint64_t Size = getContext().getTypeSize(RetTy); 2469 if (Size <= 8) 2470 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 2471 if (Size <= 16) 2472 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 2473 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 2474 } 2475 2476 // Otherwise return in memory. 2477 return ABIArgInfo::getIndirect(0); 2478 } 2479 2480 // Otherwise this is an AAPCS variant. 
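  // For example, under AAPCS a 2-byte struct comes back directly as an i16 in
  // r0 below, while anything larger than 4 bytes is returned indirectly.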
2481 
2482   if (isEmptyRecord(getContext(), RetTy, true))
2483     return ABIArgInfo::getIgnore();
2484 
2485   // Aggregates <= 4 bytes are returned in r0; other aggregates
2486   // are returned indirectly.
2487   uint64_t Size = getContext().getTypeSize(RetTy);
2488   if (Size <= 32) {
2489     // Return in the smallest viable integer type.
2490     if (Size <= 8)
2491       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
2492     if (Size <= 16)
2493       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
2494     return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
2495   }
2496 
2497   return ABIArgInfo::getIndirect(0);
2498 }
2499 
2500 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2501                                    CodeGenFunction &CGF) const {
2502   // FIXME: Need to handle alignment
2503   const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
2504   const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
2505 
2506   CGBuilderTy &Builder = CGF.Builder;
2507   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
2508                                                        "ap");
2509   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2510   llvm::Type *PTy =
2511     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2512   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
2513 
2514   uint64_t Offset =
2515     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
2516   llvm::Value *NextAddr =
2517     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
2518                       "ap.next");
2519   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2520 
2521   return AddrTyped;
2522 }
2523 
2524 //===----------------------------------------------------------------------===//
2525 // SystemZ ABI Implementation
2526 //===----------------------------------------------------------------------===//
2527 
2528 namespace {
2529 
2530 class SystemZABIInfo : public ABIInfo {
2531 public:
2532   SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
2533 
2534   bool isPromotableIntegerType(QualType Ty) const;
2535 
2536   ABIArgInfo classifyReturnType(QualType RetTy) const;
2537   ABIArgInfo classifyArgumentType(QualType Ty) const;
2538 
2539   virtual void computeInfo(CGFunctionInfo &FI) const {
2540     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2541     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2542          it != ie; ++it)
2543       it->info = classifyArgumentType(it->type);
2544   }
2545 
2546   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2547                                  CodeGenFunction &CGF) const;
2548 };
2549 
2550 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
2551 public:
2552   SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
2553     : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
2554 };
2555 
2556 }
2557 
2558 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
2559   // SystemZ ABI requires all 8, 16 and 32 bit quantities to be extended.
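  // For example, 'short' and 'unsigned int' arguments get the extend
  // attribute, while 'long' (already 64 bits on SystemZ) is passed unchanged.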
2560   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
2561     switch (BT->getKind()) {
2562     case BuiltinType::Bool:
2563     case BuiltinType::Char_S:
2564     case BuiltinType::Char_U:
2565     case BuiltinType::SChar:
2566     case BuiltinType::UChar:
2567     case BuiltinType::Short:
2568     case BuiltinType::UShort:
2569     case BuiltinType::Int:
2570     case BuiltinType::UInt:
2571       return true;
2572     default:
2573       return false;
2574     }
2575   return false;
2576 }
2577 
2578 llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2579                                        CodeGenFunction &CGF) const {
2580   // FIXME: Implement
2581   return 0;
2582 }
2583 
2584 
2585 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
2586   if (RetTy->isVoidType())
2587     return ABIArgInfo::getIgnore();
2588   if (isAggregateTypeForABI(RetTy))
2589     return ABIArgInfo::getIndirect(0);
2590 
2591   return (isPromotableIntegerType(RetTy) ?
2592           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2593 }
2594 
2595 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
2596   if (isAggregateTypeForABI(Ty))
2597     return ABIArgInfo::getIndirect(0);
2598 
2599   return (isPromotableIntegerType(Ty) ?
2600           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2601 }
2602 
2603 //===----------------------------------------------------------------------===//
2604 // MBlaze ABI Implementation
2605 //===----------------------------------------------------------------------===//
2606 
2607 namespace {
2608 
2609 class MBlazeABIInfo : public ABIInfo {
2610 public:
2611   MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
2612 
2613   bool isPromotableIntegerType(QualType Ty) const;
2614 
2615   ABIArgInfo classifyReturnType(QualType RetTy) const;
2616   ABIArgInfo classifyArgumentType(QualType Ty) const;
2617 
2618   virtual void computeInfo(CGFunctionInfo &FI) const {
2619     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2620     for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2621          it != ie; ++it)
2622       it->info = classifyArgumentType(it->type);
2623   }
2624 
2625   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2626                                  CodeGenFunction &CGF) const;
2627 };
2628 
2629 class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo {
2630 public:
2631   MBlazeTargetCodeGenInfo(CodeGenTypes &CGT)
2632     : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {}
2633   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2634                            CodeGen::CodeGenModule &M) const;
2635 };
2636 
2637 }
2638 
2639 bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const {
2640   // MBlaze ABI requires all 8 and 16 bit quantities to be extended.
2641   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
2642     switch (BT->getKind()) {
2643     case BuiltinType::Bool:
2644     case BuiltinType::Char_S:
2645     case BuiltinType::Char_U:
2646     case BuiltinType::SChar:
2647     case BuiltinType::UChar:
2648     case BuiltinType::Short:
2649     case BuiltinType::UShort:
2650       return true;
2651     default:
2652       return false;
2653     }
2654   return false;
2655 }
2656 
2657 llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2658                                       CodeGenFunction &CGF) const {
2659   // FIXME: Implement
2660   return 0;
2661 }
2662 
2663 
2664 ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const {
2665   if (RetTy->isVoidType())
2666     return ABIArgInfo::getIgnore();
2667   if (isAggregateTypeForABI(RetTy))
2668     return ABIArgInfo::getIndirect(0);
2669 
2670   return (isPromotableIntegerType(RetTy) ?
2671 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2672 } 2673 2674 ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const { 2675 if (isAggregateTypeForABI(Ty)) 2676 return ABIArgInfo::getIndirect(0); 2677 2678 return (isPromotableIntegerType(Ty) ? 2679 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2680 } 2681 2682 void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D, 2683 llvm::GlobalValue *GV, 2684 CodeGen::CodeGenModule &M) 2685 const { 2686 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 2687 if (!FD) return; 2688 2689 llvm::CallingConv::ID CC = llvm::CallingConv::C; 2690 if (FD->hasAttr<MBlazeInterruptHandlerAttr>()) 2691 CC = llvm::CallingConv::MBLAZE_INTR; 2692 else if (FD->hasAttr<MBlazeSaveVolatilesAttr>()) 2693 CC = llvm::CallingConv::MBLAZE_SVOL; 2694 2695 if (CC != llvm::CallingConv::C) { 2696 // Handle 'interrupt_handler' attribute: 2697 llvm::Function *F = cast<llvm::Function>(GV); 2698 2699 // Step 1: Set ISR calling convention. 2700 F->setCallingConv(CC); 2701 2702 // Step 2: Add attributes goodness. 2703 F->addFnAttr(llvm::Attribute::NoInline); 2704 } 2705 2706 // Step 3: Emit _interrupt_handler alias. 2707 if (CC == llvm::CallingConv::MBLAZE_INTR) 2708 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 2709 "_interrupt_handler", GV, &M.getModule()); 2710 } 2711 2712 2713 //===----------------------------------------------------------------------===// 2714 // MSP430 ABI Implementation 2715 //===----------------------------------------------------------------------===// 2716 2717 namespace { 2718 2719 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 2720 public: 2721 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 2722 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 2723 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2724 CodeGen::CodeGenModule &M) const; 2725 }; 2726 2727 } 2728 2729 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 2730 llvm::GlobalValue *GV, 2731 CodeGen::CodeGenModule &M) const { 2732 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 2733 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 2734 // Handle 'interrupt' attribute: 2735 llvm::Function *F = cast<llvm::Function>(GV); 2736 2737 // Step 1: Set ISR calling convention. 2738 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 2739 2740 // Step 2: Add attributes goodness. 2741 F->addFnAttr(llvm::Attribute::NoInline); 2742 2743 // Step 3: Emit ISR vector alias. 2744 unsigned Num = attr->getNumber() + 0xffe0; 2745 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 2746 "vector_" + llvm::Twine::utohexstr(Num), 2747 GV, &M.getModule()); 2748 } 2749 } 2750 } 2751 2752 //===----------------------------------------------------------------------===// 2753 // MIPS ABI Implementation. This works for both little-endian and 2754 // big-endian variants. 
2755 //===----------------------------------------------------------------------===//
2756 
2757 namespace {
2758 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
2759 public:
2760   MIPSTargetCodeGenInfo(CodeGenTypes &CGT)
2761     : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
2762 
2763   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
2764     return 29;
2765   }
2766 
2767   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2768                                llvm::Value *Address) const;
2769 };
2770 }
2771 
2772 bool
2773 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2774                                                llvm::Value *Address) const {
2775   // This information comes from gcc's implementation, which seems to be as
2776   // canonical as it gets.
2777 
2778   CodeGen::CGBuilderTy &Builder = CGF.Builder;
2779   llvm::LLVMContext &Context = CGF.getLLVMContext();
2780 
2781   // Everything on MIPS is 4 bytes. Double-precision FP registers
2782   // are aliased to pairs of single-precision FP registers.
2783   const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
2784   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2785 
2786   // 0-31 are the general purpose registers, $0 - $31.
2787   // 32-63 are the floating-point registers, $f0 - $f31.
2788   // 64 and 65 are the multiply/divide registers, $hi and $lo.
2789   // 66 is the (notional, I think) register for signal-handler return.
2790   AssignToArrayRange(Builder, Address, Four8, 0, 65);
2791 
2792   // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
2793   // They are one bit wide and ignored here.
2794 
2795   // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
2796   // (coprocessor 1 is the FP unit)
2797   // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
2798   // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
2799   // 176-181 are the DSP accumulator registers.
2800   AssignToArrayRange(Builder, Address, Four8, 80, 181);
2801 
2802   return false;
2803 }
2804 
2805 
2806 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
2807   if (TheTargetCodeGenInfo)
2808     return *TheTargetCodeGenInfo;
2809 
2810   // For now we just cache the TargetCodeGenInfo in CodeGenModule and don't
2811   // free it.
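  // For example, an x86_64-*-win32 triple selects WinX86_64TargetCodeGenInfo
  // below, while other x86_64 targets get the generic X86_64TargetCodeGenInfo.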
2812 2813 const llvm::Triple &Triple = getContext().Target.getTriple(); 2814 switch (Triple.getArch()) { 2815 default: 2816 return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types)); 2817 2818 case llvm::Triple::mips: 2819 case llvm::Triple::mipsel: 2820 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types)); 2821 2822 case llvm::Triple::arm: 2823 case llvm::Triple::thumb: 2824 { 2825 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; 2826 2827 if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0) 2828 Kind = ARMABIInfo::APCS; 2829 else if (CodeGenOpts.FloatABI == "hard") 2830 Kind = ARMABIInfo::AAPCS_VFP; 2831 2832 return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind)); 2833 } 2834 2835 case llvm::Triple::ppc: 2836 return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types)); 2837 2838 case llvm::Triple::systemz: 2839 return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types)); 2840 2841 case llvm::Triple::mblaze: 2842 return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types)); 2843 2844 case llvm::Triple::msp430: 2845 return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types)); 2846 2847 case llvm::Triple::x86: 2848 switch (Triple.getOS()) { 2849 case llvm::Triple::Darwin: 2850 return *(TheTargetCodeGenInfo = 2851 new X86_32TargetCodeGenInfo(Types, true, true)); 2852 case llvm::Triple::Cygwin: 2853 case llvm::Triple::MinGW32: 2854 case llvm::Triple::AuroraUX: 2855 case llvm::Triple::DragonFly: 2856 case llvm::Triple::FreeBSD: 2857 case llvm::Triple::OpenBSD: 2858 case llvm::Triple::NetBSD: 2859 return *(TheTargetCodeGenInfo = 2860 new X86_32TargetCodeGenInfo(Types, false, true)); 2861 2862 default: 2863 return *(TheTargetCodeGenInfo = 2864 new X86_32TargetCodeGenInfo(Types, false, false)); 2865 } 2866 2867 case llvm::Triple::x86_64: 2868 switch (Triple.getOS()) { 2869 case llvm::Triple::Win32: 2870 case llvm::Triple::MinGW32: 2871 case llvm::Triple::Cygwin: 2872 return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types)); 2873 default: 2874 return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types)); 2875 } 2876 } 2877 } 2878