//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/Type.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return CodeGenFunction::hasAggregateLLVMType(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::TargetData &ABIInfo::getTargetData() const {
  return CGT.getTargetData();
}


void ABIArgInfo::dump() const {
  llvm::raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (const llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
      FT = AT->getElementType();

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields.
/// Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isEmptyRecord(Context, i->getType(), true))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i)
    if (!isEmptyField(Context, *i, AllowArrays))
      return false;
  return true;
}

/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
/// a non-trivial destructor or a non-trivial copy constructor.
static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return false;

  return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
}

/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
/// a record type with either a non-trivial destructor or a non-trivial copy
/// constructor.
static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;

  return hasNonTrivialDestructorOrCopyConstructor(RT);
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i) {
      // Ignore empty records.
      if (isEmptyRecord(Context, i->getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return 0;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(i->getType(), Context);
      if (!Found)
        return 0;
    }
  }

  // Check for single element.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
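    // For example, 'struct { double d[1]; }' is treated as a single 'double'.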
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  return Found;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
      !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to
/// avoid inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;
  }

  return true;
}

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// UseX86_MMXType - Return true if this is an MMX type that should use the
/// special x86_mmx type.
bool UseX86_MMXType(const llvm::Type *IRType) {
  // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
  // special x86_mmx type.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

public:

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.isTargetDarwin()) return 5;

    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;
};

}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are returned in a register if all fields would be
  // returned in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }

  return true;
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Classify "single element" structs as their element type.
    if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) {
      if (const BuiltinType *BT = SeltTy->getAs<BuiltinType>()) {
        if (BT->isIntegerType()) {
          // We need to use the size of the structure, padding
          // bit-fields can adjust that to be larger than the single
          // element type.
          uint64_t Size = getContext().getTypeSize(RetTy);
          return ABIArgInfo::getDirect(
            llvm::IntegerType::get(getVMContext(), (unsigned)Size));
        }

        if (BT->getKind() == BuiltinType::Float) {
          assert(getContext().getTypeSize(RetTy) ==
                 getContext().getTypeSize(SeltTy) &&
                 "Unexpected single element structure size!");
          return ABIArgInfo::getDirect(llvm::Type::getFloatTy(getVMContext()));
        }

        if (BT->getKind() == BuiltinType::Double) {
          assert(getContext().getTypeSize(RetTy) ==
                 getContext().getTypeSize(SeltTy) &&
                 "Unexpected single element structure size!");
          return ABIArgInfo::getDirect(llvm::Type::getDoubleTy(getVMContext()));
        }
      } else if (SeltTy->isPointerType()) {
        // FIXME: It would be really nice if this could come out as the proper
        // pointer type.
        const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext());
        return ABIArgInfo::getDirect(PtrTy);
      } else if (SeltTy->isVectorType()) {
        // 64- and 128-bit vectors are never returned in a
        // register when inside a structure.
        uint64_t Size = getContext().getTypeSize(RetTy);
        if (Size == 64 || Size == 128)
          return ABIArgInfo::getIndirect(0);

        return classifyReturnType(QualType(SeltTy, 0));
      }
    }

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
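  // For example, an enum whose underlying type is 'short' is promotable and
  // is therefore returned with getExtend(), while an int-sized enum is
  // returned with getDirect().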
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (isRecordWithSSEVectorType(Context, i->getType()))
        return true;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    QualType FT = i->getType();

    if (FT->getAs<VectorType>() && Context.getTypeSize(FT) == 128)
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (isRecordWithSSEVectorType(getContext(), Ty))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
  if (!ByVal)
    return ABIArgInfo::getIndirect(0, false);

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(0);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  if (StackAlign < TypeAlign)
    return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
                                   /*Realign=*/true);

  return ABIArgInfo::getIndirect(StackAlign);
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
  // FIXME: Set alignment on indirect arguments.
  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return getIndirectResult(Ty, /*ByVal=*/false);

      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty);
    }

    // Ignore empty structs.
    if (Ty->isStructureType() && getContext().getTypeSize(Ty) == 0)
      return ABIArgInfo::getIgnore();

    // Expand small (<= 128-bit) record types when we know that the stack
    // layout of those arguments will match the struct. This is important
    // because the LLVM backend isn't smart enough to remove byval, which
    // inhibits many optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpand();

    return getIndirectResult(Ty);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory; we handle this by passing
    // them as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
    if (UseX86_MMXType(IRType)) {
      ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
      AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
      return AAI;
    }

    return ABIArgInfo::getDirect();
  }


  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
                                               CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  llvm::LLVMContext &Context = CGF.getLLVMContext();

  const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.isTargetDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(i8, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//


namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;

  const llvm::Type *Get16ByteVectorType(QualType Ty) const;
  const llvm::Type *GetSSETypeAtOffset(const llvm::Type *IRType,
                                       unsigned IROffset, QualType SourceTy,
                                       unsigned SourceOffset) const;
  const llvm::Type *GetINTEGERTypeAtOffset(const llvm::Type *IRType,
                                           unsigned IROffset, QualType SourceTy,
                                           unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the value will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public X86_64ABIInfo {
public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : X86_64ABIInfo(CGT) {}

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new X86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    CodeGen::CGBuilderTy &Builder = CGF.Builder;
    llvm::LLVMContext &Context = CGF.getLLVMContext();

    const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
    llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(Builder, Address, Eight8, 0, 16);

    return false;
  }
};

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    CodeGen::CGBuilderTy &Builder = CGF.Builder;
    llvm::LLVMContext &Context = CGF.getLLVMContext();

    const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
    llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(Builder, Address, Eight8, 0, 16);

    return false;
  }
};

}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
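  // (ComplexX87 only arises from _Complex long double, which is 32 bytes, so
  // any aggregate containing one was already classified as MEMORY by the size
  // check before merge() is reached.)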
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}

void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType())
      Lo = Hi = Integer;
    else
      Current = Integer;
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128) {
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy)
      Current = SSE;
    else if (ET == getContext().DoubleTy)
      Lo = Hi = SSE;
    else if (ET == getContext().LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();
    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // Do post merger cleanup (see below). Only case we worry about is Memory.
    if (Hi == Memory)
      Lo = Memory;
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (hasNonTrivialDestructorOrCopyConstructor(RT))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset = OffsetBase + Layout.getBaseClassOffset(Base);
        classify(i->getType(), Offset, FieldLo, FieldHi);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory)
          break;
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
      // fields, it has class MEMORY.
      //
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size =
          i->getBitWidth()->EvaluateAsInt(getContext()).getZExtValue();

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
    //
    // (a) If one of the classes is MEMORY, the whole argument is
    // passed in memory.
    //
    // (b) If SSEUP is not preceded by SSE, it is converted to SSE.

    // The first of these conditions is guaranteed by how we implement
    // the merge (just bail).
    //
    // The second condition occurs in the case of unions; for example
    // union { _Complex double; unsigned; }.
    if (Hi == Memory)
      Lo = Memory;
    if (Hi == SSEUp && Lo != SSE)
      Hi = SSE;
  }
}

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  return ABIArgInfo::getIndirect(0);
}

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Compute the byval alignment. We trust the back-end to honor the
  // minimum ABI alignment for byval, to make cleaner IR.
  const unsigned MinABIAlign = 8;
  unsigned Align = getContext().getTypeAlign(Ty) / 8;
  if (Align > MinABIAlign)
    return ABIArgInfo::getIndirect(Align);
  return ABIArgInfo::getIndirect(0);
}

/// Get16ByteVectorType - The ABI specifies that a value should be passed in a
/// full vector XMM register. Pick an LLVM IR type that will be passed as a
/// vector register.
const llvm::Type *X86_64ABIInfo::Get16ByteVectorType(QualType Ty) const {
  const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);

  // Wrapper structs that just contain vectors are passed just like vectors,
  // strip them off if present.
  const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
  while (STy && STy->getNumElements() == 1) {
    IRType = STy->getElementType(0);
    STy = dyn_cast<llvm::StructType>(IRType);
  }

  // If the preferred type is a 16-byte vector, prefer to pass it.
  if (const llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)) {
    const llvm::Type *EltTy = VT->getElementType();
    if (VT->getBitWidth() == 128 &&
        (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
         EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
         EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
         EltTy->isIntegerTy(128)))
      return VT;
  }

  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
}

/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or to be in
/// alignment padding. The user type specified is known to be at most 128 bits
/// in size and to have passed through X86_64ABIInfo::classify with a
/// successful classification that put one of the two halves in the INTEGER
/// class.
///
/// It is conservatively correct to return false.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here. This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    // Check each element to see if the element overlaps with the queried range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;

      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = (unsigned)Layout.getBaseClassOffset(Base);
        if (BaseOffset >= EndBit) continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0;
        if (!BitsContainNoUserData(i->getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest. Yes
    // this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // much.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit) break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset : 0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}

/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
/// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
static bool ContainsFloatAtOffset(const llvm::Type *IRType, unsigned IROffset,
                                  const llvm::TargetData &TD) {
  // Base case if we find a float.
  if (IROffset == 0 && IRType->isFloatTy())
    return true;

  // If this is a struct, recurse into the field at the specified offset.
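  // For example, querying {i32, {float}} at offset 4 descends into the {float}
  // element with a remaining offset of 0, and then into the float itself.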
  if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);
    return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
  }

  // If this is an array, recurse into the element at the specified offset.
  if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    const llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset/EltSize*EltSize;
    return ContainsFloatAtOffset(EltTy, IROffset, TD);
  }

  return false;
}


/// GetSSETypeAtOffset - Return a type that will be passed by the backend in
/// the low 8 bytes of an XMM register, corresponding to the SSE class.
const llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  // The only three choices we have are double, <2 x float>, or float. We
  // pass as float if the last 4 bytes are just padding. This happens for
  // structs that contain 3 floats.
  if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
                            SourceOffset*8+64, getContext()))
    return llvm::Type::getFloatTy(getVMContext());

  // We want to pass as <2 x float> if the LLVM IR type contains a float at
  // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
  // case.
  if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) &&
      ContainsFloatAtOffset(IRType, IROffset+4, getTargetData()))
    return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);

  return llvm::Type::getDoubleTy(getVMContext());
}


/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
/// an 8-byte GPR. This means that we either have a scalar or we are talking
/// about the high or low part of an up-to-16-byte struct. This routine picks
/// the best LLVM IR type to represent this, which may be i64 or may be
/// anything else that the backend will pass in a GPR that works better (e.g.
/// i8, %foo*, etc).
///
/// IRType is an LLVM IR type that corresponds to (part of) the IR type for
/// the source type. IROffset is an offset in bytes into the LLVM IR type that
/// the 8-byte value references.
///
/// SourceTy is the source level type for the entire argument. SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
///
const llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it. See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64))
      return IRType;

    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
    // goodness in the source type is just tail padding. This is allowed to
    // kick in for struct {double,int} on the int, but not on
    // struct{double,int,int} because we wouldn't return the second int.
    // We have to do this analysis on the source type because we can't depend
    // on unions being lowered a specific way etc.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32)) {
      unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth();

      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                SourceOffset*8+64, getContext()))
        return IRType;
    }
  }

  if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getTargetData().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    const llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                  SourceOffset);
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in an
  // integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
    (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
}


/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
/// be used as elements of a two register pair to pass or return, return a
/// first class aggregate to represent them. For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
static const llvm::Type *
GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
                           const llvm::TargetData &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
  // at offset 8. If the high and low parts we inferred are both 4-byte types
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
  // the second element at offset 8. Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // To handle this, we have to increase the size of the low part so that the
  // second element will start at an 8 byte offset. We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
    // There are only two sorts of types the ABI generation code can produce
    // for the low part of a pair that aren't 8 bytes in size: float or
    // i8/i16/i32. Promote these to a larger type.
    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  const llvm::StructType *Result =
    llvm::StructType::get(Lo->getContext(), Lo, Hi, NULL);


  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}

ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
  // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

  // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
  // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0,
                                     RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
        RetTy = EnumTy->getDecl()->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          RetTy->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }
    break;

  // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
  // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0, RetTy, 0);
    break;

  // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
  // returned on the X87 stack in %st0 as an 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

  // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
  // part of the value is returned in %st0 and the imaginary part in
  // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(getVMContext(),
                                    llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()),
                                    NULL);
    break;
  }

  const llvm::Type *HighPart = 0;
  switch (Hi) {
  // Memory was handled previously and X87 should
  // never occur as a hi class.
  case Memory:
  case X87:
    assert(0 && "Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
1633 case NoClass:
1634 break;
1635
1636 case Integer:
1637 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy),
1638 8, RetTy, 8);
1639 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
1640 return ABIArgInfo::getDirect(HighPart, 8);
1641 break;
1642 case SSE:
1643 HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 8, RetTy, 8);
1644 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
1645 return ABIArgInfo::getDirect(HighPart, 8);
1646 break;
1647
1648 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
1649 // is passed in the upper half of the last used SSE register.
1650 //
1651 // SSEUP should always be preceded by SSE, just widen.
1652 case SSEUp:
1653 assert(Lo == SSE && "Unexpected SSEUp classification.");
1654 ResType = Get16ByteVectorType(RetTy);
1655 break;
1656
1657 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
1658 // returned together with the previous X87 value in %st0.
1659 case X87Up:
1660 // If X87Up is preceded by X87, we don't need to do
1661 // anything. However, in some cases with unions it may not be
1662 // preceded by X87. In such situations we follow gcc and pass the
1663 // extra bits in an SSE reg.
1664 if (Lo != X87) {
1665 HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy),
1666 8, RetTy, 8);
1667 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
1668 return ABIArgInfo::getDirect(HighPart, 8);
1669 }
1670 break;
1671 }
1672
1673 // If a high part was specified, merge it together with the low part. It is
1674 // known to pass in the high eightbyte of the result. We do this by forming a
1675 // first class struct aggregate with the high and low part: {low, high}
1676 if (HighPart)
1677 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
1678
1679 return ABIArgInfo::getDirect(ResType);
1680 }
1681
1682 ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
1683 unsigned &neededSSE) const {
1684 X86_64ABIInfo::Class Lo, Hi;
1685 classify(Ty, 0, Lo, Hi);
1686
1687 // Check some invariants.
1688 // FIXME: Enforce these by construction.
1689 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
1690 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
1691
1692 neededInt = 0;
1693 neededSSE = 0;
1694 const llvm::Type *ResType = 0;
1695 switch (Lo) {
1696 case NoClass:
1697 if (Hi == NoClass)
1698 return ABIArgInfo::getIgnore();
1699 // If the low part is just padding, it takes no register, leave ResType
1700 // null.
1701 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
1702 "Unknown missing lo part");
1703 break;
1704
1705 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
1706 // on the stack.
1707 case Memory:
1708
1709 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
1710 // COMPLEX_X87, it is passed in memory.
1711 case X87:
1712 case ComplexX87:
1713 return getIndirectResult(Ty);
1714
1715 case SSEUp:
1716 case X87Up:
1717 assert(0 && "Invalid classification for lo word.");
1718
1719 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
1720 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
1721 // and %r9 is used.
1722 case Integer:
1723 ++neededInt;
1724
1725 // Pick an 8-byte type based on the preferred type.
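// For example, a plain 'int' argument comes back as i32 here (the upper 32
// bits of its eightbyte carry no user data), while something like
// 'struct { int a, b; }' comes back as i64.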
1726 ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 0, Ty, 0);
1727
1728 // If we have a sign or zero extended integer, make sure to return Extend
1729 // so that the parameter gets the right LLVM IR attributes.
1730 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
1731 // Treat an enum type as its underlying type.
1732 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1733 Ty = EnumTy->getDecl()->getIntegerType();
1734
1735 if (Ty->isIntegralOrEnumerationType() &&
1736 Ty->isPromotableIntegerType())
1737 return ABIArgInfo::getExtend();
1738 }
1739
1740 break;
1741
1742 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
1743 // available SSE register is used, the registers are taken in the
1744 // order from %xmm0 to %xmm7.
1745 case SSE: {
1746 const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
1747 if (Hi != NoClass || !UseX86_MMXType(IRType))
1748 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
1749 else
1750 // This is an MMX type. Treat it as such.
1751 ResType = llvm::Type::getX86_MMXTy(getVMContext());
1752
1753 ++neededSSE;
1754 break;
1755 }
1756 }
1757
1758 const llvm::Type *HighPart = 0;
1759 switch (Hi) {
1760 // Memory was handled previously, ComplexX87 and X87 should
1761 // never occur as hi classes, and X87Up must be preceded by X87,
1762 // which is passed in memory.
1763 case Memory:
1764 case X87:
1765 case ComplexX87:
1766 assert(0 && "Invalid classification for hi word.");
1767 break;
1768
1769 case NoClass: break;
1770
1771 case Integer:
1772 ++neededInt;
1773 // Pick an 8-byte type based on the preferred type.
1774 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
1775
1776 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
1777 return ABIArgInfo::getDirect(HighPart, 8);
1778 break;
1779
1780 // X87Up generally doesn't occur here (long double is passed in
1781 // memory), except in situations involving unions.
1782 case X87Up:
1783 case SSE:
1784 HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
1785
1786 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
1787 return ABIArgInfo::getDirect(HighPart, 8);
1788
1789 ++neededSSE;
1790 break;
1791
1792 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
1793 // eightbyte is passed in the upper half of the last used SSE
1794 // register. This only happens when 128-bit vectors are passed.
1795 case SSEUp:
1796 assert(Lo == SSE && "Unexpected SSEUp classification");
1797 ResType = Get16ByteVectorType(Ty);
1798 break;
1799 }
1800
1801 // If a high part was specified, merge it together with the low part. It is
1802 // known to pass in the high eightbyte of the result. We do this by forming a
1803 // first class struct aggregate with the high and low part: {low, high}
1804 if (HighPart)
1805 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
1806
1807 return ABIArgInfo::getDirect(ResType);
1808 }
1809
1810 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
1811
1812 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
1813
1814 // Keep track of the number of assigned registers.
1815 unsigned freeIntRegs = 6, freeSSERegs = 8;
1816
1817 // If the return value is indirect, then the hidden argument is consuming one
1818 // integer register.
1819 if (FI.getReturnInfo().isIndirect())
1820 --freeIntRegs;
1821
1822 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
1823 // get assigned (in left-to-right order) for passing as follows...
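// For example, once %rdi..%r9 have been exhausted by earlier arguments, a
// later argument that would need an integer register is passed on the stack
// instead (the getIndirectResult() branch in the loop below) rather than
// being split between registers and memory.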
1824 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 1825 it != ie; ++it) { 1826 unsigned neededInt, neededSSE; 1827 it->info = classifyArgumentType(it->type, neededInt, neededSSE); 1828 1829 // AMD64-ABI 3.2.3p3: If there are no registers available for any 1830 // eightbyte of an argument, the whole argument is passed on the 1831 // stack. If registers have already been assigned for some 1832 // eightbytes of such an argument, the assignments get reverted. 1833 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 1834 freeIntRegs -= neededInt; 1835 freeSSERegs -= neededSSE; 1836 } else { 1837 it->info = getIndirectResult(it->type); 1838 } 1839 } 1840 } 1841 1842 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 1843 QualType Ty, 1844 CodeGenFunction &CGF) { 1845 llvm::Value *overflow_arg_area_p = 1846 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 1847 llvm::Value *overflow_arg_area = 1848 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 1849 1850 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 1851 // byte boundary if alignment needed by type exceeds 8 byte boundary. 1852 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 1853 if (Align > 8) { 1854 // Note that we follow the ABI & gcc here, even though the type 1855 // could in theory have an alignment greater than 16. This case 1856 // shouldn't ever matter in practice. 1857 1858 // overflow_arg_area = (overflow_arg_area + 15) & ~15; 1859 llvm::Value *Offset = 1860 llvm::ConstantInt::get(CGF.Int32Ty, 15); 1861 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 1862 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 1863 CGF.Int64Ty); 1864 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~15LL); 1865 overflow_arg_area = 1866 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 1867 overflow_arg_area->getType(), 1868 "overflow_arg_area.align"); 1869 } 1870 1871 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 1872 const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 1873 llvm::Value *Res = 1874 CGF.Builder.CreateBitCast(overflow_arg_area, 1875 llvm::PointerType::getUnqual(LTy)); 1876 1877 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 1878 // l->overflow_arg_area + sizeof(type). 1879 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 1880 // an 8 byte boundary. 1881 1882 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 1883 llvm::Value *Offset = 1884 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 1885 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 1886 "overflow_arg_area.next"); 1887 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 1888 1889 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 1890 return Res; 1891 } 1892 1893 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 1894 CodeGenFunction &CGF) const { 1895 llvm::LLVMContext &VMContext = CGF.getLLVMContext(); 1896 1897 // Assume that va_list type is correct; should be pointer to LLVM type: 1898 // struct { 1899 // i32 gp_offset; 1900 // i32 fp_offset; 1901 // i8* overflow_arg_area; 1902 // i8* reg_save_area; 1903 // }; 1904 unsigned neededInt, neededSSE; 1905 1906 Ty = CGF.getContext().getCanonicalType(Ty); 1907 ABIArgInfo AI = classifyArgumentType(Ty, neededInt, neededSSE); 1908 1909 // AMD64-ABI 3.5.7p5: Step 1. 
Determine whether type may be passed
1910 // in the registers. If not go to step 7.
1911 if (!neededInt && !neededSSE)
1912 return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
1913
1914 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
1915 // general purpose registers needed to pass type and num_fp to hold
1916 // the number of floating point registers needed.
1917
1918 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
1919 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
1920 // l->fp_offset > 304 - num_fp * 16 go to step 7.
1921 //
1922 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
1923 // register save space.
1924
1925 llvm::Value *InRegs = 0;
1926 llvm::Value *gp_offset_p = 0, *gp_offset = 0;
1927 llvm::Value *fp_offset_p = 0, *fp_offset = 0;
1928 if (neededInt) {
1929 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
1930 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
1931 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
1932 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
1933 }
1934
1935 if (neededSSE) {
1936 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
1937 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
1938 llvm::Value *FitsInFP =
1939 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
1940 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
1941 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
1942 }
1943
1944 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
1945 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
1946 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
1947 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
1948
1949 // Emit code to load the value if it was passed in registers.
1950
1951 CGF.EmitBlock(InRegBlock);
1952
1953 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
1954 // an offset of l->gp_offset and/or l->fp_offset. This may require
1955 // copying to a temporary location in case the parameter is passed
1956 // in different register classes or requires an alignment greater
1957 // than 8 for general purpose registers and 16 for XMM registers.
1958 //
1959 // FIXME: This really results in shameful code when we end up needing to
1960 // collect arguments from different places; often what should result in a
1961 // simple assembling of a structure from scattered addresses has many more
1962 // loads than necessary. Can we clean this up?
1963 const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
1964 llvm::Value *RegAddr =
1965 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
1966 "reg_save_area");
1967 if (neededInt && neededSSE) {
1968 // FIXME: Cleanup.
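// Illustrative case: for something like 'struct { long l; double d; }' we
// need one GP and one SSE register, so the two eightbytes live in different
// parts of the register save area; they are copied into a temporary
// {i64, double} below, and the address of that temporary becomes the
// in-register result.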
1969 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 1970 const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 1971 llvm::Value *Tmp = CGF.CreateTempAlloca(ST); 1972 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 1973 const llvm::Type *TyLo = ST->getElementType(0); 1974 const llvm::Type *TyHi = ST->getElementType(1); 1975 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 1976 "Unexpected ABI info for mixed regs"); 1977 const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 1978 const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 1979 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 1980 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 1981 llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr; 1982 llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr; 1983 llvm::Value *V = 1984 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 1985 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 1986 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 1987 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 1988 1989 RegAddr = CGF.Builder.CreateBitCast(Tmp, 1990 llvm::PointerType::getUnqual(LTy)); 1991 } else if (neededInt) { 1992 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 1993 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 1994 llvm::PointerType::getUnqual(LTy)); 1995 } else if (neededSSE == 1) { 1996 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 1997 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 1998 llvm::PointerType::getUnqual(LTy)); 1999 } else { 2000 assert(neededSSE == 2 && "Invalid number of needed registers!"); 2001 // SSE registers are spaced 16 bytes apart in the register save 2002 // area, we need to collect the two eightbytes together. 2003 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2004 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); 2005 const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext); 2006 const llvm::Type *DblPtrTy = 2007 llvm::PointerType::getUnqual(DoubleTy); 2008 const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy, 2009 DoubleTy, NULL); 2010 llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST); 2011 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 2012 DblPtrTy)); 2013 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2014 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 2015 DblPtrTy)); 2016 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2017 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2018 llvm::PointerType::getUnqual(LTy)); 2019 } 2020 2021 // AMD64-ABI 3.5.7p5: Step 5. Set: 2022 // l->gp_offset = l->gp_offset + num_gp * 8 2023 // l->fp_offset = l->fp_offset + num_fp * 16. 2024 if (neededInt) { 2025 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2026 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2027 gp_offset_p); 2028 } 2029 if (neededSSE) { 2030 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2031 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2032 fp_offset_p); 2033 } 2034 CGF.EmitBranch(ContBlock); 2035 2036 // Emit code to load the value if it was passed in memory. 2037 2038 CGF.EmitBlock(InMemBlock); 2039 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2040 2041 // Return the appropriate result. 
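// Whichever branch executed supplies the address: the phi below merges the
// register save area copy (RegAddr) with the overflow area slot (MemAddr).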
2042 2043 CGF.EmitBlock(ContBlock); 2044 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2045 "vaarg.addr"); 2046 ResAddr->reserveOperandSpace(2); 2047 ResAddr->addIncoming(RegAddr, InRegBlock); 2048 ResAddr->addIncoming(MemAddr, InMemBlock); 2049 return ResAddr; 2050 } 2051 2052 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2053 CodeGenFunction &CGF) const { 2054 const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); 2055 const llvm::Type *BPP = llvm::PointerType::getUnqual(BP); 2056 2057 CGBuilderTy &Builder = CGF.Builder; 2058 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 2059 "ap"); 2060 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2061 llvm::Type *PTy = 2062 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2063 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2064 2065 uint64_t Offset = 2066 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8); 2067 llvm::Value *NextAddr = 2068 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2069 "ap.next"); 2070 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2071 2072 return AddrTyped; 2073 } 2074 2075 // PowerPC-32 2076 2077 namespace { 2078 class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2079 public: 2080 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2081 2082 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2083 // This is recovered from gcc output. 2084 return 1; // r1 is the dedicated stack pointer 2085 } 2086 2087 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2088 llvm::Value *Address) const; 2089 }; 2090 2091 } 2092 2093 bool 2094 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2095 llvm::Value *Address) const { 2096 // This is calculated from the LLVM and GCC tables and verified 2097 // against gcc output. AFAIK all ABIs use the same encoding. 
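// Roughly: each DWARF register number in a range is assigned its size in
// bytes in the table at Address (via AssignToArrayRange), which EH runtime
// code consults when restoring registers.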
2098 2099 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2100 llvm::LLVMContext &Context = CGF.getLLVMContext(); 2101 2102 const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context); 2103 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2104 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2105 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2106 2107 // 0-31: r0-31, the 4-byte general-purpose registers 2108 AssignToArrayRange(Builder, Address, Four8, 0, 31); 2109 2110 // 32-63: fp0-31, the 8-byte floating-point registers 2111 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2112 2113 // 64-76 are various 4-byte special-purpose registers: 2114 // 64: mq 2115 // 65: lr 2116 // 66: ctr 2117 // 67: ap 2118 // 68-75 cr0-7 2119 // 76: xer 2120 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2121 2122 // 77-108: v0-31, the 16-byte vector registers 2123 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2124 2125 // 109: vrsave 2126 // 110: vscr 2127 // 111: spe_acc 2128 // 112: spefscr 2129 // 113: sfp 2130 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2131 2132 return false; 2133 } 2134 2135 2136 //===----------------------------------------------------------------------===// 2137 // ARM ABI Implementation 2138 //===----------------------------------------------------------------------===// 2139 2140 namespace { 2141 2142 class ARMABIInfo : public ABIInfo { 2143 public: 2144 enum ABIKind { 2145 APCS = 0, 2146 AAPCS = 1, 2147 AAPCS_VFP 2148 }; 2149 2150 private: 2151 ABIKind Kind; 2152 2153 public: 2154 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {} 2155 2156 private: 2157 ABIKind getABIKind() const { return Kind; } 2158 2159 ABIArgInfo classifyReturnType(QualType RetTy) const; 2160 ABIArgInfo classifyArgumentType(QualType RetTy) const; 2161 2162 virtual void computeInfo(CGFunctionInfo &FI) const; 2163 2164 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2165 CodeGenFunction &CGF) const; 2166 }; 2167 2168 class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 2169 public: 2170 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 2171 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 2172 2173 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2174 return 13; 2175 } 2176 }; 2177 2178 } 2179 2180 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 2181 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2182 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2183 it != ie; ++it) 2184 it->info = classifyArgumentType(it->type); 2185 2186 const llvm::Triple &Triple(getContext().Target.getTriple()); 2187 llvm::CallingConv::ID DefaultCC; 2188 if (Triple.getEnvironmentName() == "gnueabi" || 2189 Triple.getEnvironmentName() == "eabi") 2190 DefaultCC = llvm::CallingConv::ARM_AAPCS; 2191 else 2192 DefaultCC = llvm::CallingConv::ARM_APCS; 2193 2194 switch (getABIKind()) { 2195 case APCS: 2196 if (DefaultCC != llvm::CallingConv::ARM_APCS) 2197 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS); 2198 break; 2199 2200 case AAPCS: 2201 if (DefaultCC != llvm::CallingConv::ARM_AAPCS) 2202 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS); 2203 break; 2204 2205 case AAPCS_VFP: 2206 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP); 2207 break; 2208 } 2209 } 2210 2211 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const { 2212 if (!isAggregateTypeForABI(Ty)) { 2213 // Treat an enum type as its underlying type. 
2214 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2215 Ty = EnumTy->getDecl()->getIntegerType(); 2216 2217 return (Ty->isPromotableIntegerType() ? 2218 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2219 } 2220 2221 // Ignore empty records. 2222 if (isEmptyRecord(getContext(), Ty, true)) 2223 return ABIArgInfo::getIgnore(); 2224 2225 // Structures with either a non-trivial destructor or a non-trivial 2226 // copy constructor are always indirect. 2227 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 2228 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2229 2230 // NEON vectors are implemented as (theoretically) opaque structures wrapping 2231 // the underlying vector type. We trust the backend to pass the underlying 2232 // vectors appropriately, so we can unwrap the structs which generally will 2233 // lead to much cleaner IR. 2234 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) { 2235 if (SeltTy->isVectorType()) 2236 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 2237 } 2238 2239 // Otherwise, pass by coercing to a structure of the appropriate size. 2240 // 2241 // FIXME: This is kind of nasty... but there isn't much choice because the ARM 2242 // backend doesn't support byval. 2243 // FIXME: This doesn't handle alignment > 64 bits. 2244 const llvm::Type* ElemTy; 2245 unsigned SizeRegs; 2246 if (getContext().getTypeAlign(Ty) > 32) { 2247 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 2248 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 2249 } else { 2250 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 2251 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 2252 } 2253 std::vector<const llvm::Type*> LLVMFields; 2254 LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs)); 2255 const llvm::Type* STy = llvm::StructType::get(getVMContext(), LLVMFields, 2256 true); 2257 return ABIArgInfo::getDirect(STy); 2258 } 2259 2260 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 2261 llvm::LLVMContext &VMContext) { 2262 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 2263 // is called integer-like if its size is less than or equal to one word, and 2264 // the offset of each of its addressable sub-fields is zero. 2265 2266 uint64_t Size = Context.getTypeSize(Ty); 2267 2268 // Check that the type fits in a word. 2269 if (Size > 32) 2270 return false; 2271 2272 // FIXME: Handle vector types! 2273 if (Ty->isVectorType()) 2274 return false; 2275 2276 // Float types are never treated as "integer like". 2277 if (Ty->isRealFloatingType()) 2278 return false; 2279 2280 // If this is a builtin or pointer type then it is ok. 2281 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 2282 return true; 2283 2284 // Small complex integer types are "integer like". 2285 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 2286 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 2287 2288 // Single element and zero sized arrays should be allowed, by the definition 2289 // above, but they are not. 2290 2291 // Otherwise, it must be a record type. 2292 const RecordType *RT = Ty->getAs<RecordType>(); 2293 if (!RT) return false; 2294 2295 // Ignore records with flexible arrays. 2296 const RecordDecl *RD = RT->getDecl(); 2297 if (RD->hasFlexibleArrayMember()) 2298 return false; 2299 2300 // Check that all sub-fields are at offset 0, and are themselves "integer 2301 // like". 
2302 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 2303 2304 bool HadField = false; 2305 unsigned idx = 0; 2306 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2307 i != e; ++i, ++idx) { 2308 const FieldDecl *FD = *i; 2309 2310 // Bit-fields are not addressable, we only need to verify they are "integer 2311 // like". We still have to disallow a subsequent non-bitfield, for example: 2312 // struct { int : 0; int x } 2313 // is non-integer like according to gcc. 2314 if (FD->isBitField()) { 2315 if (!RD->isUnion()) 2316 HadField = true; 2317 2318 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 2319 return false; 2320 2321 continue; 2322 } 2323 2324 // Check if this field is at offset 0. 2325 if (Layout.getFieldOffset(idx) != 0) 2326 return false; 2327 2328 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 2329 return false; 2330 2331 // Only allow at most one field in a structure. This doesn't match the 2332 // wording above, but follows gcc in situations with a field following an 2333 // empty structure. 2334 if (!RD->isUnion()) { 2335 if (HadField) 2336 return false; 2337 2338 HadField = true; 2339 } 2340 } 2341 2342 return true; 2343 } 2344 2345 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const { 2346 if (RetTy->isVoidType()) 2347 return ABIArgInfo::getIgnore(); 2348 2349 // Large vector types should be returned via memory. 2350 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 2351 return ABIArgInfo::getIndirect(0); 2352 2353 if (!isAggregateTypeForABI(RetTy)) { 2354 // Treat an enum type as its underlying type. 2355 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 2356 RetTy = EnumTy->getDecl()->getIntegerType(); 2357 2358 return (RetTy->isPromotableIntegerType() ? 2359 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2360 } 2361 2362 // Structures with either a non-trivial destructor or a non-trivial 2363 // copy constructor are always indirect. 2364 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 2365 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2366 2367 // Are we following APCS? 2368 if (getABIKind() == APCS) { 2369 if (isEmptyRecord(getContext(), RetTy, false)) 2370 return ABIArgInfo::getIgnore(); 2371 2372 // Complex types are all returned as packed integers. 2373 // 2374 // FIXME: Consider using 2 x vector types if the back end handles them 2375 // correctly. 2376 if (RetTy->isAnyComplexType()) 2377 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2378 getContext().getTypeSize(RetTy))); 2379 2380 // Integer like structures are returned in r0. 2381 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 2382 // Return in the smallest viable integer type. 2383 uint64_t Size = getContext().getTypeSize(RetTy); 2384 if (Size <= 8) 2385 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 2386 if (Size <= 16) 2387 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 2388 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 2389 } 2390 2391 // Otherwise return in memory. 2392 return ABIArgInfo::getIndirect(0); 2393 } 2394 2395 // Otherwise this is an AAPCS variant. 2396 2397 if (isEmptyRecord(getContext(), RetTy, true)) 2398 return ABIArgInfo::getIgnore(); 2399 2400 // Aggregates <= 4 bytes are returned in r0; other aggregates 2401 // are returned indirectly. 2402 uint64_t Size = getContext().getTypeSize(RetTy); 2403 if (Size <= 32) { 2404 // Return in the smallest viable integer type. 
2405 if (Size <= 8) 2406 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 2407 if (Size <= 16) 2408 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 2409 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 2410 } 2411 2412 return ABIArgInfo::getIndirect(0); 2413 } 2414 2415 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2416 CodeGenFunction &CGF) const { 2417 // FIXME: Need to handle alignment 2418 const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); 2419 const llvm::Type *BPP = llvm::PointerType::getUnqual(BP); 2420 2421 CGBuilderTy &Builder = CGF.Builder; 2422 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 2423 "ap"); 2424 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2425 llvm::Type *PTy = 2426 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2427 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2428 2429 uint64_t Offset = 2430 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 2431 llvm::Value *NextAddr = 2432 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2433 "ap.next"); 2434 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2435 2436 return AddrTyped; 2437 } 2438 2439 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const { 2440 if (RetTy->isVoidType()) 2441 return ABIArgInfo::getIgnore(); 2442 2443 if (isAggregateTypeForABI(RetTy)) 2444 return ABIArgInfo::getIndirect(0); 2445 2446 // Treat an enum type as its underlying type. 2447 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 2448 RetTy = EnumTy->getDecl()->getIntegerType(); 2449 2450 return (RetTy->isPromotableIntegerType() ? 2451 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2452 } 2453 2454 //===----------------------------------------------------------------------===// 2455 // SystemZ ABI Implementation 2456 //===----------------------------------------------------------------------===// 2457 2458 namespace { 2459 2460 class SystemZABIInfo : public ABIInfo { 2461 public: 2462 SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 2463 2464 bool isPromotableIntegerType(QualType Ty) const; 2465 2466 ABIArgInfo classifyReturnType(QualType RetTy) const; 2467 ABIArgInfo classifyArgumentType(QualType RetTy) const; 2468 2469 virtual void computeInfo(CGFunctionInfo &FI) const { 2470 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2471 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2472 it != ie; ++it) 2473 it->info = classifyArgumentType(it->type); 2474 } 2475 2476 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2477 CodeGenFunction &CGF) const; 2478 }; 2479 2480 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo { 2481 public: 2482 SystemZTargetCodeGenInfo(CodeGenTypes &CGT) 2483 : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {} 2484 }; 2485 2486 } 2487 2488 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const { 2489 // SystemZ ABI requires all 8, 16 and 32 bit quantities to be extended. 
2490 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
2491 switch (BT->getKind()) {
2492 case BuiltinType::Bool:
2493 case BuiltinType::Char_S:
2494 case BuiltinType::Char_U:
2495 case BuiltinType::SChar:
2496 case BuiltinType::UChar:
2497 case BuiltinType::Short:
2498 case BuiltinType::UShort:
2499 case BuiltinType::Int:
2500 case BuiltinType::UInt:
2501 return true;
2502 default:
2503 return false;
2504 }
2505 return false;
2506 }
2507
2508 llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2509 CodeGenFunction &CGF) const {
2510 // FIXME: Implement
2511 return 0;
2512 }
2513
2514
2515 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
2516 if (RetTy->isVoidType())
2517 return ABIArgInfo::getIgnore();
2518 if (isAggregateTypeForABI(RetTy))
2519 return ABIArgInfo::getIndirect(0);
2520
2521 return (isPromotableIntegerType(RetTy) ?
2522 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2523 }
2524
2525 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
2526 if (isAggregateTypeForABI(Ty))
2527 return ABIArgInfo::getIndirect(0);
2528
2529 return (isPromotableIntegerType(Ty) ?
2530 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2531 }
2532
2533 //===----------------------------------------------------------------------===//
2534 // MSP430 ABI Implementation
2535 //===----------------------------------------------------------------------===//
2536
2537 namespace {
2538
2539 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
2540 public:
2541 MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
2542 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
2543 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2544 CodeGen::CodeGenModule &M) const;
2545 };
2546
2547 }
2548
2549 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
2550 llvm::GlobalValue *GV,
2551 CodeGen::CodeGenModule &M) const {
2552 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
2553 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
2554 // Handle 'interrupt' attribute:
2555 llvm::Function *F = cast<llvm::Function>(GV);
2556
2557 // Step 1: Set ISR calling convention.
2558 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
2559
2560 // Step 2: Add the required function attributes.
2561 F->addFnAttr(llvm::Attribute::NoInline);
2562
2563 // Step 3: Emit ISR vector alias.
2564 unsigned Num = attr->getNumber() + 0xffe0;
2565 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
2566 "vector_" +
2567 llvm::LowercaseString(llvm::utohexstr(Num)),
2568 GV, &M.getModule());
2569 }
2570 }
2571 }
2572
2573 //===----------------------------------------------------------------------===//
2574 // MIPS ABI Implementation. This works for both little-endian and
2575 // big-endian variants.
2576 //===----------------------------------------------------------------------===//
2577
2578 namespace {
2579 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
2580 public:
2581 MIPSTargetCodeGenInfo(CodeGenTypes &CGT)
2582 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
2583
2584 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
2585 return 29;
2586 }
2587
2588 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2589 llvm::Value *Address) const;
2590 };
2591 }
2592
2593 bool
2594 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2595 llvm::Value *Address) const {
2596 // This information comes from gcc's implementation, which seems to be
2597 // as canonical as it gets.
2598 2599 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2600 llvm::LLVMContext &Context = CGF.getLLVMContext(); 2601 2602 // Everything on MIPS is 4 bytes. Double-precision FP registers 2603 // are aliased to pairs of single-precision FP registers. 2604 const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context); 2605 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2606 2607 // 0-31 are the general purpose registers, $0 - $31. 2608 // 32-63 are the floating-point registers, $f0 - $f31. 2609 // 64 and 65 are the multiply/divide registers, $hi and $lo. 2610 // 66 is the (notional, I think) register for signal-handler return. 2611 AssignToArrayRange(Builder, Address, Four8, 0, 65); 2612 2613 // 67-74 are the floating-point status registers, $fcc0 - $fcc7. 2614 // They are one bit wide and ignored here. 2615 2616 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31. 2617 // (coprocessor 1 is the FP unit) 2618 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31. 2619 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31. 2620 // 176-181 are the DSP accumulator registers. 2621 AssignToArrayRange(Builder, Address, Four8, 80, 181); 2622 2623 return false; 2624 } 2625 2626 2627 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { 2628 if (TheTargetCodeGenInfo) 2629 return *TheTargetCodeGenInfo; 2630 2631 // For now we just cache the TargetCodeGenInfo in CodeGenModule and don't 2632 // free it. 2633 2634 const llvm::Triple &Triple = getContext().Target.getTriple(); 2635 switch (Triple.getArch()) { 2636 default: 2637 return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types)); 2638 2639 case llvm::Triple::mips: 2640 case llvm::Triple::mipsel: 2641 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types)); 2642 2643 case llvm::Triple::arm: 2644 case llvm::Triple::thumb: 2645 // FIXME: We want to know the float calling convention as well. 2646 if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0) 2647 return *(TheTargetCodeGenInfo = 2648 new ARMTargetCodeGenInfo(Types, ARMABIInfo::APCS)); 2649 2650 return *(TheTargetCodeGenInfo = 2651 new ARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS)); 2652 2653 case llvm::Triple::ppc: 2654 return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types)); 2655 2656 case llvm::Triple::systemz: 2657 return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types)); 2658 2659 case llvm::Triple::msp430: 2660 return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types)); 2661 2662 case llvm::Triple::x86: 2663 switch (Triple.getOS()) { 2664 case llvm::Triple::Darwin: 2665 return *(TheTargetCodeGenInfo = 2666 new X86_32TargetCodeGenInfo(Types, true, true)); 2667 case llvm::Triple::Cygwin: 2668 case llvm::Triple::MinGW32: 2669 case llvm::Triple::AuroraUX: 2670 case llvm::Triple::DragonFly: 2671 case llvm::Triple::FreeBSD: 2672 case llvm::Triple::OpenBSD: 2673 return *(TheTargetCodeGenInfo = 2674 new X86_32TargetCodeGenInfo(Types, false, true)); 2675 2676 default: 2677 return *(TheTargetCodeGenInfo = 2678 new X86_32TargetCodeGenInfo(Types, false, false)); 2679 } 2680 2681 case llvm::Triple::x86_64: 2682 switch (Triple.getOS()) { 2683 case llvm::Triple::Win32: 2684 case llvm::Triple::MinGW64: 2685 case llvm::Triple::Cygwin: 2686 return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types)); 2687 default: 2688 return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types)); 2689 } 2690 } 2691 } 2692