1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // These classes wrap the information about a call or function 11 // definition used to handle ABI compliancy. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "TargetInfo.h" 16 #include "ABIInfo.h" 17 #include "CodeGenFunction.h" 18 #include "clang/AST/RecordLayout.h" 19 #include "clang/Frontend/CodeGenOptions.h" 20 #include "llvm/Type.h" 21 #include "llvm/Target/TargetData.h" 22 #include "llvm/ADT/Triple.h" 23 #include "llvm/Support/raw_ostream.h" 24 using namespace clang; 25 using namespace CodeGen; 26 27 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, 28 llvm::Value *Array, 29 llvm::Value *Value, 30 unsigned FirstIndex, 31 unsigned LastIndex) { 32 // Alternatively, we could emit this as a loop in the source. 33 for (unsigned I = FirstIndex; I <= LastIndex; ++I) { 34 llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I); 35 Builder.CreateStore(Value, Cell); 36 } 37 } 38 39 static bool isAggregateTypeForABI(QualType T) { 40 return CodeGenFunction::hasAggregateLLVMType(T) || 41 T->isMemberFunctionPointerType(); 42 } 43 44 ABIInfo::~ABIInfo() {} 45 46 ASTContext &ABIInfo::getContext() const { 47 return CGT.getContext(); 48 } 49 50 llvm::LLVMContext &ABIInfo::getVMContext() const { 51 return CGT.getLLVMContext(); 52 } 53 54 const llvm::TargetData &ABIInfo::getTargetData() const { 55 return CGT.getTargetData(); 56 } 57 58 59 void ABIArgInfo::dump() const { 60 raw_ostream &OS = llvm::errs(); 61 OS << "(ABIArgInfo Kind="; 62 switch (TheKind) { 63 case Direct: 64 OS << "Direct Type="; 65 if (llvm::Type *Ty = getCoerceToType()) 66 Ty->print(OS); 67 else 68 OS << "null"; 69 break; 70 case Extend: 71 OS << "Extend"; 72 break; 73 case Ignore: 74 OS << "Ignore"; 75 break; 76 case Indirect: 77 OS << "Indirect Align=" << getIndirectAlign() 78 << " ByVal=" << getIndirectByVal() 79 << " Realign=" << getIndirectRealign(); 80 break; 81 case Expand: 82 OS << "Expand"; 83 break; 84 } 85 OS << ")\n"; 86 } 87 88 TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; } 89 90 // If someone can figure out a general rule for this, that would be great. 91 // It's probably just doomed to be platform-dependent, though. 92 unsigned TargetCodeGenInfo::getSizeOfUnwindException() const { 93 // Verified for: 94 // x86-64 FreeBSD, Linux, Darwin 95 // x86-32 FreeBSD, Linux, Darwin 96 // PowerPC Linux, Darwin 97 // ARM Darwin (*not* EABI) 98 return 32; 99 } 100 101 bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args, 102 const FunctionNoProtoType *fnType) const { 103 // The following conventions are known to require this to be false: 104 // x86_stdcall 105 // MIPS 106 // For everything else, we just prefer false unless we opt out. 107 return false; 108 } 109 110 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays); 111 112 /// isEmptyField - Return true iff a the field is "empty", that is it 113 /// is an unnamed bit-field or an (array of) empty record(s). 
114 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, 115 bool AllowArrays) { 116 if (FD->isUnnamedBitfield()) 117 return true; 118 119 QualType FT = FD->getType(); 120 121 // Constant arrays of empty records count as empty, strip them off. 122 // Constant arrays of zero length always count as empty. 123 if (AllowArrays) 124 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { 125 if (AT->getSize() == 0) 126 return true; 127 FT = AT->getElementType(); 128 } 129 130 const RecordType *RT = FT->getAs<RecordType>(); 131 if (!RT) 132 return false; 133 134 // C++ record fields are never empty, at least in the Itanium ABI. 135 // 136 // FIXME: We should use a predicate for whether this behavior is true in the 137 // current ABI. 138 if (isa<CXXRecordDecl>(RT->getDecl())) 139 return false; 140 141 return isEmptyRecord(Context, FT, AllowArrays); 142 } 143 144 /// isEmptyRecord - Return true iff a structure contains only empty 145 /// fields. Note that a structure with a flexible array member is not 146 /// considered empty. 147 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) { 148 const RecordType *RT = T->getAs<RecordType>(); 149 if (!RT) 150 return 0; 151 const RecordDecl *RD = RT->getDecl(); 152 if (RD->hasFlexibleArrayMember()) 153 return false; 154 155 // If this is a C++ record, check the bases first. 156 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 157 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 158 e = CXXRD->bases_end(); i != e; ++i) 159 if (!isEmptyRecord(Context, i->getType(), true)) 160 return false; 161 162 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 163 i != e; ++i) 164 if (!isEmptyField(Context, *i, AllowArrays)) 165 return false; 166 return true; 167 } 168 169 /// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either 170 /// a non-trivial destructor or a non-trivial copy constructor. 171 static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) { 172 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 173 if (!RD) 174 return false; 175 176 return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor(); 177 } 178 179 /// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is 180 /// a record type with either a non-trivial destructor or a non-trivial copy 181 /// constructor. 182 static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) { 183 const RecordType *RT = T->getAs<RecordType>(); 184 if (!RT) 185 return false; 186 187 return hasNonTrivialDestructorOrCopyConstructor(RT); 188 } 189 190 /// isSingleElementStruct - Determine if a structure is a "single 191 /// element struct", i.e. it has exactly one non-empty field or 192 /// exactly one field which is itself a single element 193 /// struct. Structures with flexible array members are never 194 /// considered single element structs. 195 /// 196 /// \return The field declaration for the single non-empty field, if 197 /// it exists. 198 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) { 199 const RecordType *RT = T->getAsStructureType(); 200 if (!RT) 201 return 0; 202 203 const RecordDecl *RD = RT->getDecl(); 204 if (RD->hasFlexibleArrayMember()) 205 return 0; 206 207 const Type *Found = 0; 208 209 // If this is a C++ record, check the bases first. 
210 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 211 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 212 e = CXXRD->bases_end(); i != e; ++i) { 213 // Ignore empty records. 214 if (isEmptyRecord(Context, i->getType(), true)) 215 continue; 216 217 // If we already found an element then this isn't a single-element struct. 218 if (Found) 219 return 0; 220 221 // If this is non-empty and not a single element struct, the composite 222 // cannot be a single element struct. 223 Found = isSingleElementStruct(i->getType(), Context); 224 if (!Found) 225 return 0; 226 } 227 } 228 229 // Check for single element. 230 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 231 i != e; ++i) { 232 const FieldDecl *FD = *i; 233 QualType FT = FD->getType(); 234 235 // Ignore empty fields. 236 if (isEmptyField(Context, FD, true)) 237 continue; 238 239 // If we already found an element then this isn't a single-element 240 // struct. 241 if (Found) 242 return 0; 243 244 // Treat single element arrays as the element. 245 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { 246 if (AT->getSize().getZExtValue() != 1) 247 break; 248 FT = AT->getElementType(); 249 } 250 251 if (!isAggregateTypeForABI(FT)) { 252 Found = FT.getTypePtr(); 253 } else { 254 Found = isSingleElementStruct(FT, Context); 255 if (!Found) 256 return 0; 257 } 258 } 259 260 // We don't consider a struct a single-element struct if it has 261 // padding beyond the element type. 262 if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T)) 263 return 0; 264 265 return Found; 266 } 267 268 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) { 269 if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() && 270 !Ty->isAnyComplexType() && !Ty->isEnumeralType() && 271 !Ty->isBlockPointerType()) 272 return false; 273 274 uint64_t Size = Context.getTypeSize(Ty); 275 return Size == 32 || Size == 64; 276 } 277 278 /// canExpandIndirectArgument - Test whether an argument type which is to be 279 /// passed indirectly (on the stack) would have the equivalent layout if it was 280 /// expanded into separate arguments. If so, we prefer to do the latter to avoid 281 /// inhibiting optimizations. 282 /// 283 // FIXME: This predicate is missing many cases, currently it just follows 284 // llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We 285 // should probably make this smarter, or better yet make the LLVM backend 286 // capable of handling it. 287 static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) { 288 // We can only expand structure types. 289 const RecordType *RT = Ty->getAs<RecordType>(); 290 if (!RT) 291 return false; 292 293 // We can only expand (C) structures. 294 // 295 // FIXME: This needs to be generalized to handle classes as well. 296 const RecordDecl *RD = RT->getDecl(); 297 if (!RD->isStruct() || isa<CXXRecordDecl>(RD)) 298 return false; 299 300 uint64_t Size = 0; 301 302 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 303 i != e; ++i) { 304 const FieldDecl *FD = *i; 305 306 if (!is32Or64BitBasicType(FD->getType(), Context)) 307 return false; 308 309 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know 310 // how to expand them yet, and the predicate for telling if a bitfield still 311 // counts as "basic" is more complicated than what we were doing previously. 
312 if (FD->isBitField()) 313 return false; 314 315 Size += Context.getTypeSize(FD->getType()); 316 } 317 318 // Make sure there are not any holes in the struct. 319 if (Size != Context.getTypeSize(Ty)) 320 return false; 321 322 return true; 323 } 324 325 namespace { 326 /// DefaultABIInfo - The default implementation for ABI specific 327 /// details. This implementation provides information which results in 328 /// self-consistent and sensible LLVM IR generation, but does not 329 /// conform to any particular ABI. 330 class DefaultABIInfo : public ABIInfo { 331 public: 332 DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 333 334 ABIArgInfo classifyReturnType(QualType RetTy) const; 335 ABIArgInfo classifyArgumentType(QualType RetTy) const; 336 337 virtual void computeInfo(CGFunctionInfo &FI) const { 338 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 339 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 340 it != ie; ++it) 341 it->info = classifyArgumentType(it->type); 342 } 343 344 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 345 CodeGenFunction &CGF) const; 346 }; 347 348 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo { 349 public: 350 DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 351 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 352 }; 353 354 llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 355 CodeGenFunction &CGF) const { 356 return 0; 357 } 358 359 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const { 360 if (isAggregateTypeForABI(Ty)) { 361 // Records with non trivial destructors/constructors should not be passed 362 // by value. 363 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 364 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 365 366 return ABIArgInfo::getIndirect(0); 367 } 368 369 // Treat an enum type as its underlying type. 370 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 371 Ty = EnumTy->getDecl()->getIntegerType(); 372 373 return (Ty->isPromotableIntegerType() ? 374 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 375 } 376 377 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const { 378 if (RetTy->isVoidType()) 379 return ABIArgInfo::getIgnore(); 380 381 if (isAggregateTypeForABI(RetTy)) 382 return ABIArgInfo::getIndirect(0); 383 384 // Treat an enum type as its underlying type. 385 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 386 RetTy = EnumTy->getDecl()->getIntegerType(); 387 388 return (RetTy->isPromotableIntegerType() ? 389 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 390 } 391 392 /// UseX86_MMXType - Return true if this is an MMX type that should use the 393 /// special x86_mmx type. 394 bool UseX86_MMXType(llvm::Type *IRType) { 395 // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the 396 // special x86_mmx type. 
397 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 && 398 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() && 399 IRType->getScalarSizeInBits() != 64; 400 } 401 402 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 403 StringRef Constraint, 404 llvm::Type* Ty) { 405 if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) 406 return llvm::Type::getX86_MMXTy(CGF.getLLVMContext()); 407 return Ty; 408 } 409 410 //===----------------------------------------------------------------------===// 411 // X86-32 ABI Implementation 412 //===----------------------------------------------------------------------===// 413 414 /// X86_32ABIInfo - The X86-32 ABI information. 415 class X86_32ABIInfo : public ABIInfo { 416 static const unsigned MinABIStackAlignInBytes = 4; 417 418 bool IsDarwinVectorABI; 419 bool IsSmallStructInRegABI; 420 bool IsMMXDisabled; 421 bool IsWin32FloatStructABI; 422 423 static bool isRegisterSize(unsigned Size) { 424 return (Size == 8 || Size == 16 || Size == 32 || Size == 64); 425 } 426 427 static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context, 428 unsigned callingConvention); 429 430 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 431 /// such that the argument will be passed in memory. 432 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const; 433 434 /// \brief Return the alignment to use for the given type on the stack. 435 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const; 436 437 public: 438 439 ABIArgInfo classifyReturnType(QualType RetTy, 440 unsigned callingConvention) const; 441 ABIArgInfo classifyArgumentType(QualType RetTy) const; 442 443 virtual void computeInfo(CGFunctionInfo &FI) const { 444 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), 445 FI.getCallingConvention()); 446 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 447 it != ie; ++it) 448 it->info = classifyArgumentType(it->type); 449 } 450 451 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 452 CodeGenFunction &CGF) const; 453 454 X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w) 455 : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p), 456 IsMMXDisabled(m), IsWin32FloatStructABI(w) {} 457 }; 458 459 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo { 460 public: 461 X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 462 bool d, bool p, bool m, bool w) 463 :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w)) {} 464 465 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 466 CodeGen::CodeGenModule &CGM) const; 467 468 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 469 // Darwin uses different dwarf register numbers for EH. 470 if (CGM.isTargetDarwin()) return 5; 471 472 return 4; 473 } 474 475 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 476 llvm::Value *Address) const; 477 478 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 479 StringRef Constraint, 480 llvm::Type* Ty) const { 481 return X86AdjustInlineAsmType(CGF, Constraint, Ty); 482 } 483 484 }; 485 486 } 487 488 /// shouldReturnTypeInRegister - Determine if the given type should be 489 /// passed in a register (for the Darwin ABI). 490 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, 491 ASTContext &Context, 492 unsigned callingConvention) { 493 uint64_t Size = Context.getTypeSize(Ty); 494 495 // Type must be register sized. 
496 if (!isRegisterSize(Size)) 497 return false; 498 499 if (Ty->isVectorType()) { 500 // 64- and 128- bit vectors inside structures are not returned in 501 // registers. 502 if (Size == 64 || Size == 128) 503 return false; 504 505 return true; 506 } 507 508 // If this is a builtin, pointer, enum, complex type, member pointer, or 509 // member function pointer it is ok. 510 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() || 511 Ty->isAnyComplexType() || Ty->isEnumeralType() || 512 Ty->isBlockPointerType() || Ty->isMemberPointerType()) 513 return true; 514 515 // Arrays are treated like records. 516 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) 517 return shouldReturnTypeInRegister(AT->getElementType(), Context, 518 callingConvention); 519 520 // Otherwise, it must be a record type. 521 const RecordType *RT = Ty->getAs<RecordType>(); 522 if (!RT) return false; 523 524 // FIXME: Traverse bases here too. 525 526 // For thiscall conventions, structures will never be returned in 527 // a register. This is for compatibility with the MSVC ABI 528 if (callingConvention == llvm::CallingConv::X86_ThisCall && 529 RT->isStructureType()) { 530 return false; 531 } 532 533 // Structure types are passed in register if all fields would be 534 // passed in a register. 535 for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(), 536 e = RT->getDecl()->field_end(); i != e; ++i) { 537 const FieldDecl *FD = *i; 538 539 // Empty fields are ignored. 540 if (isEmptyField(Context, FD, true)) 541 continue; 542 543 // Check fields recursively. 544 if (!shouldReturnTypeInRegister(FD->getType(), Context, 545 callingConvention)) 546 return false; 547 } 548 return true; 549 } 550 551 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, 552 unsigned callingConvention) const { 553 if (RetTy->isVoidType()) 554 return ABIArgInfo::getIgnore(); 555 556 if (const VectorType *VT = RetTy->getAs<VectorType>()) { 557 // On Darwin, some vectors are returned in registers. 558 if (IsDarwinVectorABI) { 559 uint64_t Size = getContext().getTypeSize(RetTy); 560 561 // 128-bit vectors are a special case; they are returned in 562 // registers and we need to make sure to pick a type the LLVM 563 // backend will like. 564 if (Size == 128) 565 return ABIArgInfo::getDirect(llvm::VectorType::get( 566 llvm::Type::getInt64Ty(getVMContext()), 2)); 567 568 // Always return in register if it fits in a general purpose 569 // register, or if it is 64 bits and has a single element. 570 if ((Size == 8 || Size == 16 || Size == 32) || 571 (Size == 64 && VT->getNumElements() == 1)) 572 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 573 Size)); 574 575 return ABIArgInfo::getIndirect(0); 576 } 577 578 return ABIArgInfo::getDirect(); 579 } 580 581 if (isAggregateTypeForABI(RetTy)) { 582 if (const RecordType *RT = RetTy->getAs<RecordType>()) { 583 // Structures with either a non-trivial destructor or a non-trivial 584 // copy constructor are always indirect. 585 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 586 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 587 588 // Structures with flexible arrays are always indirect. 589 if (RT->getDecl()->hasFlexibleArrayMember()) 590 return ABIArgInfo::getIndirect(0); 591 } 592 593 // If specified, structs and unions are always indirect. 594 if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType()) 595 return ABIArgInfo::getIndirect(0); 596 597 // Small structures which are register sized are generally returned 598 // in a register. 
599 if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(), 600 callingConvention)) { 601 uint64_t Size = getContext().getTypeSize(RetTy); 602 603 // As a special-case, if the struct is a "single-element" struct, and 604 // the field is of type "float" or "double", return it in a 605 // floating-point register. (MSVC does not apply this special case.) 606 // We apply a similar transformation for pointer types to improve the 607 // quality of the generated IR. 608 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) 609 if ((!IsWin32FloatStructABI && SeltTy->isRealFloatingType()) 610 || SeltTy->hasPointerRepresentation()) 611 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 612 613 // FIXME: We should be able to narrow this integer in cases with dead 614 // padding. 615 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size)); 616 } 617 618 return ABIArgInfo::getIndirect(0); 619 } 620 621 // Treat an enum type as its underlying type. 622 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 623 RetTy = EnumTy->getDecl()->getIntegerType(); 624 625 return (RetTy->isPromotableIntegerType() ? 626 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 627 } 628 629 static bool isSSEVectorType(ASTContext &Context, QualType Ty) { 630 return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128; 631 } 632 633 static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) { 634 const RecordType *RT = Ty->getAs<RecordType>(); 635 if (!RT) 636 return 0; 637 const RecordDecl *RD = RT->getDecl(); 638 639 // If this is a C++ record, check the bases first. 640 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 641 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 642 e = CXXRD->bases_end(); i != e; ++i) 643 if (!isRecordWithSSEVectorType(Context, i->getType())) 644 return false; 645 646 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 647 i != e; ++i) { 648 QualType FT = i->getType(); 649 650 if (isSSEVectorType(Context, FT)) 651 return true; 652 653 if (isRecordWithSSEVectorType(Context, FT)) 654 return true; 655 } 656 657 return false; 658 } 659 660 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty, 661 unsigned Align) const { 662 // Otherwise, if the alignment is less than or equal to the minimum ABI 663 // alignment, just use the default; the backend will handle this. 664 if (Align <= MinABIStackAlignInBytes) 665 return 0; // Use default alignment. 666 667 // On non-Darwin, the stack type alignment is always 4. 668 if (!IsDarwinVectorABI) { 669 // Set explicit alignment, since we may need to realign the top. 670 return MinABIStackAlignInBytes; 671 } 672 673 // Otherwise, if the type contains an SSE vector type, the alignment is 16. 674 if (Align >= 16 && (isSSEVectorType(getContext(), Ty) || 675 isRecordWithSSEVectorType(getContext(), Ty))) 676 return 16; 677 678 return MinABIStackAlignInBytes; 679 } 680 681 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const { 682 if (!ByVal) 683 return ABIArgInfo::getIndirect(0, false); 684 685 // Compute the byval alignment. 686 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 687 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign); 688 if (StackAlign == 0) 689 return ABIArgInfo::getIndirect(4); 690 691 // If the stack alignment is less than the type alignment, realign the 692 // argument. 
693 if (StackAlign < TypeAlign) 694 return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, 695 /*Realign=*/true); 696 697 return ABIArgInfo::getIndirect(StackAlign); 698 } 699 700 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const { 701 // FIXME: Set alignment on indirect arguments. 702 if (isAggregateTypeForABI(Ty)) { 703 // Structures with flexible arrays are always indirect. 704 if (const RecordType *RT = Ty->getAs<RecordType>()) { 705 // Structures with either a non-trivial destructor or a non-trivial 706 // copy constructor are always indirect. 707 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 708 return getIndirectResult(Ty, /*ByVal=*/false); 709 710 if (RT->getDecl()->hasFlexibleArrayMember()) 711 return getIndirectResult(Ty); 712 } 713 714 // Ignore empty structs/unions. 715 if (isEmptyRecord(getContext(), Ty, true)) 716 return ABIArgInfo::getIgnore(); 717 718 // Expand small (<= 128-bit) record types when we know that the stack layout 719 // of those arguments will match the struct. This is important because the 720 // LLVM backend isn't smart enough to remove byval, which inhibits many 721 // optimizations. 722 if (getContext().getTypeSize(Ty) <= 4*32 && 723 canExpandIndirectArgument(Ty, getContext())) 724 return ABIArgInfo::getExpand(); 725 726 return getIndirectResult(Ty); 727 } 728 729 if (const VectorType *VT = Ty->getAs<VectorType>()) { 730 // On Darwin, some vectors are passed in memory, we handle this by passing 731 // it as an i8/i16/i32/i64. 732 if (IsDarwinVectorABI) { 733 uint64_t Size = getContext().getTypeSize(Ty); 734 if ((Size == 8 || Size == 16 || Size == 32) || 735 (Size == 64 && VT->getNumElements() == 1)) 736 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 737 Size)); 738 } 739 740 llvm::Type *IRType = CGT.ConvertType(Ty); 741 if (UseX86_MMXType(IRType)) { 742 if (IsMMXDisabled) 743 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 744 64)); 745 ABIArgInfo AAI = ABIArgInfo::getDirect(IRType); 746 AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext())); 747 return AAI; 748 } 749 750 return ABIArgInfo::getDirect(); 751 } 752 753 754 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 755 Ty = EnumTy->getDecl()->getIntegerType(); 756 757 return (Ty->isPromotableIntegerType() ? 
758 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 759 } 760 761 llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 762 CodeGenFunction &CGF) const { 763 llvm::Type *BPP = CGF.Int8PtrPtrTy; 764 765 CGBuilderTy &Builder = CGF.Builder; 766 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 767 "ap"); 768 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 769 770 // Compute if the address needs to be aligned 771 unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity(); 772 Align = getTypeStackAlignInBytes(Ty, Align); 773 Align = std::max(Align, 4U); 774 if (Align > 4) { 775 // addr = (addr + align - 1) & -align; 776 llvm::Value *Offset = 777 llvm::ConstantInt::get(CGF.Int32Ty, Align - 1); 778 Addr = CGF.Builder.CreateGEP(Addr, Offset); 779 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr, 780 CGF.Int32Ty); 781 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align); 782 Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 783 Addr->getType(), 784 "ap.cur.aligned"); 785 } 786 787 llvm::Type *PTy = 788 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 789 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 790 791 uint64_t Offset = 792 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align); 793 llvm::Value *NextAddr = 794 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 795 "ap.next"); 796 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 797 798 return AddrTyped; 799 } 800 801 void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 802 llvm::GlobalValue *GV, 803 CodeGen::CodeGenModule &CGM) const { 804 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 805 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 806 // Get the LLVM function. 807 llvm::Function *Fn = cast<llvm::Function>(GV); 808 809 // Now add the 'alignstack' attribute with a value of 16. 810 Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16)); 811 } 812 } 813 } 814 815 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( 816 CodeGen::CodeGenFunction &CGF, 817 llvm::Value *Address) const { 818 CodeGen::CGBuilderTy &Builder = CGF.Builder; 819 820 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 821 822 // 0-7 are the eight integer registers; the order is different 823 // on Darwin (for EH), but the range is the same. 824 // 8 is %eip. 825 AssignToArrayRange(Builder, Address, Four8, 0, 8); 826 827 if (CGF.CGM.isTargetDarwin()) { 828 // 12-16 are st(0..4). Not sure why we stop at 4. 829 // These have size 16, which is sizeof(long double) on 830 // platforms with 8-byte alignment for that type. 831 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16); 832 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16); 833 834 } else { 835 // 9 is %eflags, which doesn't get a size on Darwin for some 836 // reason. 837 Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9)); 838 839 // 11-16 are st(0..5). Not sure why we stop at 5. 840 // These have size 12, which is sizeof(long double) on 841 // platforms with 4-byte alignment for that type. 
842 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12); 843 AssignToArrayRange(Builder, Address, Twelve8, 11, 16); 844 } 845 846 return false; 847 } 848 849 //===----------------------------------------------------------------------===// 850 // X86-64 ABI Implementation 851 //===----------------------------------------------------------------------===// 852 853 854 namespace { 855 /// X86_64ABIInfo - The X86_64 ABI information. 856 class X86_64ABIInfo : public ABIInfo { 857 enum Class { 858 Integer = 0, 859 SSE, 860 SSEUp, 861 X87, 862 X87Up, 863 ComplexX87, 864 NoClass, 865 Memory 866 }; 867 868 /// merge - Implement the X86_64 ABI merging algorithm. 869 /// 870 /// Merge an accumulating classification \arg Accum with a field 871 /// classification \arg Field. 872 /// 873 /// \param Accum - The accumulating classification. This should 874 /// always be either NoClass or the result of a previous merge 875 /// call. In addition, this should never be Memory (the caller 876 /// should just return Memory for the aggregate). 877 static Class merge(Class Accum, Class Field); 878 879 /// postMerge - Implement the X86_64 ABI post merging algorithm. 880 /// 881 /// Post merger cleanup, reduces a malformed Hi and Lo pair to 882 /// final MEMORY or SSE classes when necessary. 883 /// 884 /// \param AggregateSize - The size of the current aggregate in 885 /// the classification process. 886 /// 887 /// \param Lo - The classification for the parts of the type 888 /// residing in the low word of the containing object. 889 /// 890 /// \param Hi - The classification for the parts of the type 891 /// residing in the higher words of the containing object. 892 /// 893 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const; 894 895 /// classify - Determine the x86_64 register classes in which the 896 /// given type T should be passed. 897 /// 898 /// \param Lo - The classification for the parts of the type 899 /// residing in the low word of the containing object. 900 /// 901 /// \param Hi - The classification for the parts of the type 902 /// residing in the high word of the containing object. 903 /// 904 /// \param OffsetBase - The bit offset of this type in the 905 /// containing object. Some parameters are classified different 906 /// depending on whether they straddle an eightbyte boundary. 907 /// 908 /// If a word is unused its result will be NoClass; if a type should 909 /// be passed in Memory then at least the classification of \arg Lo 910 /// will be Memory. 911 /// 912 /// The \arg Lo class will be NoClass iff the argument is ignored. 913 /// 914 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will 915 /// also be ComplexX87. 916 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const; 917 918 llvm::Type *GetByteVectorType(QualType Ty) const; 919 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType, 920 unsigned IROffset, QualType SourceTy, 921 unsigned SourceOffset) const; 922 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType, 923 unsigned IROffset, QualType SourceTy, 924 unsigned SourceOffset) const; 925 926 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 927 /// such that the argument will be returned in memory. 928 ABIArgInfo getIndirectReturnResult(QualType Ty) const; 929 930 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 931 /// such that the argument will be passed in memory. 
932 /// 933 /// \param freeIntRegs - The number of free integer registers remaining 934 /// available. 935 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const; 936 937 ABIArgInfo classifyReturnType(QualType RetTy) const; 938 939 ABIArgInfo classifyArgumentType(QualType Ty, 940 unsigned freeIntRegs, 941 unsigned &neededInt, 942 unsigned &neededSSE) const; 943 944 bool IsIllegalVectorType(QualType Ty) const; 945 946 /// The 0.98 ABI revision clarified a lot of ambiguities, 947 /// unfortunately in ways that were not always consistent with 948 /// certain previous compilers. In particular, platforms which 949 /// required strict binary compatibility with older versions of GCC 950 /// may need to exempt themselves. 951 bool honorsRevision0_98() const { 952 return !getContext().getTargetInfo().getTriple().isOSDarwin(); 953 } 954 955 bool HasAVX; 956 957 public: 958 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) : 959 ABIInfo(CGT), HasAVX(hasavx) {} 960 961 bool isPassedUsingAVXType(QualType type) const { 962 unsigned neededInt, neededSSE; 963 // The freeIntRegs argument doesn't matter here. 964 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE); 965 if (info.isDirect()) { 966 llvm::Type *ty = info.getCoerceToType(); 967 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty)) 968 return (vectorTy->getBitWidth() > 128); 969 } 970 return false; 971 } 972 973 virtual void computeInfo(CGFunctionInfo &FI) const; 974 975 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 976 CodeGenFunction &CGF) const; 977 }; 978 979 /// WinX86_64ABIInfo - The Windows X86_64 ABI information. 980 class WinX86_64ABIInfo : public ABIInfo { 981 982 ABIArgInfo classify(QualType Ty) const; 983 984 public: 985 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 986 987 virtual void computeInfo(CGFunctionInfo &FI) const; 988 989 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 990 CodeGenFunction &CGF) const; 991 }; 992 993 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo { 994 public: 995 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) 996 : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {} 997 998 const X86_64ABIInfo &getABIInfo() const { 999 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo()); 1000 } 1001 1002 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 1003 return 7; 1004 } 1005 1006 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 1007 llvm::Value *Address) const { 1008 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 1009 1010 // 0-15 are the 16 integer registers. 1011 // 16 is %rip. 1012 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 1013 return false; 1014 } 1015 1016 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 1017 StringRef Constraint, 1018 llvm::Type* Ty) const { 1019 return X86AdjustInlineAsmType(CGF, Constraint, Ty); 1020 } 1021 1022 bool isNoProtoCallVariadic(const CallArgList &args, 1023 const FunctionNoProtoType *fnType) const { 1024 // The default CC on x86-64 sets %al to the number of SSA 1025 // registers used, and GCC sets this when calling an unprototyped 1026 // function, so we override the default behavior. However, don't do 1027 // that when AVX types are involved: the ABI explicitly states it is 1028 // undefined, and it doesn't work in practice because of how the ABI 1029 // defines varargs anyway. 
1030 if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) { 1031 bool HasAVXType = false; 1032 for (CallArgList::const_iterator 1033 it = args.begin(), ie = args.end(); it != ie; ++it) { 1034 if (getABIInfo().isPassedUsingAVXType(it->Ty)) { 1035 HasAVXType = true; 1036 break; 1037 } 1038 } 1039 1040 if (!HasAVXType) 1041 return true; 1042 } 1043 1044 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType); 1045 } 1046 1047 }; 1048 1049 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 1050 public: 1051 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 1052 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {} 1053 1054 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 1055 return 7; 1056 } 1057 1058 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 1059 llvm::Value *Address) const { 1060 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 1061 1062 // 0-15 are the 16 integer registers. 1063 // 16 is %rip. 1064 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 1065 return false; 1066 } 1067 }; 1068 1069 } 1070 1071 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, 1072 Class &Hi) const { 1073 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: 1074 // 1075 // (a) If one of the classes is Memory, the whole argument is passed in 1076 // memory. 1077 // 1078 // (b) If X87UP is not preceded by X87, the whole argument is passed in 1079 // memory. 1080 // 1081 // (c) If the size of the aggregate exceeds two eightbytes and the first 1082 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole 1083 // argument is passed in memory. NOTE: This is necessary to keep the 1084 // ABI working for processors that don't support the __m256 type. 1085 // 1086 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. 1087 // 1088 // Some of these are enforced by the merging logic. Others can arise 1089 // only with unions; for example: 1090 // union { _Complex double; unsigned; } 1091 // 1092 // Note that clauses (b) and (c) were added in 0.98. 1093 // 1094 if (Hi == Memory) 1095 Lo = Memory; 1096 if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) 1097 Lo = Memory; 1098 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) 1099 Lo = Memory; 1100 if (Hi == SSEUp && Lo != SSE) 1101 Hi = SSE; 1102 } 1103 1104 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { 1105 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 1106 // classified recursively so that always two fields are 1107 // considered. The resulting class is calculated according to 1108 // the classes of the fields in the eightbyte: 1109 // 1110 // (a) If both classes are equal, this is the resulting class. 1111 // 1112 // (b) If one of the classes is NO_CLASS, the resulting class is 1113 // the other class. 1114 // 1115 // (c) If one of the classes is MEMORY, the result is the MEMORY 1116 // class. 1117 // 1118 // (d) If one of the classes is INTEGER, the result is the 1119 // INTEGER. 1120 // 1121 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 1122 // MEMORY is used as class. 1123 // 1124 // (f) Otherwise class SSE is used. 1125 1126 // Accum should never be memory (we should have returned) or 1127 // ComplexX87 (because this cannot be passed in a structure). 
1128 assert((Accum != Memory && Accum != ComplexX87) && 1129 "Invalid accumulated classification during merge."); 1130 if (Accum == Field || Field == NoClass) 1131 return Accum; 1132 if (Field == Memory) 1133 return Memory; 1134 if (Accum == NoClass) 1135 return Field; 1136 if (Accum == Integer || Field == Integer) 1137 return Integer; 1138 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 1139 Accum == X87 || Accum == X87Up) 1140 return Memory; 1141 return SSE; 1142 } 1143 1144 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, 1145 Class &Lo, Class &Hi) const { 1146 // FIXME: This code can be simplified by introducing a simple value class for 1147 // Class pairs with appropriate constructor methods for the various 1148 // situations. 1149 1150 // FIXME: Some of the split computations are wrong; unaligned vectors 1151 // shouldn't be passed in registers for example, so there is no chance they 1152 // can straddle an eightbyte. Verify & simplify. 1153 1154 Lo = Hi = NoClass; 1155 1156 Class &Current = OffsetBase < 64 ? Lo : Hi; 1157 Current = Memory; 1158 1159 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 1160 BuiltinType::Kind k = BT->getKind(); 1161 1162 if (k == BuiltinType::Void) { 1163 Current = NoClass; 1164 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 1165 Lo = Integer; 1166 Hi = Integer; 1167 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 1168 Current = Integer; 1169 } else if (k == BuiltinType::Float || k == BuiltinType::Double) { 1170 Current = SSE; 1171 } else if (k == BuiltinType::LongDouble) { 1172 Lo = X87; 1173 Hi = X87Up; 1174 } 1175 // FIXME: _Decimal32 and _Decimal64 are SSE. 1176 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 1177 return; 1178 } 1179 1180 if (const EnumType *ET = Ty->getAs<EnumType>()) { 1181 // Classify the underlying integer type. 1182 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi); 1183 return; 1184 } 1185 1186 if (Ty->hasPointerRepresentation()) { 1187 Current = Integer; 1188 return; 1189 } 1190 1191 if (Ty->isMemberPointerType()) { 1192 if (Ty->isMemberFunctionPointerType()) 1193 Lo = Hi = Integer; 1194 else 1195 Current = Integer; 1196 return; 1197 } 1198 1199 if (const VectorType *VT = Ty->getAs<VectorType>()) { 1200 uint64_t Size = getContext().getTypeSize(VT); 1201 if (Size == 32) { 1202 // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x 1203 // float> as integer. 1204 Current = Integer; 1205 1206 // If this type crosses an eightbyte boundary, it should be 1207 // split. 1208 uint64_t EB_Real = (OffsetBase) / 64; 1209 uint64_t EB_Imag = (OffsetBase + Size - 1) / 64; 1210 if (EB_Real != EB_Imag) 1211 Hi = Lo; 1212 } else if (Size == 64) { 1213 // gcc passes <1 x double> in memory. :( 1214 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) 1215 return; 1216 1217 // gcc passes <1 x long long> as INTEGER. 1218 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) || 1219 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) || 1220 VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) || 1221 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong)) 1222 Current = Integer; 1223 else 1224 Current = SSE; 1225 1226 // If this type crosses an eightbyte boundary, it should be 1227 // split. 1228 if (OffsetBase && OffsetBase != 64) 1229 Hi = Lo; 1230 } else if (Size == 128 || (HasAVX && Size == 256)) { 1231 // Arguments of 256-bits are split into four eightbyte chunks. 
The 1232 // least significant one belongs to class SSE and all the others to class 1233 // SSEUP. The original Lo and Hi design considers that types can't be 1234 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. 1235 // This design isn't correct for 256-bits, but since there're no cases 1236 // where the upper parts would need to be inspected, avoid adding 1237 // complexity and just consider Hi to match the 64-256 part. 1238 Lo = SSE; 1239 Hi = SSEUp; 1240 } 1241 return; 1242 } 1243 1244 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 1245 QualType ET = getContext().getCanonicalType(CT->getElementType()); 1246 1247 uint64_t Size = getContext().getTypeSize(Ty); 1248 if (ET->isIntegralOrEnumerationType()) { 1249 if (Size <= 64) 1250 Current = Integer; 1251 else if (Size <= 128) 1252 Lo = Hi = Integer; 1253 } else if (ET == getContext().FloatTy) 1254 Current = SSE; 1255 else if (ET == getContext().DoubleTy) 1256 Lo = Hi = SSE; 1257 else if (ET == getContext().LongDoubleTy) 1258 Current = ComplexX87; 1259 1260 // If this complex type crosses an eightbyte boundary then it 1261 // should be split. 1262 uint64_t EB_Real = (OffsetBase) / 64; 1263 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; 1264 if (Hi == NoClass && EB_Real != EB_Imag) 1265 Hi = Lo; 1266 1267 return; 1268 } 1269 1270 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 1271 // Arrays are treated like structures. 1272 1273 uint64_t Size = getContext().getTypeSize(Ty); 1274 1275 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 1276 // than four eightbytes, ..., it has class MEMORY. 1277 if (Size > 256) 1278 return; 1279 1280 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 1281 // fields, it has class MEMORY. 1282 // 1283 // Only need to check alignment of array base. 1284 if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) 1285 return; 1286 1287 // Otherwise implement simplified merge. We could be smarter about 1288 // this, but it isn't worth it and would be harder to verify. 1289 Current = NoClass; 1290 uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); 1291 uint64_t ArraySize = AT->getSize().getZExtValue(); 1292 1293 // The only case a 256-bit wide vector could be used is when the array 1294 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 1295 // to work for sizes wider than 128, early check and fallback to memory. 1296 if (Size > 128 && EltSize != 256) 1297 return; 1298 1299 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 1300 Class FieldLo, FieldHi; 1301 classify(AT->getElementType(), Offset, FieldLo, FieldHi); 1302 Lo = merge(Lo, FieldLo); 1303 Hi = merge(Hi, FieldHi); 1304 if (Lo == Memory || Hi == Memory) 1305 break; 1306 } 1307 1308 postMerge(Size, Lo, Hi); 1309 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 1310 return; 1311 } 1312 1313 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1314 uint64_t Size = getContext().getTypeSize(Ty); 1315 1316 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 1317 // than four eightbytes, ..., it has class MEMORY. 1318 if (Size > 256) 1319 return; 1320 1321 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial 1322 // copy constructor or a non-trivial destructor, it is passed by invisible 1323 // reference. 
1324 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 1325 return; 1326 1327 const RecordDecl *RD = RT->getDecl(); 1328 1329 // Assume variable sized types are passed in memory. 1330 if (RD->hasFlexibleArrayMember()) 1331 return; 1332 1333 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 1334 1335 // Reset Lo class, this will be recomputed. 1336 Current = NoClass; 1337 1338 // If this is a C++ record, classify the bases first. 1339 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1340 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 1341 e = CXXRD->bases_end(); i != e; ++i) { 1342 assert(!i->isVirtual() && !i->getType()->isDependentType() && 1343 "Unexpected base class!"); 1344 const CXXRecordDecl *Base = 1345 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 1346 1347 // Classify this field. 1348 // 1349 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 1350 // single eightbyte, each is classified separately. Each eightbyte gets 1351 // initialized to class NO_CLASS. 1352 Class FieldLo, FieldHi; 1353 uint64_t Offset = 1354 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); 1355 classify(i->getType(), Offset, FieldLo, FieldHi); 1356 Lo = merge(Lo, FieldLo); 1357 Hi = merge(Hi, FieldHi); 1358 if (Lo == Memory || Hi == Memory) 1359 break; 1360 } 1361 } 1362 1363 // Classify the fields one at a time, merging the results. 1364 unsigned idx = 0; 1365 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1366 i != e; ++i, ++idx) { 1367 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 1368 bool BitField = i->isBitField(); 1369 1370 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than 1371 // four eightbytes, or it contains unaligned fields, it has class MEMORY. 1372 // 1373 // The only case a 256-bit wide vector could be used is when the struct 1374 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 1375 // to work for sizes wider than 128, early check and fallback to memory. 1376 // 1377 if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) { 1378 Lo = Memory; 1379 return; 1380 } 1381 // Note, skip this test for bit-fields, see below. 1382 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { 1383 Lo = Memory; 1384 return; 1385 } 1386 1387 // Classify this field. 1388 // 1389 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate 1390 // exceeds a single eightbyte, each is classified 1391 // separately. Each eightbyte gets initialized to class 1392 // NO_CLASS. 1393 Class FieldLo, FieldHi; 1394 1395 // Bit-fields require special handling, they do not force the 1396 // structure to be passed in memory even if unaligned, and 1397 // therefore they can straddle an eightbyte. 1398 if (BitField) { 1399 // Ignore padding bit-fields. 1400 if (i->isUnnamedBitfield()) 1401 continue; 1402 1403 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 1404 uint64_t Size = i->getBitWidthValue(getContext()); 1405 1406 uint64_t EB_Lo = Offset / 64; 1407 uint64_t EB_Hi = (Offset + Size - 1) / 64; 1408 FieldLo = FieldHi = NoClass; 1409 if (EB_Lo) { 1410 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 1411 FieldLo = NoClass; 1412 FieldHi = Integer; 1413 } else { 1414 FieldLo = Integer; 1415 FieldHi = EB_Hi ? 
Integer : NoClass; 1416 } 1417 } else 1418 classify(i->getType(), Offset, FieldLo, FieldHi); 1419 Lo = merge(Lo, FieldLo); 1420 Hi = merge(Hi, FieldHi); 1421 if (Lo == Memory || Hi == Memory) 1422 break; 1423 } 1424 1425 postMerge(Size, Lo, Hi); 1426 } 1427 } 1428 1429 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { 1430 // If this is a scalar LLVM value then assume LLVM will pass it in the right 1431 // place naturally. 1432 if (!isAggregateTypeForABI(Ty)) { 1433 // Treat an enum type as its underlying type. 1434 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1435 Ty = EnumTy->getDecl()->getIntegerType(); 1436 1437 return (Ty->isPromotableIntegerType() ? 1438 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1439 } 1440 1441 return ABIArgInfo::getIndirect(0); 1442 } 1443 1444 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { 1445 if (const VectorType *VecTy = Ty->getAs<VectorType>()) { 1446 uint64_t Size = getContext().getTypeSize(VecTy); 1447 unsigned LargestVector = HasAVX ? 256 : 128; 1448 if (Size <= 64 || Size > LargestVector) 1449 return true; 1450 } 1451 1452 return false; 1453 } 1454 1455 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, 1456 unsigned freeIntRegs) const { 1457 // If this is a scalar LLVM value then assume LLVM will pass it in the right 1458 // place naturally. 1459 // 1460 // This assumption is optimistic, as there could be free registers available 1461 // when we need to pass this argument in memory, and LLVM could try to pass 1462 // the argument in the free register. This does not seem to happen currently, 1463 // but this code would be much safer if we could mark the argument with 1464 // 'onstack'. See PR12193. 1465 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) { 1466 // Treat an enum type as its underlying type. 1467 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1468 Ty = EnumTy->getDecl()->getIntegerType(); 1469 1470 return (Ty->isPromotableIntegerType() ? 1471 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1472 } 1473 1474 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 1475 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 1476 1477 // Compute the byval alignment. We specify the alignment of the byval in all 1478 // cases so that the mid-level optimizer knows the alignment of the byval. 1479 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); 1480 1481 // Attempt to avoid passing indirect results using byval when possible. This 1482 // is important for good codegen. 1483 // 1484 // We do this by coercing the value into a scalar type which the backend can 1485 // handle naturally (i.e., without using byval). 1486 // 1487 // For simplicity, we currently only do this when we have exhausted all of the 1488 // free integer registers. Doing this when there are free integer registers 1489 // would require more care, as we would have to ensure that the coerced value 1490 // did not claim the unused register. That would require either reording the 1491 // arguments to the function (so that any subsequent inreg values came first), 1492 // or only doing this optimization when there were no following arguments that 1493 // might be inreg. 1494 // 1495 // We currently expect it to be rare (particularly in well written code) for 1496 // arguments to be passed on the stack when there are still free integer 1497 // registers available (this would typically imply large structs being passed 1498 // by value), so this seems like a fair tradeoff for now. 
1499 // 1500 // We can revisit this if the backend grows support for 'onstack' parameter 1501 // attributes. See PR12193. 1502 if (freeIntRegs == 0) { 1503 uint64_t Size = getContext().getTypeSize(Ty); 1504 1505 // If this type fits in an eightbyte, coerce it into the matching integral 1506 // type, which will end up on the stack (with alignment 8). 1507 if (Align == 8 && Size <= 64) 1508 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 1509 Size)); 1510 } 1511 1512 return ABIArgInfo::getIndirect(Align); 1513 } 1514 1515 /// GetByteVectorType - The ABI specifies that a value should be passed in an 1516 /// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a 1517 /// vector register. 1518 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const { 1519 llvm::Type *IRType = CGT.ConvertType(Ty); 1520 1521 // Wrapper structs that just contain vectors are passed just like vectors, 1522 // strip them off if present. 1523 llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType); 1524 while (STy && STy->getNumElements() == 1) { 1525 IRType = STy->getElementType(0); 1526 STy = dyn_cast<llvm::StructType>(IRType); 1527 } 1528 1529 // If the preferred type is a 16-byte vector, prefer to pass it. 1530 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){ 1531 llvm::Type *EltTy = VT->getElementType(); 1532 unsigned BitWidth = VT->getBitWidth(); 1533 if ((BitWidth >= 128 && BitWidth <= 256) && 1534 (EltTy->isFloatTy() || EltTy->isDoubleTy() || 1535 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) || 1536 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) || 1537 EltTy->isIntegerTy(128))) 1538 return VT; 1539 } 1540 1541 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2); 1542 } 1543 1544 /// BitsContainNoUserData - Return true if the specified [start,end) bit range 1545 /// is known to either be off the end of the specified type or being in 1546 /// alignment padding. The user type specified is known to be at most 128 bits 1547 /// in size, and have passed through X86_64ABIInfo::classify with a successful 1548 /// classification that put one of the two halves in the INTEGER class. 1549 /// 1550 /// It is conservatively correct to return false. 1551 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, 1552 unsigned EndBit, ASTContext &Context) { 1553 // If the bytes being queried are off the end of the type, there is no user 1554 // data hiding here. This handles analysis of builtins, vectors and other 1555 // types that don't contain interesting padding. 1556 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 1557 if (TySize <= StartBit) 1558 return true; 1559 1560 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 1561 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 1562 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 1563 1564 // Check each element to see if the element overlaps with the queried range. 1565 for (unsigned i = 0; i != NumElts; ++i) { 1566 // If the element is after the span we care about, then we're done.. 1567 unsigned EltOffset = i*EltSize; 1568 if (EltOffset >= EndBit) break; 1569 1570 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0; 1571 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 1572 EndBit-EltOffset, Context)) 1573 return false; 1574 } 1575 // If it overlaps no elements, then it is safe to process as padding. 
1576 return true; 1577 } 1578 1579 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1580 const RecordDecl *RD = RT->getDecl(); 1581 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 1582 1583 // If this is a C++ record, check the bases first. 1584 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1585 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 1586 e = CXXRD->bases_end(); i != e; ++i) { 1587 assert(!i->isVirtual() && !i->getType()->isDependentType() && 1588 "Unexpected base class!"); 1589 const CXXRecordDecl *Base = 1590 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 1591 1592 // If the base is after the span we care about, ignore it. 1593 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 1594 if (BaseOffset >= EndBit) continue; 1595 1596 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 1597 if (!BitsContainNoUserData(i->getType(), BaseStart, 1598 EndBit-BaseOffset, Context)) 1599 return false; 1600 } 1601 } 1602 1603 // Verify that no field has data that overlaps the region of interest. Yes 1604 // this could be sped up a lot by being smarter about queried fields, 1605 // however we're only looking at structs up to 16 bytes, so we don't care 1606 // much. 1607 unsigned idx = 0; 1608 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1609 i != e; ++i, ++idx) { 1610 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 1611 1612 // If we found a field after the region we care about, then we're done. 1613 if (FieldOffset >= EndBit) break; 1614 1615 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 1616 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 1617 Context)) 1618 return false; 1619 } 1620 1621 // If nothing in this record overlapped the area of interest, then we're 1622 // clean. 1623 return true; 1624 } 1625 1626 return false; 1627 } 1628 1629 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 1630 /// float member at the specified offset. For example, {int,{float}} has a 1631 /// float at offset 4. It is conservatively correct for this routine to return 1632 /// false. 1633 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 1634 const llvm::TargetData &TD) { 1635 // Base case if we find a float. 1636 if (IROffset == 0 && IRType->isFloatTy()) 1637 return true; 1638 1639 // If this is a struct, recurse into the field at the specified offset. 1640 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1641 const llvm::StructLayout *SL = TD.getStructLayout(STy); 1642 unsigned Elt = SL->getElementContainingOffset(IROffset); 1643 IROffset -= SL->getElementOffset(Elt); 1644 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 1645 } 1646 1647 // If this is an array, recurse into the field at the specified offset. 1648 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1649 llvm::Type *EltTy = ATy->getElementType(); 1650 unsigned EltSize = TD.getTypeAllocSize(EltTy); 1651 IROffset -= IROffset/EltSize*EltSize; 1652 return ContainsFloatAtOffset(EltTy, IROffset, TD); 1653 } 1654 1655 return false; 1656 } 1657 1658 1659 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 1660 /// low 8 bytes of an XMM register, corresponding to the SSE class. 
1661 llvm::Type *X86_64ABIInfo:: 1662 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1663 QualType SourceTy, unsigned SourceOffset) const { 1664 // The only three choices we have are either double, <2 x float>, or float. We 1665 // pass as float if the last 4 bytes is just padding. This happens for 1666 // structs that contain 3 floats. 1667 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 1668 SourceOffset*8+64, getContext())) 1669 return llvm::Type::getFloatTy(getVMContext()); 1670 1671 // We want to pass as <2 x float> if the LLVM IR type contains a float at 1672 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 1673 // case. 1674 if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) && 1675 ContainsFloatAtOffset(IRType, IROffset+4, getTargetData())) 1676 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 1677 1678 return llvm::Type::getDoubleTy(getVMContext()); 1679 } 1680 1681 1682 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 1683 /// an 8-byte GPR. This means that we either have a scalar or we are talking 1684 /// about the high or low part of an up-to-16-byte struct. This routine picks 1685 /// the best LLVM IR type to represent this, which may be i64 or may be anything 1686 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 1687 /// etc). 1688 /// 1689 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 1690 /// the source type. IROffset is an offset in bytes into the LLVM IR type that 1691 /// the 8-byte value references. PrefType may be null. 1692 /// 1693 /// SourceTy is the source level type for the entire argument. SourceOffset is 1694 /// an offset into this that we're processing (which is always either 0 or 8). 1695 /// 1696 llvm::Type *X86_64ABIInfo:: 1697 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1698 QualType SourceTy, unsigned SourceOffset) const { 1699 // If we're dealing with an un-offset LLVM IR type, then it means that we're 1700 // returning an 8-byte unit starting with it. See if we can safely use it. 1701 if (IROffset == 0) { 1702 // Pointers and int64's always fill the 8-byte unit. 1703 if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64)) 1704 return IRType; 1705 1706 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 1707 // goodness in the source type is just tail padding. This is allowed to 1708 // kick in for struct {double,int} on the int, but not on 1709 // struct{double,int,int} because we wouldn't return the second int. We 1710 // have to do this analysis on the source type because we can't depend on 1711 // unions being lowered a specific way etc. 1712 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 1713 IRType->isIntegerTy(32)) { 1714 unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth(); 1715 1716 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 1717 SourceOffset*8+64, getContext())) 1718 return IRType; 1719 } 1720 } 1721 1722 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1723 // If this is a struct, recurse into the field at the specified offset. 
1724 const llvm::StructLayout *SL = getTargetData().getStructLayout(STy);
1725 if (IROffset < SL->getSizeInBytes()) {
1726 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
1727 IROffset -= SL->getElementOffset(FieldIdx);
1728
1729 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
1730 SourceTy, SourceOffset);
1731 }
1732 }
1733
1734 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
1735 llvm::Type *EltTy = ATy->getElementType();
1736 unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
1737 unsigned EltOffset = IROffset/EltSize*EltSize;
1738 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
1739 SourceOffset);
1740 }
1741
1742 // Okay, we don't have any better idea of what to pass, so we pass this in an
1743 // integer register that is no bigger than the remaining bytes of the struct.
1744 unsigned TySizeInBytes =
1745 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
1746
1747 assert(TySizeInBytes != SourceOffset && "Empty field?");
1748
1749 // It is always safe to classify this as an integer type up to i64 that
1750 // isn't larger than the structure.
1751 return llvm::IntegerType::get(getVMContext(),
1752 std::min(TySizeInBytes-SourceOffset, 8U)*8);
1753 }
1754
1755
1756 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
1757 /// be used as elements of a two register pair to pass or return, return a
1758 /// first class aggregate to represent them. For example, if the low part of
1759 /// a by-value argument should be passed as i32* and the high part as float,
1760 /// return {i32*, float}.
1761 static llvm::Type *
1762 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
1763 const llvm::TargetData &TD) {
1764 // In order to correctly satisfy the ABI, we need the high part to start
1765 // at offset 8. If the high and low parts we inferred are both 4-byte types
1766 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
1767 // the second element at offset 8. Check for this:
1768 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
1769 unsigned HiAlign = TD.getABITypeAlignment(Hi);
1770 unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
1771 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
1772
1773 // To handle this, we have to increase the size of the low part so that the
1774 // second element will start at an 8 byte offset. We can't increase the size
1775 // of the second element because it might make us access off the end of the
1776 // struct.
1777 if (HiStart != 8) {
1778 // There are only two sorts of types the ABI generation code can produce for
1779 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
1780 // Promote these to a larger type.
1781 if (Lo->isFloatTy())
1782 Lo = llvm::Type::getDoubleTy(Lo->getContext());
1783 else {
1784 assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
1785 Lo = llvm::Type::getInt64Ty(Lo->getContext());
1786 }
1787 }
1788
1789 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);
1790
1791
1792 // Verify that the second element is at an 8-byte offset.
1793 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
1794 "Invalid x86-64 argument pair!");
1795 return Result;
1796 }
1797
1798 ABIArgInfo X86_64ABIInfo::
1799 classifyReturnType(QualType RetTy) const {
1800 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
1801 // classification algorithm.
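  // A few illustrative classifications (inferred from the AMD64 ABI, not an
  // exhaustive list): 'int' is {INTEGER, NoClass}, 'double' is {SSE, NoClass},
  // '__m128' is {SSE, SSEUp}, and 'long double' is {X87, X87Up}.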
1802 X86_64ABIInfo::Class Lo, Hi; 1803 classify(RetTy, 0, Lo, Hi); 1804 1805 // Check some invariants. 1806 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 1807 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 1808 1809 llvm::Type *ResType = 0; 1810 switch (Lo) { 1811 case NoClass: 1812 if (Hi == NoClass) 1813 return ABIArgInfo::getIgnore(); 1814 // If the low part is just padding, it takes no register, leave ResType 1815 // null. 1816 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 1817 "Unknown missing lo part"); 1818 break; 1819 1820 case SSEUp: 1821 case X87Up: 1822 llvm_unreachable("Invalid classification for lo word."); 1823 1824 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via 1825 // hidden argument. 1826 case Memory: 1827 return getIndirectReturnResult(RetTy); 1828 1829 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next 1830 // available register of the sequence %rax, %rdx is used. 1831 case Integer: 1832 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 1833 1834 // If we have a sign or zero extended integer, make sure to return Extend 1835 // so that the parameter gets the right LLVM IR attributes. 1836 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 1837 // Treat an enum type as its underlying type. 1838 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 1839 RetTy = EnumTy->getDecl()->getIntegerType(); 1840 1841 if (RetTy->isIntegralOrEnumerationType() && 1842 RetTy->isPromotableIntegerType()) 1843 return ABIArgInfo::getExtend(); 1844 } 1845 break; 1846 1847 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 1848 // available SSE register of the sequence %xmm0, %xmm1 is used. 1849 case SSE: 1850 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 1851 break; 1852 1853 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 1854 // returned on the X87 stack in %st0 as 80-bit x87 number. 1855 case X87: 1856 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 1857 break; 1858 1859 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 1860 // part of the value is returned in %st0 and the imaginary part in 1861 // %st1. 1862 case ComplexX87: 1863 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 1864 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), 1865 llvm::Type::getX86_FP80Ty(getVMContext()), 1866 NULL); 1867 break; 1868 } 1869 1870 llvm::Type *HighPart = 0; 1871 switch (Hi) { 1872 // Memory was handled previously and X87 should 1873 // never occur as a hi class. 1874 case Memory: 1875 case X87: 1876 llvm_unreachable("Invalid classification for hi word."); 1877 1878 case ComplexX87: // Previously handled. 1879 case NoClass: 1880 break; 1881 1882 case Integer: 1883 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 1884 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 1885 return ABIArgInfo::getDirect(HighPart, 8); 1886 break; 1887 case SSE: 1888 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 1889 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 1890 return ABIArgInfo::getDirect(HighPart, 8); 1891 break; 1892 1893 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 1894 // is passed in the next available eightbyte chunk if the last used 1895 // vector register. 1896 // 1897 // SSEUP should always be preceded by SSE, just widen. 
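  // For example (illustrative), returning '__m128' classifies as Lo == SSE and
  // Hi == SSEUp, and the whole value is returned as the single vector type
  // chosen by GetByteVectorType below.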
1898 case SSEUp: 1899 assert(Lo == SSE && "Unexpected SSEUp classification."); 1900 ResType = GetByteVectorType(RetTy); 1901 break; 1902 1903 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 1904 // returned together with the previous X87 value in %st0. 1905 case X87Up: 1906 // If X87Up is preceded by X87, we don't need to do 1907 // anything. However, in some cases with unions it may not be 1908 // preceded by X87. In such situations we follow gcc and pass the 1909 // extra bits in an SSE reg. 1910 if (Lo != X87) { 1911 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 1912 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 1913 return ABIArgInfo::getDirect(HighPart, 8); 1914 } 1915 break; 1916 } 1917 1918 // If a high part was specified, merge it together with the low part. It is 1919 // known to pass in the high eightbyte of the result. We do this by forming a 1920 // first class struct aggregate with the high and low part: {low, high} 1921 if (HighPart) 1922 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData()); 1923 1924 return ABIArgInfo::getDirect(ResType); 1925 } 1926 1927 ABIArgInfo X86_64ABIInfo::classifyArgumentType( 1928 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE) 1929 const 1930 { 1931 X86_64ABIInfo::Class Lo, Hi; 1932 classify(Ty, 0, Lo, Hi); 1933 1934 // Check some invariants. 1935 // FIXME: Enforce these by construction. 1936 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 1937 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 1938 1939 neededInt = 0; 1940 neededSSE = 0; 1941 llvm::Type *ResType = 0; 1942 switch (Lo) { 1943 case NoClass: 1944 if (Hi == NoClass) 1945 return ABIArgInfo::getIgnore(); 1946 // If the low part is just padding, it takes no register, leave ResType 1947 // null. 1948 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 1949 "Unknown missing lo part"); 1950 break; 1951 1952 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 1953 // on the stack. 1954 case Memory: 1955 1956 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 1957 // COMPLEX_X87, it is passed in memory. 1958 case X87: 1959 case ComplexX87: 1960 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 1961 ++neededInt; 1962 return getIndirectResult(Ty, freeIntRegs); 1963 1964 case SSEUp: 1965 case X87Up: 1966 llvm_unreachable("Invalid classification for lo word."); 1967 1968 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 1969 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 1970 // and %r9 is used. 1971 case Integer: 1972 ++neededInt; 1973 1974 // Pick an 8-byte type based on the preferred type. 1975 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 1976 1977 // If we have a sign or zero extended integer, make sure to return Extend 1978 // so that the parameter gets the right LLVM IR attributes. 1979 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 1980 // Treat an enum type as its underlying type. 1981 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1982 Ty = EnumTy->getDecl()->getIntegerType(); 1983 1984 if (Ty->isIntegralOrEnumerationType() && 1985 Ty->isPromotableIntegerType()) 1986 return ABIArgInfo::getExtend(); 1987 } 1988 1989 break; 1990 1991 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 1992 // available SSE register is used, the registers are taken in the 1993 // order from %xmm0 to %xmm7. 
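  // For example (illustrative), a 'float' or 'double' argument takes this path
  // and consumes a single SSE register.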
1994 case SSE: { 1995 llvm::Type *IRType = CGT.ConvertType(Ty); 1996 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 1997 ++neededSSE; 1998 break; 1999 } 2000 } 2001 2002 llvm::Type *HighPart = 0; 2003 switch (Hi) { 2004 // Memory was handled previously, ComplexX87 and X87 should 2005 // never occur as hi classes, and X87Up must be preceded by X87, 2006 // which is passed in memory. 2007 case Memory: 2008 case X87: 2009 case ComplexX87: 2010 llvm_unreachable("Invalid classification for hi word."); 2011 2012 case NoClass: break; 2013 2014 case Integer: 2015 ++neededInt; 2016 // Pick an 8-byte type based on the preferred type. 2017 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2018 2019 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2020 return ABIArgInfo::getDirect(HighPart, 8); 2021 break; 2022 2023 // X87Up generally doesn't occur here (long double is passed in 2024 // memory), except in situations involving unions. 2025 case X87Up: 2026 case SSE: 2027 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2028 2029 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2030 return ABIArgInfo::getDirect(HighPart, 8); 2031 2032 ++neededSSE; 2033 break; 2034 2035 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 2036 // eightbyte is passed in the upper half of the last used SSE 2037 // register. This only happens when 128-bit vectors are passed. 2038 case SSEUp: 2039 assert(Lo == SSE && "Unexpected SSEUp classification"); 2040 ResType = GetByteVectorType(Ty); 2041 break; 2042 } 2043 2044 // If a high part was specified, merge it together with the low part. It is 2045 // known to pass in the high eightbyte of the result. We do this by forming a 2046 // first class struct aggregate with the high and low part: {low, high} 2047 if (HighPart) 2048 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData()); 2049 2050 return ABIArgInfo::getDirect(ResType); 2051 } 2052 2053 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2054 2055 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2056 2057 // Keep track of the number of assigned registers. 2058 unsigned freeIntRegs = 6, freeSSERegs = 8; 2059 2060 // If the return value is indirect, then the hidden argument is consuming one 2061 // integer register. 2062 if (FI.getReturnInfo().isIndirect()) 2063 --freeIntRegs; 2064 2065 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 2066 // get assigned (in left-to-right order) for passing as follows... 2067 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2068 it != ie; ++it) { 2069 unsigned neededInt, neededSSE; 2070 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt, 2071 neededSSE); 2072 2073 // AMD64-ABI 3.2.3p3: If there are no registers available for any 2074 // eightbyte of an argument, the whole argument is passed on the 2075 // stack. If registers have already been assigned for some 2076 // eightbytes of such an argument, the assignments get reverted. 
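    // For example (illustrative), if a 16-byte struct classified as
    // {INTEGER, INTEGER} needs two GPRs but only one remains free, neither
    // eightbyte gets a register and the whole argument is passed in memory.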
2077 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 2078 freeIntRegs -= neededInt; 2079 freeSSERegs -= neededSSE; 2080 } else { 2081 it->info = getIndirectResult(it->type, freeIntRegs); 2082 } 2083 } 2084 } 2085 2086 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 2087 QualType Ty, 2088 CodeGenFunction &CGF) { 2089 llvm::Value *overflow_arg_area_p = 2090 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 2091 llvm::Value *overflow_arg_area = 2092 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 2093 2094 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 2095 // byte boundary if alignment needed by type exceeds 8 byte boundary. 2096 // It isn't stated explicitly in the standard, but in practice we use 2097 // alignment greater than 16 where necessary. 2098 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 2099 if (Align > 8) { 2100 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 2101 llvm::Value *Offset = 2102 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 2103 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 2104 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 2105 CGF.Int64Ty); 2106 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); 2107 overflow_arg_area = 2108 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 2109 overflow_arg_area->getType(), 2110 "overflow_arg_area.align"); 2111 } 2112 2113 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 2114 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2115 llvm::Value *Res = 2116 CGF.Builder.CreateBitCast(overflow_arg_area, 2117 llvm::PointerType::getUnqual(LTy)); 2118 2119 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 2120 // l->overflow_arg_area + sizeof(type). 2121 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 2122 // an 8 byte boundary. 2123 2124 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 2125 llvm::Value *Offset = 2126 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 2127 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 2128 "overflow_arg_area.next"); 2129 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 2130 2131 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 2132 return Res; 2133 } 2134 2135 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2136 CodeGenFunction &CGF) const { 2137 // Assume that va_list type is correct; should be pointer to LLVM type: 2138 // struct { 2139 // i32 gp_offset; 2140 // i32 fp_offset; 2141 // i8* overflow_arg_area; 2142 // i8* reg_save_area; 2143 // }; 2144 unsigned neededInt, neededSSE; 2145 2146 Ty = CGF.getContext().getCanonicalType(Ty); 2147 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE); 2148 2149 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 2150 // in the registers. If not go to step 7. 2151 if (!neededInt && !neededSSE) 2152 return EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2153 2154 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 2155 // general purpose registers needed to pass type and num_fp to hold 2156 // the number of floating point registers needed. 2157 2158 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 2159 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 2160 // l->fp_offset > 304 - num_fp * 16 go to step 7. 
2161 // 2162 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of 2163 // register save space). 2164 2165 llvm::Value *InRegs = 0; 2166 llvm::Value *gp_offset_p = 0, *gp_offset = 0; 2167 llvm::Value *fp_offset_p = 0, *fp_offset = 0; 2168 if (neededInt) { 2169 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); 2170 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); 2171 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); 2172 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); 2173 } 2174 2175 if (neededSSE) { 2176 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); 2177 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); 2178 llvm::Value *FitsInFP = 2179 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); 2180 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); 2181 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; 2182 } 2183 2184 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 2185 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 2186 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 2187 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 2188 2189 // Emit code to load the value if it was passed in registers. 2190 2191 CGF.EmitBlock(InRegBlock); 2192 2193 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with 2194 // an offset of l->gp_offset and/or l->fp_offset. This may require 2195 // copying to a temporary location in case the parameter is passed 2196 // in different register classes or requires an alignment greater 2197 // than 8 for general purpose registers and 16 for XMM registers. 2198 // 2199 // FIXME: This really results in shameful code when we end up needing to 2200 // collect arguments from different places; often what should result in a 2201 // simple assembling of a structure from scattered addresses has many more 2202 // loads than necessary. Can we clean this up? 2203 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2204 llvm::Value *RegAddr = 2205 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3), 2206 "reg_save_area"); 2207 if (neededInt && neededSSE) { 2208 // FIXME: Cleanup. 2209 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 2210 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 2211 llvm::Value *Tmp = CGF.CreateTempAlloca(ST); 2212 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 2213 llvm::Type *TyLo = ST->getElementType(0); 2214 llvm::Type *TyHi = ST->getElementType(1); 2215 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 2216 "Unexpected ABI info for mixed regs"); 2217 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 2218 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 2219 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2220 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2221 llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr; 2222 llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? 
GPAddr : FPAddr; 2223 llvm::Value *V = 2224 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 2225 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2226 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 2227 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2228 2229 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2230 llvm::PointerType::getUnqual(LTy)); 2231 } else if (neededInt) { 2232 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2233 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2234 llvm::PointerType::getUnqual(LTy)); 2235 } else if (neededSSE == 1) { 2236 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2237 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2238 llvm::PointerType::getUnqual(LTy)); 2239 } else { 2240 assert(neededSSE == 2 && "Invalid number of needed registers!"); 2241 // SSE registers are spaced 16 bytes apart in the register save 2242 // area, we need to collect the two eightbytes together. 2243 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2244 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); 2245 llvm::Type *DoubleTy = CGF.DoubleTy; 2246 llvm::Type *DblPtrTy = 2247 llvm::PointerType::getUnqual(DoubleTy); 2248 llvm::StructType *ST = llvm::StructType::get(DoubleTy, 2249 DoubleTy, NULL); 2250 llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST); 2251 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 2252 DblPtrTy)); 2253 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2254 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 2255 DblPtrTy)); 2256 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2257 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2258 llvm::PointerType::getUnqual(LTy)); 2259 } 2260 2261 // AMD64-ABI 3.5.7p5: Step 5. Set: 2262 // l->gp_offset = l->gp_offset + num_gp * 8 2263 // l->fp_offset = l->fp_offset + num_fp * 16. 2264 if (neededInt) { 2265 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2266 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2267 gp_offset_p); 2268 } 2269 if (neededSSE) { 2270 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2271 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2272 fp_offset_p); 2273 } 2274 CGF.EmitBranch(ContBlock); 2275 2276 // Emit code to load the value if it was passed in memory. 2277 2278 CGF.EmitBlock(InMemBlock); 2279 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2280 2281 // Return the appropriate result. 
2282 2283 CGF.EmitBlock(ContBlock); 2284 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2, 2285 "vaarg.addr"); 2286 ResAddr->addIncoming(RegAddr, InRegBlock); 2287 ResAddr->addIncoming(MemAddr, InMemBlock); 2288 return ResAddr; 2289 } 2290 2291 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const { 2292 2293 if (Ty->isVoidType()) 2294 return ABIArgInfo::getIgnore(); 2295 2296 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2297 Ty = EnumTy->getDecl()->getIntegerType(); 2298 2299 uint64_t Size = getContext().getTypeSize(Ty); 2300 2301 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2302 if (hasNonTrivialDestructorOrCopyConstructor(RT) || 2303 RT->getDecl()->hasFlexibleArrayMember()) 2304 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2305 2306 // FIXME: mingw-w64-gcc emits 128-bit struct as i128 2307 if (Size == 128 && 2308 getContext().getTargetInfo().getTriple().getOS() 2309 == llvm::Triple::MinGW32) 2310 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2311 Size)); 2312 2313 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 2314 // not 1, 2, 4, or 8 bytes, must be passed by reference." 2315 if (Size <= 64 && 2316 (Size & (Size - 1)) == 0) 2317 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2318 Size)); 2319 2320 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2321 } 2322 2323 if (Ty->isPromotableIntegerType()) 2324 return ABIArgInfo::getExtend(); 2325 2326 return ABIArgInfo::getDirect(); 2327 } 2328 2329 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2330 2331 QualType RetTy = FI.getReturnType(); 2332 FI.getReturnInfo() = classify(RetTy); 2333 2334 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2335 it != ie; ++it) 2336 it->info = classify(it->type); 2337 } 2338 2339 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2340 CodeGenFunction &CGF) const { 2341 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2342 2343 CGBuilderTy &Builder = CGF.Builder; 2344 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 2345 "ap"); 2346 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2347 llvm::Type *PTy = 2348 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2349 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2350 2351 uint64_t Offset = 2352 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8); 2353 llvm::Value *NextAddr = 2354 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2355 "ap.next"); 2356 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2357 2358 return AddrTyped; 2359 } 2360 2361 // PowerPC-32 2362 2363 namespace { 2364 class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2365 public: 2366 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2367 2368 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2369 // This is recovered from gcc output. 2370 return 1; // r1 is the dedicated stack pointer 2371 } 2372 2373 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2374 llvm::Value *Address) const; 2375 }; 2376 2377 } 2378 2379 bool 2380 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2381 llvm::Value *Address) const { 2382 // This is calculated from the LLVM and GCC tables and verified 2383 // against gcc output. AFAIK all ABIs use the same encoding. 
2384 2385 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2386 2387 llvm::IntegerType *i8 = CGF.Int8Ty; 2388 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2389 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2390 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2391 2392 // 0-31: r0-31, the 4-byte general-purpose registers 2393 AssignToArrayRange(Builder, Address, Four8, 0, 31); 2394 2395 // 32-63: fp0-31, the 8-byte floating-point registers 2396 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2397 2398 // 64-76 are various 4-byte special-purpose registers: 2399 // 64: mq 2400 // 65: lr 2401 // 66: ctr 2402 // 67: ap 2403 // 68-75 cr0-7 2404 // 76: xer 2405 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2406 2407 // 77-108: v0-31, the 16-byte vector registers 2408 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2409 2410 // 109: vrsave 2411 // 110: vscr 2412 // 111: spe_acc 2413 // 112: spefscr 2414 // 113: sfp 2415 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2416 2417 return false; 2418 } 2419 2420 // PowerPC-64 2421 2422 namespace { 2423 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2424 public: 2425 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2426 2427 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2428 // This is recovered from gcc output. 2429 return 1; // r1 is the dedicated stack pointer 2430 } 2431 2432 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2433 llvm::Value *Address) const; 2434 }; 2435 2436 } 2437 2438 bool 2439 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2440 llvm::Value *Address) const { 2441 // This is calculated from the LLVM and GCC tables and verified 2442 // against gcc output. AFAIK all ABIs use the same encoding. 
2443 2444 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2445 2446 llvm::IntegerType *i8 = CGF.Int8Ty; 2447 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2448 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2449 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2450 2451 // 0-31: r0-31, the 8-byte general-purpose registers 2452 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 2453 2454 // 32-63: fp0-31, the 8-byte floating-point registers 2455 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2456 2457 // 64-76 are various 4-byte special-purpose registers: 2458 // 64: mq 2459 // 65: lr 2460 // 66: ctr 2461 // 67: ap 2462 // 68-75 cr0-7 2463 // 76: xer 2464 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2465 2466 // 77-108: v0-31, the 16-byte vector registers 2467 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2468 2469 // 109: vrsave 2470 // 110: vscr 2471 // 111: spe_acc 2472 // 112: spefscr 2473 // 113: sfp 2474 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2475 2476 return false; 2477 } 2478 2479 //===----------------------------------------------------------------------===// 2480 // ARM ABI Implementation 2481 //===----------------------------------------------------------------------===// 2482 2483 namespace { 2484 2485 class ARMABIInfo : public ABIInfo { 2486 public: 2487 enum ABIKind { 2488 APCS = 0, 2489 AAPCS = 1, 2490 AAPCS_VFP 2491 }; 2492 2493 private: 2494 ABIKind Kind; 2495 2496 public: 2497 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {} 2498 2499 bool isEABI() const { 2500 StringRef Env = 2501 getContext().getTargetInfo().getTriple().getEnvironmentName(); 2502 return (Env == "gnueabi" || Env == "eabi" || Env == "androideabi"); 2503 } 2504 2505 private: 2506 ABIKind getABIKind() const { return Kind; } 2507 2508 ABIArgInfo classifyReturnType(QualType RetTy) const; 2509 ABIArgInfo classifyArgumentType(QualType RetTy) const; 2510 2511 virtual void computeInfo(CGFunctionInfo &FI) const; 2512 2513 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2514 CodeGenFunction &CGF) const; 2515 }; 2516 2517 class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 2518 public: 2519 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 2520 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 2521 2522 const ARMABIInfo &getABIInfo() const { 2523 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 2524 } 2525 2526 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2527 return 13; 2528 } 2529 2530 StringRef getARCRetainAutoreleasedReturnValueMarker() const { 2531 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; 2532 } 2533 2534 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2535 llvm::Value *Address) const { 2536 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 2537 2538 // 0-15 are the 16 integer registers. 2539 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); 2540 return false; 2541 } 2542 2543 unsigned getSizeOfUnwindException() const { 2544 if (getABIInfo().isEABI()) return 88; 2545 return TargetCodeGenInfo::getSizeOfUnwindException(); 2546 } 2547 }; 2548 2549 } 2550 2551 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 2552 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2553 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2554 it != ie; ++it) 2555 it->info = classifyArgumentType(it->type); 2556 2557 // Always honor user-specified calling convention. 
2558 if (FI.getCallingConvention() != llvm::CallingConv::C) 2559 return; 2560 2561 // Calling convention as default by an ABI. 2562 llvm::CallingConv::ID DefaultCC; 2563 if (isEABI()) 2564 DefaultCC = llvm::CallingConv::ARM_AAPCS; 2565 else 2566 DefaultCC = llvm::CallingConv::ARM_APCS; 2567 2568 // If user did not ask for specific calling convention explicitly (e.g. via 2569 // pcs attribute), set effective calling convention if it's different than ABI 2570 // default. 2571 switch (getABIKind()) { 2572 case APCS: 2573 if (DefaultCC != llvm::CallingConv::ARM_APCS) 2574 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS); 2575 break; 2576 case AAPCS: 2577 if (DefaultCC != llvm::CallingConv::ARM_AAPCS) 2578 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS); 2579 break; 2580 case AAPCS_VFP: 2581 if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP) 2582 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP); 2583 break; 2584 } 2585 } 2586 2587 /// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous 2588 /// aggregate. If HAMembers is non-null, the number of base elements 2589 /// contained in the type is returned through it; this is used for the 2590 /// recursive calls that check aggregate component types. 2591 static bool isHomogeneousAggregate(QualType Ty, const Type *&Base, 2592 ASTContext &Context, 2593 uint64_t *HAMembers = 0) { 2594 uint64_t Members = 0; 2595 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 2596 if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members)) 2597 return false; 2598 Members *= AT->getSize().getZExtValue(); 2599 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 2600 const RecordDecl *RD = RT->getDecl(); 2601 if (RD->hasFlexibleArrayMember()) 2602 return false; 2603 2604 Members = 0; 2605 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2606 i != e; ++i) { 2607 const FieldDecl *FD = *i; 2608 uint64_t FldMembers; 2609 if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers)) 2610 return false; 2611 2612 Members = (RD->isUnion() ? 2613 std::max(Members, FldMembers) : Members + FldMembers); 2614 } 2615 } else { 2616 Members = 1; 2617 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 2618 Members = 2; 2619 Ty = CT->getElementType(); 2620 } 2621 2622 // Homogeneous aggregates for AAPCS-VFP must have base types of float, 2623 // double, or 64-bit or 128-bit vectors. 2624 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 2625 if (BT->getKind() != BuiltinType::Float && 2626 BT->getKind() != BuiltinType::Double) 2627 return false; 2628 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 2629 unsigned VecSize = Context.getTypeSize(VT); 2630 if (VecSize != 64 && VecSize != 128) 2631 return false; 2632 } else { 2633 return false; 2634 } 2635 2636 // The base type must be the same for all members. Vector types of the 2637 // same total size are treated as being equivalent here. 2638 const Type *TyPtr = Ty.getTypePtr(); 2639 if (!Base) 2640 Base = TyPtr; 2641 if (Base != TyPtr && 2642 (!Base->isVectorType() || !TyPtr->isVectorType() || 2643 Context.getTypeSize(Base) != Context.getTypeSize(TyPtr))) 2644 return false; 2645 } 2646 2647 // Homogeneous Aggregates can have at most 4 members of the base type. 
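  // For example (illustrative), 'struct { float x, y, z, w; }' and
  // 'struct { double d[4]; }' qualify, while 'struct { float f; double d; }'
  // (mixed base types) and 'struct { float f[5]; }' (five members) do not.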
2648 if (HAMembers) 2649 *HAMembers = Members; 2650 2651 return (Members > 0 && Members <= 4); 2652 } 2653 2654 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const { 2655 if (!isAggregateTypeForABI(Ty)) { 2656 // Treat an enum type as its underlying type. 2657 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2658 Ty = EnumTy->getDecl()->getIntegerType(); 2659 2660 return (Ty->isPromotableIntegerType() ? 2661 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2662 } 2663 2664 // Ignore empty records. 2665 if (isEmptyRecord(getContext(), Ty, true)) 2666 return ABIArgInfo::getIgnore(); 2667 2668 // Structures with either a non-trivial destructor or a non-trivial 2669 // copy constructor are always indirect. 2670 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 2671 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2672 2673 if (getABIKind() == ARMABIInfo::AAPCS_VFP) { 2674 // Homogeneous Aggregates need to be expanded. 2675 const Type *Base = 0; 2676 if (isHomogeneousAggregate(Ty, Base, getContext())) { 2677 assert(Base && "Base class should be set for homogeneous aggregate"); 2678 return ABIArgInfo::getExpand(); 2679 } 2680 } 2681 2682 // Otherwise, pass by coercing to a structure of the appropriate size. 2683 // 2684 // FIXME: This doesn't handle alignment > 64 bits. 2685 llvm::Type* ElemTy; 2686 unsigned SizeRegs; 2687 if (getContext().getTypeSizeInChars(Ty) <= CharUnits::fromQuantity(64)) { 2688 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 2689 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 2690 } else if (getABIKind() == ARMABIInfo::APCS) { 2691 // Initial ARM ByVal support is APCS-only. 2692 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 2693 } else { 2694 // FIXME: This is kind of nasty... but there isn't much choice 2695 // because most of the ARM calling conventions don't yet support 2696 // byval. 2697 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 2698 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 2699 } 2700 2701 llvm::Type *STy = 2702 llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL); 2703 return ABIArgInfo::getDirect(STy); 2704 } 2705 2706 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 2707 llvm::LLVMContext &VMContext) { 2708 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 2709 // is called integer-like if its size is less than or equal to one word, and 2710 // the offset of each of its addressable sub-fields is zero. 2711 2712 uint64_t Size = Context.getTypeSize(Ty); 2713 2714 // Check that the type fits in a word. 2715 if (Size > 32) 2716 return false; 2717 2718 // FIXME: Handle vector types! 2719 if (Ty->isVectorType()) 2720 return false; 2721 2722 // Float types are never treated as "integer like". 2723 if (Ty->isRealFloatingType()) 2724 return false; 2725 2726 // If this is a builtin or pointer type then it is ok. 2727 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 2728 return true; 2729 2730 // Small complex integer types are "integer like". 2731 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 2732 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 2733 2734 // Single element and zero sized arrays should be allowed, by the definition 2735 // above, but they are not. 2736 2737 // Otherwise, it must be a record type. 2738 const RecordType *RT = Ty->getAs<RecordType>(); 2739 if (!RT) return false; 2740 2741 // Ignore records with flexible arrays. 
2742 const RecordDecl *RD = RT->getDecl(); 2743 if (RD->hasFlexibleArrayMember()) 2744 return false; 2745 2746 // Check that all sub-fields are at offset 0, and are themselves "integer 2747 // like". 2748 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 2749 2750 bool HadField = false; 2751 unsigned idx = 0; 2752 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2753 i != e; ++i, ++idx) { 2754 const FieldDecl *FD = *i; 2755 2756 // Bit-fields are not addressable, we only need to verify they are "integer 2757 // like". We still have to disallow a subsequent non-bitfield, for example: 2758 // struct { int : 0; int x } 2759 // is non-integer like according to gcc. 2760 if (FD->isBitField()) { 2761 if (!RD->isUnion()) 2762 HadField = true; 2763 2764 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 2765 return false; 2766 2767 continue; 2768 } 2769 2770 // Check if this field is at offset 0. 2771 if (Layout.getFieldOffset(idx) != 0) 2772 return false; 2773 2774 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 2775 return false; 2776 2777 // Only allow at most one field in a structure. This doesn't match the 2778 // wording above, but follows gcc in situations with a field following an 2779 // empty structure. 2780 if (!RD->isUnion()) { 2781 if (HadField) 2782 return false; 2783 2784 HadField = true; 2785 } 2786 } 2787 2788 return true; 2789 } 2790 2791 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const { 2792 if (RetTy->isVoidType()) 2793 return ABIArgInfo::getIgnore(); 2794 2795 // Large vector types should be returned via memory. 2796 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 2797 return ABIArgInfo::getIndirect(0); 2798 2799 if (!isAggregateTypeForABI(RetTy)) { 2800 // Treat an enum type as its underlying type. 2801 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 2802 RetTy = EnumTy->getDecl()->getIntegerType(); 2803 2804 return (RetTy->isPromotableIntegerType() ? 2805 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2806 } 2807 2808 // Structures with either a non-trivial destructor or a non-trivial 2809 // copy constructor are always indirect. 2810 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 2811 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2812 2813 // Are we following APCS? 2814 if (getABIKind() == APCS) { 2815 if (isEmptyRecord(getContext(), RetTy, false)) 2816 return ABIArgInfo::getIgnore(); 2817 2818 // Complex types are all returned as packed integers. 2819 // 2820 // FIXME: Consider using 2 x vector types if the back end handles them 2821 // correctly. 2822 if (RetTy->isAnyComplexType()) 2823 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2824 getContext().getTypeSize(RetTy))); 2825 2826 // Integer like structures are returned in r0. 2827 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 2828 // Return in the smallest viable integer type. 2829 uint64_t Size = getContext().getTypeSize(RetTy); 2830 if (Size <= 8) 2831 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 2832 if (Size <= 16) 2833 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 2834 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 2835 } 2836 2837 // Otherwise return in memory. 2838 return ABIArgInfo::getIndirect(0); 2839 } 2840 2841 // Otherwise this is an AAPCS variant. 
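  // For example (illustrative), a 4-byte 'struct { char c; short s; }' is
  // returned directly in r0 as an i32 here, while larger aggregates that are
  // not VFP homogeneous aggregates are returned indirectly.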
2842 2843 if (isEmptyRecord(getContext(), RetTy, true)) 2844 return ABIArgInfo::getIgnore(); 2845 2846 // Check for homogeneous aggregates with AAPCS-VFP. 2847 if (getABIKind() == AAPCS_VFP) { 2848 const Type *Base = 0; 2849 if (isHomogeneousAggregate(RetTy, Base, getContext())) { 2850 assert(Base && "Base class should be set for homogeneous aggregate"); 2851 // Homogeneous Aggregates are returned directly. 2852 return ABIArgInfo::getDirect(); 2853 } 2854 } 2855 2856 // Aggregates <= 4 bytes are returned in r0; other aggregates 2857 // are returned indirectly. 2858 uint64_t Size = getContext().getTypeSize(RetTy); 2859 if (Size <= 32) { 2860 // Return in the smallest viable integer type. 2861 if (Size <= 8) 2862 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 2863 if (Size <= 16) 2864 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 2865 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 2866 } 2867 2868 return ABIArgInfo::getIndirect(0); 2869 } 2870 2871 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2872 CodeGenFunction &CGF) const { 2873 llvm::Type *BP = CGF.Int8PtrTy; 2874 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2875 2876 CGBuilderTy &Builder = CGF.Builder; 2877 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 2878 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2879 // Handle address alignment for type alignment > 32 bits 2880 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; 2881 if (TyAlign > 4) { 2882 assert((TyAlign & (TyAlign - 1)) == 0 && 2883 "Alignment is not power of 2!"); 2884 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty); 2885 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1)); 2886 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1))); 2887 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 2888 } 2889 llvm::Type *PTy = 2890 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2891 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2892 2893 uint64_t Offset = 2894 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 2895 llvm::Value *NextAddr = 2896 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2897 "ap.next"); 2898 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2899 2900 return AddrTyped; 2901 } 2902 2903 //===----------------------------------------------------------------------===// 2904 // NVPTX ABI Implementation 2905 //===----------------------------------------------------------------------===// 2906 2907 namespace { 2908 2909 class NVPTXABIInfo : public ABIInfo { 2910 public: 2911 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 2912 2913 ABIArgInfo classifyReturnType(QualType RetTy) const; 2914 ABIArgInfo classifyArgumentType(QualType Ty) const; 2915 2916 virtual void computeInfo(CGFunctionInfo &FI) const; 2917 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2918 CodeGenFunction &CFG) const; 2919 }; 2920 2921 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { 2922 public: 2923 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT) 2924 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {} 2925 2926 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2927 CodeGen::CodeGenModule &M) const; 2928 }; 2929 2930 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const { 2931 if (RetTy->isVoidType()) 2932 return ABIArgInfo::getIgnore(); 2933 if (isAggregateTypeForABI(RetTy)) 2934 return 
ABIArgInfo::getIndirect(0); 2935 return ABIArgInfo::getDirect(); 2936 } 2937 2938 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const { 2939 if (isAggregateTypeForABI(Ty)) 2940 return ABIArgInfo::getIndirect(0); 2941 2942 return ABIArgInfo::getDirect(); 2943 } 2944 2945 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const { 2946 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2947 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2948 it != ie; ++it) 2949 it->info = classifyArgumentType(it->type); 2950 2951 // Always honor user-specified calling convention. 2952 if (FI.getCallingConvention() != llvm::CallingConv::C) 2953 return; 2954 2955 // Calling convention as default by an ABI. 2956 // We're still using the PTX_Kernel/PTX_Device calling conventions here, 2957 // but we should switch to NVVM metadata later on. 2958 llvm::CallingConv::ID DefaultCC; 2959 const LangOptions &LangOpts = getContext().getLangOpts(); 2960 if (LangOpts.OpenCL || LangOpts.CUDA) { 2961 // If we are in OpenCL or CUDA mode, then default to device functions 2962 DefaultCC = llvm::CallingConv::PTX_Device; 2963 } else { 2964 // If we are in standard C/C++ mode, use the triple to decide on the default 2965 StringRef Env = 2966 getContext().getTargetInfo().getTriple().getEnvironmentName(); 2967 if (Env == "device") 2968 DefaultCC = llvm::CallingConv::PTX_Device; 2969 else 2970 DefaultCC = llvm::CallingConv::PTX_Kernel; 2971 } 2972 FI.setEffectiveCallingConvention(DefaultCC); 2973 2974 } 2975 2976 llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2977 CodeGenFunction &CFG) const { 2978 llvm_unreachable("NVPTX does not support varargs"); 2979 } 2980 2981 void NVPTXTargetCodeGenInfo:: 2982 SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2983 CodeGen::CodeGenModule &M) const{ 2984 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 2985 if (!FD) return; 2986 2987 llvm::Function *F = cast<llvm::Function>(GV); 2988 2989 // Perform special handling in OpenCL mode 2990 if (M.getLangOpts().OpenCL) { 2991 // Use OpenCL function attributes to set proper calling conventions 2992 // By default, all functions are device functions 2993 if (FD->hasAttr<OpenCLKernelAttr>()) { 2994 // OpenCL __kernel functions get a kernel calling convention 2995 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 2996 // And kernel functions are not subject to inlining 2997 F->addFnAttr(llvm::Attribute::NoInline); 2998 } 2999 } 3000 3001 // Perform special handling in CUDA mode. 3002 if (M.getLangOpts().CUDA) { 3003 // CUDA __global__ functions get a kernel calling convention. Since 3004 // __global__ functions cannot be called from the device, we do not 3005 // need to set the noinline attribute. 
3006 if (FD->getAttr<CUDAGlobalAttr>()) 3007 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 3008 } 3009 } 3010 3011 } 3012 3013 //===----------------------------------------------------------------------===// 3014 // MBlaze ABI Implementation 3015 //===----------------------------------------------------------------------===// 3016 3017 namespace { 3018 3019 class MBlazeABIInfo : public ABIInfo { 3020 public: 3021 MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3022 3023 bool isPromotableIntegerType(QualType Ty) const; 3024 3025 ABIArgInfo classifyReturnType(QualType RetTy) const; 3026 ABIArgInfo classifyArgumentType(QualType RetTy) const; 3027 3028 virtual void computeInfo(CGFunctionInfo &FI) const { 3029 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3030 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3031 it != ie; ++it) 3032 it->info = classifyArgumentType(it->type); 3033 } 3034 3035 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3036 CodeGenFunction &CGF) const; 3037 }; 3038 3039 class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo { 3040 public: 3041 MBlazeTargetCodeGenInfo(CodeGenTypes &CGT) 3042 : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {} 3043 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3044 CodeGen::CodeGenModule &M) const; 3045 }; 3046 3047 } 3048 3049 bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const { 3050 // MBlaze ABI requires all 8 and 16 bit quantities to be extended. 3051 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 3052 switch (BT->getKind()) { 3053 case BuiltinType::Bool: 3054 case BuiltinType::Char_S: 3055 case BuiltinType::Char_U: 3056 case BuiltinType::SChar: 3057 case BuiltinType::UChar: 3058 case BuiltinType::Short: 3059 case BuiltinType::UShort: 3060 return true; 3061 default: 3062 return false; 3063 } 3064 return false; 3065 } 3066 3067 llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3068 CodeGenFunction &CGF) const { 3069 // FIXME: Implement 3070 return 0; 3071 } 3072 3073 3074 ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const { 3075 if (RetTy->isVoidType()) 3076 return ABIArgInfo::getIgnore(); 3077 if (isAggregateTypeForABI(RetTy)) 3078 return ABIArgInfo::getIndirect(0); 3079 3080 return (isPromotableIntegerType(RetTy) ? 3081 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3082 } 3083 3084 ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const { 3085 if (isAggregateTypeForABI(Ty)) 3086 return ABIArgInfo::getIndirect(0); 3087 3088 return (isPromotableIntegerType(Ty) ? 3089 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3090 } 3091 3092 void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3093 llvm::GlobalValue *GV, 3094 CodeGen::CodeGenModule &M) 3095 const { 3096 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3097 if (!FD) return; 3098 3099 llvm::CallingConv::ID CC = llvm::CallingConv::C; 3100 if (FD->hasAttr<MBlazeInterruptHandlerAttr>()) 3101 CC = llvm::CallingConv::MBLAZE_INTR; 3102 else if (FD->hasAttr<MBlazeSaveVolatilesAttr>()) 3103 CC = llvm::CallingConv::MBLAZE_SVOL; 3104 3105 if (CC != llvm::CallingConv::C) { 3106 // Handle 'interrupt_handler' attribute: 3107 llvm::Function *F = cast<llvm::Function>(GV); 3108 3109 // Step 1: Set ISR calling convention. 3110 F->setCallingConv(CC); 3111 3112 // Step 2: Add attributes goodness. 3113 F->addFnAttr(llvm::Attribute::NoInline); 3114 } 3115 3116 // Step 3: Emit _interrupt_handler alias. 
3117 if (CC == llvm::CallingConv::MBLAZE_INTR) 3118 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 3119 "_interrupt_handler", GV, &M.getModule()); 3120 } 3121 3122 3123 //===----------------------------------------------------------------------===// 3124 // MSP430 ABI Implementation 3125 //===----------------------------------------------------------------------===// 3126 3127 namespace { 3128 3129 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 3130 public: 3131 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 3132 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 3133 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3134 CodeGen::CodeGenModule &M) const; 3135 }; 3136 3137 } 3138 3139 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3140 llvm::GlobalValue *GV, 3141 CodeGen::CodeGenModule &M) const { 3142 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 3143 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 3144 // Handle 'interrupt' attribute: 3145 llvm::Function *F = cast<llvm::Function>(GV); 3146 3147 // Step 1: Set ISR calling convention. 3148 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 3149 3150 // Step 2: Add attributes goodness. 3151 F->addFnAttr(llvm::Attribute::NoInline); 3152 3153 // Step 3: Emit ISR vector alias. 3154 unsigned Num = attr->getNumber() + 0xffe0; 3155 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 3156 "vector_" + Twine::utohexstr(Num), 3157 GV, &M.getModule()); 3158 } 3159 } 3160 } 3161 3162 //===----------------------------------------------------------------------===// 3163 // MIPS ABI Implementation. This works for both little-endian and 3164 // big-endian variants. 3165 //===----------------------------------------------------------------------===// 3166 3167 namespace { 3168 class MipsABIInfo : public ABIInfo { 3169 bool IsO32; 3170 unsigned MinABIStackAlignInBytes, StackAlignInBytes; 3171 void CoerceToIntArgs(uint64_t TySize, 3172 SmallVector<llvm::Type*, 8> &ArgList) const; 3173 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; 3174 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 3175 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 3176 public: 3177 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 3178 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), 3179 StackAlignInBytes(IsO32 ? 8 : 16) {} 3180 3181 ABIArgInfo classifyReturnType(QualType RetTy) const; 3182 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 3183 virtual void computeInfo(CGFunctionInfo &FI) const; 3184 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3185 CodeGenFunction &CGF) const; 3186 }; 3187 3188 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 3189 unsigned SizeOfUnwindException; 3190 public: 3191 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 3192 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 3193 SizeOfUnwindException(IsO32 ? 
3194
3195   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
3196     return 29;
3197   }
3198
3199   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3200                                llvm::Value *Address) const;
3201
3202   unsigned getSizeOfUnwindException() const {
3203     return SizeOfUnwindException;
3204   }
3205 };
3206 }
3207
3208 void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
3209                                   SmallVector<llvm::Type*, 8> &ArgList) const {
3210   llvm::IntegerType *IntTy =
3211     llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
3212
3213   // Add (TySize / (MinABIStackAlignInBytes * 8)) args of IntTy.
3214   for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
3215     ArgList.push_back(IntTy);
3216
3217   // If there are leftover bits, add one more integer of that width.
3218   unsigned R = TySize % (MinABIStackAlignInBytes * 8);
3219
3220   if (R)
3221     ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
3222 }
3223
3224 // In N32/64, an aligned double-precision floating-point field is passed in
3225 // a register.
3226 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
3227   SmallVector<llvm::Type*, 8> ArgList, IntArgList;
3228
3229   if (IsO32) {
3230     CoerceToIntArgs(TySize, ArgList);
3231     return llvm::StructType::get(getVMContext(), ArgList);
3232   }
3233
3234   if (Ty->isComplexType())
3235     return CGT.ConvertType(Ty);
3236
3237   const RecordType *RT = Ty->getAs<RecordType>();
3238
3239   // Unions/vectors are passed in integer registers.
3240   if (!RT || !RT->isStructureOrClassType()) {
3241     CoerceToIntArgs(TySize, ArgList);
3242     return llvm::StructType::get(getVMContext(), ArgList);
3243   }
3244
3245   const RecordDecl *RD = RT->getDecl();
3246   const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
3247   assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
3248
3249   uint64_t LastOffset = 0;
3250   unsigned idx = 0;
3251   llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
3252
3253   // Iterate over fields in the struct/class and check if there are any aligned
3254   // double fields.
3255   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3256        i != e; ++i, ++idx) {
3257     const QualType Ty = i->getType();
3258     const BuiltinType *BT = Ty->getAs<BuiltinType>();
3259
3260     if (!BT || BT->getKind() != BuiltinType::Double)
3261       continue;
3262
3263     uint64_t Offset = Layout.getFieldOffset(idx);
3264     if (Offset % 64) // Ignore doubles that are not aligned.
3265       continue;
3266
3267     // Add ((Offset - LastOffset) / 64) args of type i64.
3268     for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
3269       ArgList.push_back(I64);
3270
3271     // Add double type.
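    // For example (illustrative, N64): for 'struct S { int i; double d; };'
    // the i64 loop above covers the leading 8 bytes, and the aligned double
    // appended below makes the overall coercion { i64, double }.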
3272     ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
3273     LastOffset = Offset + 64;
3274   }
3275
3276   CoerceToIntArgs(TySize - LastOffset, IntArgList);
3277   ArgList.append(IntArgList.begin(), IntArgList.end());
3278
3279   return llvm::StructType::get(getVMContext(), ArgList);
3280 }
3281
3282 llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const {
3283   assert((Offset % MinABIStackAlignInBytes) == 0);
3284
3285   if ((Align - 1) & Offset)
3286     return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
3287
3288   return 0;
3289 }
3290
3291 ABIArgInfo
3292 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
3293   uint64_t OrigOffset = Offset;
3294   uint64_t TySize = getContext().getTypeSize(Ty);
3295   uint64_t Align = getContext().getTypeAlign(Ty) / 8;
3296
3297   Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
3298                    (uint64_t)StackAlignInBytes);
3299   Offset = llvm::RoundUpToAlignment(Offset, Align);
3300   Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
3301
3302   if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
3303     // Ignore empty aggregates.
3304     if (TySize == 0)
3305       return ABIArgInfo::getIgnore();
3306
3307     // Records with non-trivial destructors or copy constructors should not be
3308     // passed by value.
3309     if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) {
3310       Offset = OrigOffset + MinABIStackAlignInBytes;
3311       return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3312     }
3313
3314     // If we have reached here, aggregates are passed directly by coercing to
3315     // another structure type. Padding is inserted if the offset of the
3316     // aggregate is unaligned.
3317     return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
3318                                  getPaddingType(Align, OrigOffset));
3319   }
3320
3321   // Treat an enum type as its underlying type.
3322   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3323     Ty = EnumTy->getDecl()->getIntegerType();
3324
3325   if (Ty->isPromotableIntegerType())
3326     return ABIArgInfo::getExtend();
3327
3328   return ABIArgInfo::getDirect(0, 0, getPaddingType(Align, OrigOffset));
3329 }
3330
3331 llvm::Type*
3332 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
3333   const RecordType *RT = RetTy->getAs<RecordType>();
3334   SmallVector<llvm::Type*, 8> RTList;
3335
3336   if (RT && RT->isStructureOrClassType()) {
3337     const RecordDecl *RD = RT->getDecl();
3338     const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
3339     unsigned FieldCnt = Layout.getFieldCount();
3340
3341     // N32/64 returns structs/classes in floating-point registers if the
3342     // following conditions are met:
3343     // 1. The size of the struct/class is no larger than 128 bits.
3344     // 2. The struct/class has one or two fields all of which are floating
3345     //    point types.
3346     // 3. The offset of the first field is zero (this follows what gcc does).
3347     //
3348     // Any other composite results are returned in integer registers.
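    //
    // For example (illustrative): 'struct { double d; }' is returned as
    // { double } and 'struct { float re, im; }' as { float, float }, while
    // 'struct { int a; float b; }' falls through to the integer path below.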
3349 // 3350 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 3351 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 3352 for (; b != e; ++b) { 3353 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 3354 3355 if (!BT || !BT->isFloatingPoint()) 3356 break; 3357 3358 RTList.push_back(CGT.ConvertType(b->getType())); 3359 } 3360 3361 if (b == e) 3362 return llvm::StructType::get(getVMContext(), RTList, 3363 RD->hasAttr<PackedAttr>()); 3364 3365 RTList.clear(); 3366 } 3367 } 3368 3369 CoerceToIntArgs(Size, RTList); 3370 return llvm::StructType::get(getVMContext(), RTList); 3371 } 3372 3373 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 3374 uint64_t Size = getContext().getTypeSize(RetTy); 3375 3376 if (RetTy->isVoidType() || Size == 0) 3377 return ABIArgInfo::getIgnore(); 3378 3379 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 3380 if (Size <= 128) { 3381 if (RetTy->isAnyComplexType()) 3382 return ABIArgInfo::getDirect(); 3383 3384 // O32 returns integer vectors in registers. 3385 if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation()) 3386 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 3387 3388 if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 3389 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 3390 } 3391 3392 return ABIArgInfo::getIndirect(0); 3393 } 3394 3395 // Treat an enum type as its underlying type. 3396 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3397 RetTy = EnumTy->getDecl()->getIntegerType(); 3398 3399 return (RetTy->isPromotableIntegerType() ? 3400 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3401 } 3402 3403 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 3404 ABIArgInfo &RetInfo = FI.getReturnInfo(); 3405 RetInfo = classifyReturnType(FI.getReturnType()); 3406 3407 // Check if a pointer to an aggregate is passed as a hidden argument. 3408 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0; 3409 3410 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3411 it != ie; ++it) 3412 it->info = classifyArgumentType(it->type, Offset); 3413 } 3414 3415 llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3416 CodeGenFunction &CGF) const { 3417 llvm::Type *BP = CGF.Int8PtrTy; 3418 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3419 3420 CGBuilderTy &Builder = CGF.Builder; 3421 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 3422 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3423 int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8; 3424 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3425 llvm::Value *AddrTyped; 3426 unsigned PtrWidth = getContext().getTargetInfo().getPointerWidth(0); 3427 llvm::IntegerType *IntTy = (PtrWidth == 32) ? 
    CGF.Int32Ty : CGF.Int64Ty;
3428
3429   if (TypeAlign > MinABIStackAlignInBytes) {
3430     llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
3431     llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
3432     llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
3433     llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
3434     llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
3435     AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
3436   }
3437   else
3438     AddrTyped = Builder.CreateBitCast(Addr, PTy);
3439
3440   llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
3441   TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
3442   uint64_t Offset =
3443     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
3444   llvm::Value *NextAddr =
3445     Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
3446                       "ap.next");
3447   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3448
3449   return AddrTyped;
3450 }
3451
3452 bool
3453 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3454                                                llvm::Value *Address) const {
3455   // This information comes from gcc's implementation, which seems to
3456   // be as canonical as it gets.
3457
3458   // Everything on MIPS is 4 bytes. Double-precision FP registers
3459   // are aliased to pairs of single-precision FP registers.
3460   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
3461
3462   // 0-31 are the general purpose registers, $0 - $31.
3463   // 32-63 are the floating-point registers, $f0 - $f31.
3464   // 64 and 65 are the multiply/divide registers, $hi and $lo.
3465   // 66 is the (notional, I think) register for signal-handler return.
3466   AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
3467
3468   // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
3469   // They are one bit wide and ignored here.
3470
3471   // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
3472   // (coprocessor 1 is the FP unit)
3473   // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
3474   // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
3475   // 176-181 are the DSP accumulator registers.
3476   AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
3477   return false;
3478 }
3479
3480 //===----------------------------------------------------------------------===//
3481 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
3482 // Currently subclassed only to implement custom OpenCL C function attribute
3483 // handling.
3484 //===----------------------------------------------------------------------===//
3485
3486 namespace {
3487
3488 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
3489 public:
3490   TCETargetCodeGenInfo(CodeGenTypes &CGT)
3491     : DefaultTargetCodeGenInfo(CGT) {}
3492
3493   virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
3494                                    CodeGen::CodeGenModule &M) const;
3495 };
3496
3497 void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
3498                                                llvm::GlobalValue *GV,
3499                                                CodeGen::CodeGenModule &M) const {
3500   const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
3501   if (!FD) return;
3502
3503   llvm::Function *F = cast<llvm::Function>(GV);
3504
3505   if (M.getLangOpts().OpenCL) {
3506     if (FD->hasAttr<OpenCLKernelAttr>()) {
3507       // OpenCL C kernel functions are not subject to inlining.
3508       F->addFnAttr(llvm::Attribute::NoInline);
3509
3510       if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
3511
3512         // Convert the reqd_work_group_size() attributes to metadata.
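        // For illustration (sketch): a kernel declared with
        //   __attribute__((reqd_work_group_size(8, 4, 1)))
        // gets an opencl.kernel_wg_size_info entry roughly of the form
        //   !{void (...)* @kernel, i32 8, i32 4, i32 1, i1 true}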
3513         llvm::LLVMContext &Context = F->getContext();
3514         llvm::NamedMDNode *OpenCLMetadata =
3515           M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");
3516
3517         SmallVector<llvm::Value*, 5> Operands;
3518         Operands.push_back(F);
3519
3520         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
3521                              llvm::APInt(32,
3522                                FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
3523         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
3524                              llvm::APInt(32,
3525                                FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim())));
3526         Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
3527                              llvm::APInt(32,
3528                                FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim())));
3529
3530         // Add a boolean constant operand for "required" (true) or "hint" (false)
3531         // for implementing the work_group_size_hint attr later. Currently
3532         // always true as the hint is not yet implemented.
3533         Operands.push_back(llvm::ConstantInt::getTrue(Context));
3534         OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
3535       }
3536     }
3537   }
3538 }
3539
3540 }
3541
3542 //===----------------------------------------------------------------------===//
3543 // Hexagon ABI Implementation
3544 //===----------------------------------------------------------------------===//
3545
3546 namespace {
3547
3548 class HexagonABIInfo : public ABIInfo {
3549
3550
3551 public:
3552   HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
3553
3554 private:
3555
3556   ABIArgInfo classifyReturnType(QualType RetTy) const;
3557   ABIArgInfo classifyArgumentType(QualType Ty) const;
3558
3559   virtual void computeInfo(CGFunctionInfo &FI) const;
3560
3561   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3562                                  CodeGenFunction &CGF) const;
3563 };
3564
3565 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
3566 public:
3567   HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
3568     :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
3569
3570   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
3571     return 29;
3572   }
3573 };
3574
3575 }
3576
3577 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
3578   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3579   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3580        it != ie; ++it)
3581     it->info = classifyArgumentType(it->type);
3582 }
3583
3584 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
3585   if (!isAggregateTypeForABI(Ty)) {
3586     // Treat an enum type as its underlying type.
3587     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3588       Ty = EnumTy->getDecl()->getIntegerType();
3589
3590     return (Ty->isPromotableIntegerType() ?
3591             ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3592   }
3593
3594   // Ignore empty records.
3595   if (isEmptyRecord(getContext(), Ty, true))
3596     return ABIArgInfo::getIgnore();
3597
3598   // Structures with either a non-trivial destructor or a non-trivial
3599   // copy constructor are always indirect.
3600   if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
3601     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3602
3603   uint64_t Size = getContext().getTypeSize(Ty);
3604   if (Size > 64)
3605     return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
3606   // Pass in the smallest viable integer type.
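  // For example (illustrative): a 3-byte struct is passed as i32, a 6-byte
  // struct as i64, and anything over 8 bytes goes indirect (byval) above.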
3607 else if (Size > 32) 3608 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 3609 else if (Size > 16) 3610 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3611 else if (Size > 8) 3612 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3613 else 3614 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3615 } 3616 3617 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { 3618 if (RetTy->isVoidType()) 3619 return ABIArgInfo::getIgnore(); 3620 3621 // Large vector types should be returned via memory. 3622 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64) 3623 return ABIArgInfo::getIndirect(0); 3624 3625 if (!isAggregateTypeForABI(RetTy)) { 3626 // Treat an enum type as its underlying type. 3627 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3628 RetTy = EnumTy->getDecl()->getIntegerType(); 3629 3630 return (RetTy->isPromotableIntegerType() ? 3631 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3632 } 3633 3634 // Structures with either a non-trivial destructor or a non-trivial 3635 // copy constructor are always indirect. 3636 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 3637 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3638 3639 if (isEmptyRecord(getContext(), RetTy, true)) 3640 return ABIArgInfo::getIgnore(); 3641 3642 // Aggregates <= 8 bytes are returned in r0; other aggregates 3643 // are returned indirectly. 3644 uint64_t Size = getContext().getTypeSize(RetTy); 3645 if (Size <= 64) { 3646 // Return in the smallest viable integer type. 3647 if (Size <= 8) 3648 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3649 if (Size <= 16) 3650 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3651 if (Size <= 32) 3652 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3653 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 3654 } 3655 3656 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 3657 } 3658 3659 llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3660 CodeGenFunction &CGF) const { 3661 // FIXME: Need to handle alignment 3662 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3663 3664 CGBuilderTy &Builder = CGF.Builder; 3665 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 3666 "ap"); 3667 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3668 llvm::Type *PTy = 3669 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3670 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 3671 3672 uint64_t Offset = 3673 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 3674 llvm::Value *NextAddr = 3675 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 3676 "ap.next"); 3677 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3678 3679 return AddrTyped; 3680 } 3681 3682 3683 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { 3684 if (TheTargetCodeGenInfo) 3685 return *TheTargetCodeGenInfo; 3686 3687 const llvm::Triple &Triple = getContext().getTargetInfo().getTriple(); 3688 switch (Triple.getArch()) { 3689 default: 3690 return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types)); 3691 3692 case llvm::Triple::mips: 3693 case llvm::Triple::mipsel: 3694 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true)); 3695 3696 case llvm::Triple::mips64: 3697 case llvm::Triple::mips64el: 3698 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false)); 
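  // Note (illustrative): the boolean selects the O32 rules for the 32-bit
  // MIPS triples and the N32/N64 rules for the 64-bit ones, which in turn
  // picks the stack alignment and unwind-exception size in MipsABIInfo and
  // MIPSTargetCodeGenInfo above.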
3699 3700 case llvm::Triple::arm: 3701 case llvm::Triple::thumb: 3702 { 3703 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; 3704 3705 if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0) 3706 Kind = ARMABIInfo::APCS; 3707 else if (CodeGenOpts.FloatABI == "hard") 3708 Kind = ARMABIInfo::AAPCS_VFP; 3709 3710 return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind)); 3711 } 3712 3713 case llvm::Triple::ppc: 3714 return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types)); 3715 case llvm::Triple::ppc64: 3716 return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types)); 3717 3718 case llvm::Triple::nvptx: 3719 case llvm::Triple::nvptx64: 3720 return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types)); 3721 3722 case llvm::Triple::mblaze: 3723 return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types)); 3724 3725 case llvm::Triple::msp430: 3726 return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types)); 3727 3728 case llvm::Triple::tce: 3729 return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types)); 3730 3731 case llvm::Triple::x86: { 3732 bool DisableMMX = strcmp(getContext().getTargetInfo().getABI(), "no-mmx") == 0; 3733 3734 if (Triple.isOSDarwin()) 3735 return *(TheTargetCodeGenInfo = 3736 new X86_32TargetCodeGenInfo( 3737 Types, true, true, DisableMMX, false)); 3738 3739 switch (Triple.getOS()) { 3740 case llvm::Triple::Cygwin: 3741 case llvm::Triple::MinGW32: 3742 case llvm::Triple::AuroraUX: 3743 case llvm::Triple::DragonFly: 3744 case llvm::Triple::FreeBSD: 3745 case llvm::Triple::OpenBSD: 3746 return *(TheTargetCodeGenInfo = 3747 new X86_32TargetCodeGenInfo( 3748 Types, false, true, DisableMMX, false)); 3749 3750 case llvm::Triple::Win32: 3751 return *(TheTargetCodeGenInfo = 3752 new X86_32TargetCodeGenInfo( 3753 Types, false, true, DisableMMX, true)); 3754 3755 default: 3756 return *(TheTargetCodeGenInfo = 3757 new X86_32TargetCodeGenInfo( 3758 Types, false, false, DisableMMX, false)); 3759 } 3760 } 3761 3762 case llvm::Triple::x86_64: { 3763 bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0; 3764 3765 switch (Triple.getOS()) { 3766 case llvm::Triple::Win32: 3767 case llvm::Triple::MinGW32: 3768 case llvm::Triple::Cygwin: 3769 return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types)); 3770 default: 3771 return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types, 3772 HasAVX)); 3773 } 3774 } 3775 case llvm::Triple::hexagon: 3776 return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types)); 3777 } 3778 } 3779
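
// A minimal sketch (illustrative only; 'Foo' is a hypothetical target, not
// part of this file) of how a new target hooks into the dispatch above:
//
//   namespace {
//   class FooABIInfo : public ABIInfo {
//   public:
//     FooABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
//     virtual void computeInfo(CGFunctionInfo &FI) const;
//     virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
//                                    CodeGenFunction &CGF) const;
//   };
//
//   class FooTargetCodeGenInfo : public TargetCodeGenInfo {
//   public:
//     FooTargetCodeGenInfo(CodeGenTypes &CGT)
//       : TargetCodeGenInfo(new FooABIInfo(CGT)) {}
//   };
//   }
//
// and, in getTargetCodeGenInfo():
//
//   case llvm::Triple::foo:
//     return *(TheTargetCodeGenInfo = new FooTargetCodeGenInfo(Types));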