1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // These classes wrap the information about a call or function 11 // definition used to handle ABI compliancy. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "TargetInfo.h" 16 #include "ABIInfo.h" 17 #include "CodeGenFunction.h" 18 #include "clang/AST/RecordLayout.h" 19 #include "clang/Frontend/CodeGenOptions.h" 20 #include "llvm/Type.h" 21 #include "llvm/Target/TargetData.h" 22 #include "llvm/ADT/Triple.h" 23 #include "llvm/Support/raw_ostream.h" 24 using namespace clang; 25 using namespace CodeGen; 26 27 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, 28 llvm::Value *Array, 29 llvm::Value *Value, 30 unsigned FirstIndex, 31 unsigned LastIndex) { 32 // Alternatively, we could emit this as a loop in the source. 33 for (unsigned I = FirstIndex; I <= LastIndex; ++I) { 34 llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I); 35 Builder.CreateStore(Value, Cell); 36 } 37 } 38 39 static bool isAggregateTypeForABI(QualType T) { 40 return CodeGenFunction::hasAggregateLLVMType(T) || 41 T->isMemberFunctionPointerType(); 42 } 43 44 ABIInfo::~ABIInfo() {} 45 46 ASTContext &ABIInfo::getContext() const { 47 return CGT.getContext(); 48 } 49 50 llvm::LLVMContext &ABIInfo::getVMContext() const { 51 return CGT.getLLVMContext(); 52 } 53 54 const llvm::TargetData &ABIInfo::getTargetData() const { 55 return CGT.getTargetData(); 56 } 57 58 59 void ABIArgInfo::dump() const { 60 raw_ostream &OS = llvm::errs(); 61 OS << "(ABIArgInfo Kind="; 62 switch (TheKind) { 63 case Direct: 64 OS << "Direct Type="; 65 if (llvm::Type *Ty = getCoerceToType()) 66 Ty->print(OS); 67 else 68 OS << "null"; 69 break; 70 case Extend: 71 OS << "Extend"; 72 break; 73 case Ignore: 74 OS << "Ignore"; 75 break; 76 case Indirect: 77 OS << "Indirect Align=" << getIndirectAlign() 78 << " ByVal=" << getIndirectByVal() 79 << " Realign=" << getIndirectRealign(); 80 break; 81 case Expand: 82 OS << "Expand"; 83 break; 84 } 85 OS << ")\n"; 86 } 87 88 TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; } 89 90 // If someone can figure out a general rule for this, that would be great. 91 // It's probably just doomed to be platform-dependent, though. 92 unsigned TargetCodeGenInfo::getSizeOfUnwindException() const { 93 // Verified for: 94 // x86-64 FreeBSD, Linux, Darwin 95 // x86-32 FreeBSD, Linux, Darwin 96 // PowerPC Linux, Darwin 97 // ARM Darwin (*not* EABI) 98 return 32; 99 } 100 101 bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args, 102 const FunctionNoProtoType *fnType) const { 103 // The following conventions are known to require this to be false: 104 // x86_stdcall 105 // MIPS 106 // For everything else, we just prefer false unless we opt out. 107 return false; 108 } 109 110 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays); 111 112 /// isEmptyField - Return true iff a the field is "empty", that is it 113 /// is an unnamed bit-field or an (array of) empty record(s). 114 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, 115 bool AllowArrays) { 116 if (FD->isUnnamedBitfield()) 117 return true; 118 119 QualType FT = FD->getType(); 120 121 // Constant arrays of empty records count as empty, strip them off. 122 // Constant arrays of zero length always count as empty. 123 if (AllowArrays) 124 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { 125 if (AT->getSize() == 0) 126 return true; 127 FT = AT->getElementType(); 128 } 129 130 const RecordType *RT = FT->getAs<RecordType>(); 131 if (!RT) 132 return false; 133 134 // C++ record fields are never empty, at least in the Itanium ABI. 135 // 136 // FIXME: We should use a predicate for whether this behavior is true in the 137 // current ABI. 138 if (isa<CXXRecordDecl>(RT->getDecl())) 139 return false; 140 141 return isEmptyRecord(Context, FT, AllowArrays); 142 } 143 144 /// isEmptyRecord - Return true iff a structure contains only empty 145 /// fields. Note that a structure with a flexible array member is not 146 /// considered empty. 147 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) { 148 const RecordType *RT = T->getAs<RecordType>(); 149 if (!RT) 150 return 0; 151 const RecordDecl *RD = RT->getDecl(); 152 if (RD->hasFlexibleArrayMember()) 153 return false; 154 155 // If this is a C++ record, check the bases first. 156 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 157 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 158 e = CXXRD->bases_end(); i != e; ++i) 159 if (!isEmptyRecord(Context, i->getType(), true)) 160 return false; 161 162 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 163 i != e; ++i) 164 if (!isEmptyField(Context, *i, AllowArrays)) 165 return false; 166 return true; 167 } 168 169 /// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either 170 /// a non-trivial destructor or a non-trivial copy constructor. 171 static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) { 172 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 173 if (!RD) 174 return false; 175 176 return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor(); 177 } 178 179 /// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is 180 /// a record type with either a non-trivial destructor or a non-trivial copy 181 /// constructor. 182 static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) { 183 const RecordType *RT = T->getAs<RecordType>(); 184 if (!RT) 185 return false; 186 187 return hasNonTrivialDestructorOrCopyConstructor(RT); 188 } 189 190 /// isSingleElementStruct - Determine if a structure is a "single 191 /// element struct", i.e. it has exactly one non-empty field or 192 /// exactly one field which is itself a single element 193 /// struct. Structures with flexible array members are never 194 /// considered single element structs. 195 /// 196 /// \return The field declaration for the single non-empty field, if 197 /// it exists. 198 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) { 199 const RecordType *RT = T->getAsStructureType(); 200 if (!RT) 201 return 0; 202 203 const RecordDecl *RD = RT->getDecl(); 204 if (RD->hasFlexibleArrayMember()) 205 return 0; 206 207 const Type *Found = 0; 208 209 // If this is a C++ record, check the bases first. 210 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 211 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 212 e = CXXRD->bases_end(); i != e; ++i) { 213 // Ignore empty records. 214 if (isEmptyRecord(Context, i->getType(), true)) 215 continue; 216 217 // If we already found an element then this isn't a single-element struct. 218 if (Found) 219 return 0; 220 221 // If this is non-empty and not a single element struct, the composite 222 // cannot be a single element struct. 223 Found = isSingleElementStruct(i->getType(), Context); 224 if (!Found) 225 return 0; 226 } 227 } 228 229 // Check for single element. 230 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 231 i != e; ++i) { 232 const FieldDecl *FD = *i; 233 QualType FT = FD->getType(); 234 235 // Ignore empty fields. 236 if (isEmptyField(Context, FD, true)) 237 continue; 238 239 // If we already found an element then this isn't a single-element 240 // struct. 241 if (Found) 242 return 0; 243 244 // Treat single element arrays as the element. 245 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { 246 if (AT->getSize().getZExtValue() != 1) 247 break; 248 FT = AT->getElementType(); 249 } 250 251 if (!isAggregateTypeForABI(FT)) { 252 Found = FT.getTypePtr(); 253 } else { 254 Found = isSingleElementStruct(FT, Context); 255 if (!Found) 256 return 0; 257 } 258 } 259 260 // We don't consider a struct a single-element struct if it has 261 // padding beyond the element type. 262 if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T)) 263 return 0; 264 265 return Found; 266 } 267 268 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) { 269 if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() && 270 !Ty->isAnyComplexType() && !Ty->isEnumeralType() && 271 !Ty->isBlockPointerType()) 272 return false; 273 274 uint64_t Size = Context.getTypeSize(Ty); 275 return Size == 32 || Size == 64; 276 } 277 278 /// canExpandIndirectArgument - Test whether an argument type which is to be 279 /// passed indirectly (on the stack) would have the equivalent layout if it was 280 /// expanded into separate arguments. If so, we prefer to do the latter to avoid 281 /// inhibiting optimizations. 282 /// 283 // FIXME: This predicate is missing many cases, currently it just follows 284 // llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We 285 // should probably make this smarter, or better yet make the LLVM backend 286 // capable of handling it. 287 static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) { 288 // We can only expand structure types. 289 const RecordType *RT = Ty->getAs<RecordType>(); 290 if (!RT) 291 return false; 292 293 // We can only expand (C) structures. 294 // 295 // FIXME: This needs to be generalized to handle classes as well. 296 const RecordDecl *RD = RT->getDecl(); 297 if (!RD->isStruct() || isa<CXXRecordDecl>(RD)) 298 return false; 299 300 uint64_t Size = 0; 301 302 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 303 i != e; ++i) { 304 const FieldDecl *FD = *i; 305 306 if (!is32Or64BitBasicType(FD->getType(), Context)) 307 return false; 308 309 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know 310 // how to expand them yet, and the predicate for telling if a bitfield still 311 // counts as "basic" is more complicated than what we were doing previously. 312 if (FD->isBitField()) 313 return false; 314 315 Size += Context.getTypeSize(FD->getType()); 316 } 317 318 // Make sure there are not any holes in the struct. 319 if (Size != Context.getTypeSize(Ty)) 320 return false; 321 322 return true; 323 } 324 325 namespace { 326 /// DefaultABIInfo - The default implementation for ABI specific 327 /// details. This implementation provides information which results in 328 /// self-consistent and sensible LLVM IR generation, but does not 329 /// conform to any particular ABI. 330 class DefaultABIInfo : public ABIInfo { 331 public: 332 DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 333 334 ABIArgInfo classifyReturnType(QualType RetTy) const; 335 ABIArgInfo classifyArgumentType(QualType RetTy) const; 336 337 virtual void computeInfo(CGFunctionInfo &FI) const { 338 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 339 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 340 it != ie; ++it) 341 it->info = classifyArgumentType(it->type); 342 } 343 344 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 345 CodeGenFunction &CGF) const; 346 }; 347 348 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo { 349 public: 350 DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 351 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 352 }; 353 354 llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 355 CodeGenFunction &CGF) const { 356 return 0; 357 } 358 359 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const { 360 if (isAggregateTypeForABI(Ty)) { 361 // Records with non trivial destructors/constructors should not be passed 362 // by value. 363 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 364 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 365 366 return ABIArgInfo::getIndirect(0); 367 } 368 369 // Treat an enum type as its underlying type. 370 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 371 Ty = EnumTy->getDecl()->getIntegerType(); 372 373 return (Ty->isPromotableIntegerType() ? 374 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 375 } 376 377 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const { 378 if (RetTy->isVoidType()) 379 return ABIArgInfo::getIgnore(); 380 381 if (isAggregateTypeForABI(RetTy)) 382 return ABIArgInfo::getIndirect(0); 383 384 // Treat an enum type as its underlying type. 385 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 386 RetTy = EnumTy->getDecl()->getIntegerType(); 387 388 return (RetTy->isPromotableIntegerType() ? 389 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 390 } 391 392 /// UseX86_MMXType - Return true if this is an MMX type that should use the 393 /// special x86_mmx type. 394 bool UseX86_MMXType(llvm::Type *IRType) { 395 // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the 396 // special x86_mmx type. 397 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 && 398 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() && 399 IRType->getScalarSizeInBits() != 64; 400 } 401 402 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 403 StringRef Constraint, 404 llvm::Type* Ty) { 405 if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) 406 return llvm::Type::getX86_MMXTy(CGF.getLLVMContext()); 407 return Ty; 408 } 409 410 //===----------------------------------------------------------------------===// 411 // X86-32 ABI Implementation 412 //===----------------------------------------------------------------------===// 413 414 /// X86_32ABIInfo - The X86-32 ABI information. 415 class X86_32ABIInfo : public ABIInfo { 416 static const unsigned MinABIStackAlignInBytes = 4; 417 418 bool IsDarwinVectorABI; 419 bool IsSmallStructInRegABI; 420 bool IsMMXDisabled; 421 bool IsWin32FloatStructABI; 422 423 static bool isRegisterSize(unsigned Size) { 424 return (Size == 8 || Size == 16 || Size == 32 || Size == 64); 425 } 426 427 static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context, 428 unsigned callingConvention); 429 430 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 431 /// such that the argument will be passed in memory. 432 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const; 433 434 /// \brief Return the alignment to use for the given type on the stack. 435 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const; 436 437 public: 438 439 ABIArgInfo classifyReturnType(QualType RetTy, 440 unsigned callingConvention) const; 441 ABIArgInfo classifyArgumentType(QualType RetTy) const; 442 443 virtual void computeInfo(CGFunctionInfo &FI) const { 444 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), 445 FI.getCallingConvention()); 446 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 447 it != ie; ++it) 448 it->info = classifyArgumentType(it->type); 449 } 450 451 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 452 CodeGenFunction &CGF) const; 453 454 X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w) 455 : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p), 456 IsMMXDisabled(m), IsWin32FloatStructABI(w) {} 457 }; 458 459 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo { 460 public: 461 X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 462 bool d, bool p, bool m, bool w) 463 :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w)) {} 464 465 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 466 CodeGen::CodeGenModule &CGM) const; 467 468 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 469 // Darwin uses different dwarf register numbers for EH. 470 if (CGM.isTargetDarwin()) return 5; 471 472 return 4; 473 } 474 475 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 476 llvm::Value *Address) const; 477 478 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 479 StringRef Constraint, 480 llvm::Type* Ty) const { 481 return X86AdjustInlineAsmType(CGF, Constraint, Ty); 482 } 483 484 }; 485 486 } 487 488 /// shouldReturnTypeInRegister - Determine if the given type should be 489 /// passed in a register (for the Darwin ABI). 490 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, 491 ASTContext &Context, 492 unsigned callingConvention) { 493 uint64_t Size = Context.getTypeSize(Ty); 494 495 // Type must be register sized. 496 if (!isRegisterSize(Size)) 497 return false; 498 499 if (Ty->isVectorType()) { 500 // 64- and 128- bit vectors inside structures are not returned in 501 // registers. 502 if (Size == 64 || Size == 128) 503 return false; 504 505 return true; 506 } 507 508 // If this is a builtin, pointer, enum, complex type, member pointer, or 509 // member function pointer it is ok. 510 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() || 511 Ty->isAnyComplexType() || Ty->isEnumeralType() || 512 Ty->isBlockPointerType() || Ty->isMemberPointerType()) 513 return true; 514 515 // Arrays are treated like records. 516 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) 517 return shouldReturnTypeInRegister(AT->getElementType(), Context, 518 callingConvention); 519 520 // Otherwise, it must be a record type. 521 const RecordType *RT = Ty->getAs<RecordType>(); 522 if (!RT) return false; 523 524 // FIXME: Traverse bases here too. 525 526 // For thiscall conventions, structures will never be returned in 527 // a register. This is for compatibility with the MSVC ABI 528 if (callingConvention == llvm::CallingConv::X86_ThisCall && 529 RT->isStructureType()) { 530 return false; 531 } 532 533 // Structure types are passed in register if all fields would be 534 // passed in a register. 535 for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(), 536 e = RT->getDecl()->field_end(); i != e; ++i) { 537 const FieldDecl *FD = *i; 538 539 // Empty fields are ignored. 540 if (isEmptyField(Context, FD, true)) 541 continue; 542 543 // Check fields recursively. 544 if (!shouldReturnTypeInRegister(FD->getType(), Context, 545 callingConvention)) 546 return false; 547 } 548 return true; 549 } 550 551 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, 552 unsigned callingConvention) const { 553 if (RetTy->isVoidType()) 554 return ABIArgInfo::getIgnore(); 555 556 if (const VectorType *VT = RetTy->getAs<VectorType>()) { 557 // On Darwin, some vectors are returned in registers. 558 if (IsDarwinVectorABI) { 559 uint64_t Size = getContext().getTypeSize(RetTy); 560 561 // 128-bit vectors are a special case; they are returned in 562 // registers and we need to make sure to pick a type the LLVM 563 // backend will like. 564 if (Size == 128) 565 return ABIArgInfo::getDirect(llvm::VectorType::get( 566 llvm::Type::getInt64Ty(getVMContext()), 2)); 567 568 // Always return in register if it fits in a general purpose 569 // register, or if it is 64 bits and has a single element. 570 if ((Size == 8 || Size == 16 || Size == 32) || 571 (Size == 64 && VT->getNumElements() == 1)) 572 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 573 Size)); 574 575 return ABIArgInfo::getIndirect(0); 576 } 577 578 return ABIArgInfo::getDirect(); 579 } 580 581 if (isAggregateTypeForABI(RetTy)) { 582 if (const RecordType *RT = RetTy->getAs<RecordType>()) { 583 // Structures with either a non-trivial destructor or a non-trivial 584 // copy constructor are always indirect. 585 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 586 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 587 588 // Structures with flexible arrays are always indirect. 589 if (RT->getDecl()->hasFlexibleArrayMember()) 590 return ABIArgInfo::getIndirect(0); 591 } 592 593 // If specified, structs and unions are always indirect. 594 if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType()) 595 return ABIArgInfo::getIndirect(0); 596 597 // Small structures which are register sized are generally returned 598 // in a register. 599 if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(), 600 callingConvention)) { 601 uint64_t Size = getContext().getTypeSize(RetTy); 602 603 // As a special-case, if the struct is a "single-element" struct, and 604 // the field is of type "float" or "double", return it in a 605 // floating-point register. (MSVC does not apply this special case.) 606 // We apply a similar transformation for pointer types to improve the 607 // quality of the generated IR. 608 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) 609 if ((!IsWin32FloatStructABI && SeltTy->isRealFloatingType()) 610 || SeltTy->hasPointerRepresentation()) 611 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 612 613 // FIXME: We should be able to narrow this integer in cases with dead 614 // padding. 615 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size)); 616 } 617 618 return ABIArgInfo::getIndirect(0); 619 } 620 621 // Treat an enum type as its underlying type. 622 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 623 RetTy = EnumTy->getDecl()->getIntegerType(); 624 625 return (RetTy->isPromotableIntegerType() ? 626 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 627 } 628 629 static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) { 630 const RecordType *RT = Ty->getAs<RecordType>(); 631 if (!RT) 632 return 0; 633 const RecordDecl *RD = RT->getDecl(); 634 635 // If this is a C++ record, check the bases first. 636 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 637 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 638 e = CXXRD->bases_end(); i != e; ++i) 639 if (!isRecordWithSSEVectorType(Context, i->getType())) 640 return false; 641 642 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 643 i != e; ++i) { 644 QualType FT = i->getType(); 645 646 if (FT->getAs<VectorType>() && Context.getTypeSize(FT) == 128) 647 return true; 648 649 if (isRecordWithSSEVectorType(Context, FT)) 650 return true; 651 } 652 653 return false; 654 } 655 656 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty, 657 unsigned Align) const { 658 // Otherwise, if the alignment is less than or equal to the minimum ABI 659 // alignment, just use the default; the backend will handle this. 660 if (Align <= MinABIStackAlignInBytes) 661 return 0; // Use default alignment. 662 663 // On non-Darwin, the stack type alignment is always 4. 664 if (!IsDarwinVectorABI) { 665 // Set explicit alignment, since we may need to realign the top. 666 return MinABIStackAlignInBytes; 667 } 668 669 // Otherwise, if the type contains an SSE vector type, the alignment is 16. 670 if (Align >= 16 && isRecordWithSSEVectorType(getContext(), Ty)) 671 return 16; 672 673 return MinABIStackAlignInBytes; 674 } 675 676 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const { 677 if (!ByVal) 678 return ABIArgInfo::getIndirect(0, false); 679 680 // Compute the byval alignment. 681 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 682 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign); 683 if (StackAlign == 0) 684 return ABIArgInfo::getIndirect(4); 685 686 // If the stack alignment is less than the type alignment, realign the 687 // argument. 688 if (StackAlign < TypeAlign) 689 return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, 690 /*Realign=*/true); 691 692 return ABIArgInfo::getIndirect(StackAlign); 693 } 694 695 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const { 696 // FIXME: Set alignment on indirect arguments. 697 if (isAggregateTypeForABI(Ty)) { 698 // Structures with flexible arrays are always indirect. 699 if (const RecordType *RT = Ty->getAs<RecordType>()) { 700 // Structures with either a non-trivial destructor or a non-trivial 701 // copy constructor are always indirect. 702 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 703 return getIndirectResult(Ty, /*ByVal=*/false); 704 705 if (RT->getDecl()->hasFlexibleArrayMember()) 706 return getIndirectResult(Ty); 707 } 708 709 // Ignore empty structs/unions. 710 if (isEmptyRecord(getContext(), Ty, true)) 711 return ABIArgInfo::getIgnore(); 712 713 // Expand small (<= 128-bit) record types when we know that the stack layout 714 // of those arguments will match the struct. This is important because the 715 // LLVM backend isn't smart enough to remove byval, which inhibits many 716 // optimizations. 717 if (getContext().getTypeSize(Ty) <= 4*32 && 718 canExpandIndirectArgument(Ty, getContext())) 719 return ABIArgInfo::getExpand(); 720 721 return getIndirectResult(Ty); 722 } 723 724 if (const VectorType *VT = Ty->getAs<VectorType>()) { 725 // On Darwin, some vectors are passed in memory, we handle this by passing 726 // it as an i8/i16/i32/i64. 727 if (IsDarwinVectorABI) { 728 uint64_t Size = getContext().getTypeSize(Ty); 729 if ((Size == 8 || Size == 16 || Size == 32) || 730 (Size == 64 && VT->getNumElements() == 1)) 731 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 732 Size)); 733 } 734 735 llvm::Type *IRType = CGT.ConvertType(Ty); 736 if (UseX86_MMXType(IRType)) { 737 if (IsMMXDisabled) 738 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 739 64)); 740 ABIArgInfo AAI = ABIArgInfo::getDirect(IRType); 741 AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext())); 742 return AAI; 743 } 744 745 return ABIArgInfo::getDirect(); 746 } 747 748 749 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 750 Ty = EnumTy->getDecl()->getIntegerType(); 751 752 return (Ty->isPromotableIntegerType() ? 753 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 754 } 755 756 llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 757 CodeGenFunction &CGF) const { 758 llvm::Type *BPP = CGF.Int8PtrPtrTy; 759 760 CGBuilderTy &Builder = CGF.Builder; 761 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 762 "ap"); 763 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 764 765 // Compute if the address needs to be aligned 766 unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity(); 767 Align = getTypeStackAlignInBytes(Ty, Align); 768 Align = std::max(Align, 4U); 769 if (Align > 4) { 770 // addr = (addr + align - 1) & -align; 771 llvm::Value *Offset = 772 llvm::ConstantInt::get(CGF.Int32Ty, Align - 1); 773 Addr = CGF.Builder.CreateGEP(Addr, Offset); 774 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr, 775 CGF.Int32Ty); 776 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align); 777 Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 778 Addr->getType(), 779 "ap.cur.aligned"); 780 } 781 782 llvm::Type *PTy = 783 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 784 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 785 786 uint64_t Offset = 787 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align); 788 llvm::Value *NextAddr = 789 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 790 "ap.next"); 791 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 792 793 return AddrTyped; 794 } 795 796 void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 797 llvm::GlobalValue *GV, 798 CodeGen::CodeGenModule &CGM) const { 799 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 800 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 801 // Get the LLVM function. 802 llvm::Function *Fn = cast<llvm::Function>(GV); 803 804 // Now add the 'alignstack' attribute with a value of 16. 805 Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16)); 806 } 807 } 808 } 809 810 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( 811 CodeGen::CodeGenFunction &CGF, 812 llvm::Value *Address) const { 813 CodeGen::CGBuilderTy &Builder = CGF.Builder; 814 815 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 816 817 // 0-7 are the eight integer registers; the order is different 818 // on Darwin (for EH), but the range is the same. 819 // 8 is %eip. 820 AssignToArrayRange(Builder, Address, Four8, 0, 8); 821 822 if (CGF.CGM.isTargetDarwin()) { 823 // 12-16 are st(0..4). Not sure why we stop at 4. 824 // These have size 16, which is sizeof(long double) on 825 // platforms with 8-byte alignment for that type. 826 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16); 827 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16); 828 829 } else { 830 // 9 is %eflags, which doesn't get a size on Darwin for some 831 // reason. 832 Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9)); 833 834 // 11-16 are st(0..5). Not sure why we stop at 5. 835 // These have size 12, which is sizeof(long double) on 836 // platforms with 4-byte alignment for that type. 837 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12); 838 AssignToArrayRange(Builder, Address, Twelve8, 11, 16); 839 } 840 841 return false; 842 } 843 844 //===----------------------------------------------------------------------===// 845 // X86-64 ABI Implementation 846 //===----------------------------------------------------------------------===// 847 848 849 namespace { 850 /// X86_64ABIInfo - The X86_64 ABI information. 851 class X86_64ABIInfo : public ABIInfo { 852 enum Class { 853 Integer = 0, 854 SSE, 855 SSEUp, 856 X87, 857 X87Up, 858 ComplexX87, 859 NoClass, 860 Memory 861 }; 862 863 /// merge - Implement the X86_64 ABI merging algorithm. 864 /// 865 /// Merge an accumulating classification \arg Accum with a field 866 /// classification \arg Field. 867 /// 868 /// \param Accum - The accumulating classification. This should 869 /// always be either NoClass or the result of a previous merge 870 /// call. In addition, this should never be Memory (the caller 871 /// should just return Memory for the aggregate). 872 static Class merge(Class Accum, Class Field); 873 874 /// postMerge - Implement the X86_64 ABI post merging algorithm. 875 /// 876 /// Post merger cleanup, reduces a malformed Hi and Lo pair to 877 /// final MEMORY or SSE classes when necessary. 878 /// 879 /// \param AggregateSize - The size of the current aggregate in 880 /// the classification process. 881 /// 882 /// \param Lo - The classification for the parts of the type 883 /// residing in the low word of the containing object. 884 /// 885 /// \param Hi - The classification for the parts of the type 886 /// residing in the higher words of the containing object. 887 /// 888 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const; 889 890 /// classify - Determine the x86_64 register classes in which the 891 /// given type T should be passed. 892 /// 893 /// \param Lo - The classification for the parts of the type 894 /// residing in the low word of the containing object. 895 /// 896 /// \param Hi - The classification for the parts of the type 897 /// residing in the high word of the containing object. 898 /// 899 /// \param OffsetBase - The bit offset of this type in the 900 /// containing object. Some parameters are classified different 901 /// depending on whether they straddle an eightbyte boundary. 902 /// 903 /// If a word is unused its result will be NoClass; if a type should 904 /// be passed in Memory then at least the classification of \arg Lo 905 /// will be Memory. 906 /// 907 /// The \arg Lo class will be NoClass iff the argument is ignored. 908 /// 909 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will 910 /// also be ComplexX87. 911 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const; 912 913 llvm::Type *GetByteVectorType(QualType Ty) const; 914 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType, 915 unsigned IROffset, QualType SourceTy, 916 unsigned SourceOffset) const; 917 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType, 918 unsigned IROffset, QualType SourceTy, 919 unsigned SourceOffset) const; 920 921 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 922 /// such that the argument will be returned in memory. 923 ABIArgInfo getIndirectReturnResult(QualType Ty) const; 924 925 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 926 /// such that the argument will be passed in memory. 927 ABIArgInfo getIndirectResult(QualType Ty) const; 928 929 ABIArgInfo classifyReturnType(QualType RetTy) const; 930 931 ABIArgInfo classifyArgumentType(QualType Ty, 932 unsigned &neededInt, 933 unsigned &neededSSE) const; 934 935 bool IsIllegalVectorType(QualType Ty) const; 936 937 /// The 0.98 ABI revision clarified a lot of ambiguities, 938 /// unfortunately in ways that were not always consistent with 939 /// certain previous compilers. In particular, platforms which 940 /// required strict binary compatibility with older versions of GCC 941 /// may need to exempt themselves. 942 bool honorsRevision0_98() const { 943 return !getContext().getTargetInfo().getTriple().isOSDarwin(); 944 } 945 946 bool HasAVX; 947 948 public: 949 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) : 950 ABIInfo(CGT), HasAVX(hasavx) {} 951 952 bool isPassedUsingAVXType(QualType type) const { 953 unsigned neededInt, neededSSE; 954 ABIArgInfo info = classifyArgumentType(type, neededInt, neededSSE); 955 if (info.isDirect()) { 956 llvm::Type *ty = info.getCoerceToType(); 957 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty)) 958 return (vectorTy->getBitWidth() > 128); 959 } 960 return false; 961 } 962 963 virtual void computeInfo(CGFunctionInfo &FI) const; 964 965 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 966 CodeGenFunction &CGF) const; 967 }; 968 969 /// WinX86_64ABIInfo - The Windows X86_64 ABI information. 970 class WinX86_64ABIInfo : public ABIInfo { 971 972 ABIArgInfo classify(QualType Ty) const; 973 974 public: 975 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 976 977 virtual void computeInfo(CGFunctionInfo &FI) const; 978 979 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 980 CodeGenFunction &CGF) const; 981 }; 982 983 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo { 984 public: 985 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) 986 : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {} 987 988 const X86_64ABIInfo &getABIInfo() const { 989 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo()); 990 } 991 992 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 993 return 7; 994 } 995 996 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 997 llvm::Value *Address) const { 998 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 999 1000 // 0-15 are the 16 integer registers. 1001 // 16 is %rip. 1002 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 1003 return false; 1004 } 1005 1006 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 1007 StringRef Constraint, 1008 llvm::Type* Ty) const { 1009 return X86AdjustInlineAsmType(CGF, Constraint, Ty); 1010 } 1011 1012 bool isNoProtoCallVariadic(const CallArgList &args, 1013 const FunctionNoProtoType *fnType) const { 1014 // The default CC on x86-64 sets %al to the number of SSA 1015 // registers used, and GCC sets this when calling an unprototyped 1016 // function, so we override the default behavior. However, don't do 1017 // that when AVX types are involved: the ABI explicitly states it is 1018 // undefined, and it doesn't work in practice because of how the ABI 1019 // defines varargs anyway. 1020 if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) { 1021 bool HasAVXType = false; 1022 for (CallArgList::const_iterator 1023 it = args.begin(), ie = args.end(); it != ie; ++it) { 1024 if (getABIInfo().isPassedUsingAVXType(it->Ty)) { 1025 HasAVXType = true; 1026 break; 1027 } 1028 } 1029 1030 if (!HasAVXType) 1031 return true; 1032 } 1033 1034 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType); 1035 } 1036 1037 }; 1038 1039 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 1040 public: 1041 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 1042 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {} 1043 1044 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 1045 return 7; 1046 } 1047 1048 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 1049 llvm::Value *Address) const { 1050 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 1051 1052 // 0-15 are the 16 integer registers. 1053 // 16 is %rip. 1054 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 1055 return false; 1056 } 1057 }; 1058 1059 } 1060 1061 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, 1062 Class &Hi) const { 1063 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: 1064 // 1065 // (a) If one of the classes is Memory, the whole argument is passed in 1066 // memory. 1067 // 1068 // (b) If X87UP is not preceded by X87, the whole argument is passed in 1069 // memory. 1070 // 1071 // (c) If the size of the aggregate exceeds two eightbytes and the first 1072 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole 1073 // argument is passed in memory. NOTE: This is necessary to keep the 1074 // ABI working for processors that don't support the __m256 type. 1075 // 1076 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. 1077 // 1078 // Some of these are enforced by the merging logic. Others can arise 1079 // only with unions; for example: 1080 // union { _Complex double; unsigned; } 1081 // 1082 // Note that clauses (b) and (c) were added in 0.98. 1083 // 1084 if (Hi == Memory) 1085 Lo = Memory; 1086 if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) 1087 Lo = Memory; 1088 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) 1089 Lo = Memory; 1090 if (Hi == SSEUp && Lo != SSE) 1091 Hi = SSE; 1092 } 1093 1094 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { 1095 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 1096 // classified recursively so that always two fields are 1097 // considered. The resulting class is calculated according to 1098 // the classes of the fields in the eightbyte: 1099 // 1100 // (a) If both classes are equal, this is the resulting class. 1101 // 1102 // (b) If one of the classes is NO_CLASS, the resulting class is 1103 // the other class. 1104 // 1105 // (c) If one of the classes is MEMORY, the result is the MEMORY 1106 // class. 1107 // 1108 // (d) If one of the classes is INTEGER, the result is the 1109 // INTEGER. 1110 // 1111 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 1112 // MEMORY is used as class. 1113 // 1114 // (f) Otherwise class SSE is used. 1115 1116 // Accum should never be memory (we should have returned) or 1117 // ComplexX87 (because this cannot be passed in a structure). 1118 assert((Accum != Memory && Accum != ComplexX87) && 1119 "Invalid accumulated classification during merge."); 1120 if (Accum == Field || Field == NoClass) 1121 return Accum; 1122 if (Field == Memory) 1123 return Memory; 1124 if (Accum == NoClass) 1125 return Field; 1126 if (Accum == Integer || Field == Integer) 1127 return Integer; 1128 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 1129 Accum == X87 || Accum == X87Up) 1130 return Memory; 1131 return SSE; 1132 } 1133 1134 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, 1135 Class &Lo, Class &Hi) const { 1136 // FIXME: This code can be simplified by introducing a simple value class for 1137 // Class pairs with appropriate constructor methods for the various 1138 // situations. 1139 1140 // FIXME: Some of the split computations are wrong; unaligned vectors 1141 // shouldn't be passed in registers for example, so there is no chance they 1142 // can straddle an eightbyte. Verify & simplify. 1143 1144 Lo = Hi = NoClass; 1145 1146 Class &Current = OffsetBase < 64 ? Lo : Hi; 1147 Current = Memory; 1148 1149 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 1150 BuiltinType::Kind k = BT->getKind(); 1151 1152 if (k == BuiltinType::Void) { 1153 Current = NoClass; 1154 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 1155 Lo = Integer; 1156 Hi = Integer; 1157 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 1158 Current = Integer; 1159 } else if (k == BuiltinType::Float || k == BuiltinType::Double) { 1160 Current = SSE; 1161 } else if (k == BuiltinType::LongDouble) { 1162 Lo = X87; 1163 Hi = X87Up; 1164 } 1165 // FIXME: _Decimal32 and _Decimal64 are SSE. 1166 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 1167 return; 1168 } 1169 1170 if (const EnumType *ET = Ty->getAs<EnumType>()) { 1171 // Classify the underlying integer type. 1172 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi); 1173 return; 1174 } 1175 1176 if (Ty->hasPointerRepresentation()) { 1177 Current = Integer; 1178 return; 1179 } 1180 1181 if (Ty->isMemberPointerType()) { 1182 if (Ty->isMemberFunctionPointerType()) 1183 Lo = Hi = Integer; 1184 else 1185 Current = Integer; 1186 return; 1187 } 1188 1189 if (const VectorType *VT = Ty->getAs<VectorType>()) { 1190 uint64_t Size = getContext().getTypeSize(VT); 1191 if (Size == 32) { 1192 // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x 1193 // float> as integer. 1194 Current = Integer; 1195 1196 // If this type crosses an eightbyte boundary, it should be 1197 // split. 1198 uint64_t EB_Real = (OffsetBase) / 64; 1199 uint64_t EB_Imag = (OffsetBase + Size - 1) / 64; 1200 if (EB_Real != EB_Imag) 1201 Hi = Lo; 1202 } else if (Size == 64) { 1203 // gcc passes <1 x double> in memory. :( 1204 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) 1205 return; 1206 1207 // gcc passes <1 x long long> as INTEGER. 1208 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) || 1209 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) || 1210 VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) || 1211 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong)) 1212 Current = Integer; 1213 else 1214 Current = SSE; 1215 1216 // If this type crosses an eightbyte boundary, it should be 1217 // split. 1218 if (OffsetBase && OffsetBase != 64) 1219 Hi = Lo; 1220 } else if (Size == 128 || (HasAVX && Size == 256)) { 1221 // Arguments of 256-bits are split into four eightbyte chunks. The 1222 // least significant one belongs to class SSE and all the others to class 1223 // SSEUP. The original Lo and Hi design considers that types can't be 1224 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. 1225 // This design isn't correct for 256-bits, but since there're no cases 1226 // where the upper parts would need to be inspected, avoid adding 1227 // complexity and just consider Hi to match the 64-256 part. 1228 Lo = SSE; 1229 Hi = SSEUp; 1230 } 1231 return; 1232 } 1233 1234 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 1235 QualType ET = getContext().getCanonicalType(CT->getElementType()); 1236 1237 uint64_t Size = getContext().getTypeSize(Ty); 1238 if (ET->isIntegralOrEnumerationType()) { 1239 if (Size <= 64) 1240 Current = Integer; 1241 else if (Size <= 128) 1242 Lo = Hi = Integer; 1243 } else if (ET == getContext().FloatTy) 1244 Current = SSE; 1245 else if (ET == getContext().DoubleTy) 1246 Lo = Hi = SSE; 1247 else if (ET == getContext().LongDoubleTy) 1248 Current = ComplexX87; 1249 1250 // If this complex type crosses an eightbyte boundary then it 1251 // should be split. 1252 uint64_t EB_Real = (OffsetBase) / 64; 1253 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; 1254 if (Hi == NoClass && EB_Real != EB_Imag) 1255 Hi = Lo; 1256 1257 return; 1258 } 1259 1260 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 1261 // Arrays are treated like structures. 1262 1263 uint64_t Size = getContext().getTypeSize(Ty); 1264 1265 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 1266 // than four eightbytes, ..., it has class MEMORY. 1267 if (Size > 256) 1268 return; 1269 1270 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 1271 // fields, it has class MEMORY. 1272 // 1273 // Only need to check alignment of array base. 1274 if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) 1275 return; 1276 1277 // Otherwise implement simplified merge. We could be smarter about 1278 // this, but it isn't worth it and would be harder to verify. 1279 Current = NoClass; 1280 uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); 1281 uint64_t ArraySize = AT->getSize().getZExtValue(); 1282 1283 // The only case a 256-bit wide vector could be used is when the array 1284 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 1285 // to work for sizes wider than 128, early check and fallback to memory. 1286 if (Size > 128 && EltSize != 256) 1287 return; 1288 1289 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 1290 Class FieldLo, FieldHi; 1291 classify(AT->getElementType(), Offset, FieldLo, FieldHi); 1292 Lo = merge(Lo, FieldLo); 1293 Hi = merge(Hi, FieldHi); 1294 if (Lo == Memory || Hi == Memory) 1295 break; 1296 } 1297 1298 postMerge(Size, Lo, Hi); 1299 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 1300 return; 1301 } 1302 1303 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1304 uint64_t Size = getContext().getTypeSize(Ty); 1305 1306 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 1307 // than four eightbytes, ..., it has class MEMORY. 1308 if (Size > 256) 1309 return; 1310 1311 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial 1312 // copy constructor or a non-trivial destructor, it is passed by invisible 1313 // reference. 1314 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 1315 return; 1316 1317 const RecordDecl *RD = RT->getDecl(); 1318 1319 // Assume variable sized types are passed in memory. 1320 if (RD->hasFlexibleArrayMember()) 1321 return; 1322 1323 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 1324 1325 // Reset Lo class, this will be recomputed. 1326 Current = NoClass; 1327 1328 // If this is a C++ record, classify the bases first. 1329 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1330 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 1331 e = CXXRD->bases_end(); i != e; ++i) { 1332 assert(!i->isVirtual() && !i->getType()->isDependentType() && 1333 "Unexpected base class!"); 1334 const CXXRecordDecl *Base = 1335 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 1336 1337 // Classify this field. 1338 // 1339 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 1340 // single eightbyte, each is classified separately. Each eightbyte gets 1341 // initialized to class NO_CLASS. 1342 Class FieldLo, FieldHi; 1343 uint64_t Offset = OffsetBase + Layout.getBaseClassOffsetInBits(Base); 1344 classify(i->getType(), Offset, FieldLo, FieldHi); 1345 Lo = merge(Lo, FieldLo); 1346 Hi = merge(Hi, FieldHi); 1347 if (Lo == Memory || Hi == Memory) 1348 break; 1349 } 1350 } 1351 1352 // Classify the fields one at a time, merging the results. 1353 unsigned idx = 0; 1354 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1355 i != e; ++i, ++idx) { 1356 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 1357 bool BitField = i->isBitField(); 1358 1359 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than 1360 // four eightbytes, or it contains unaligned fields, it has class MEMORY. 1361 // 1362 // The only case a 256-bit wide vector could be used is when the struct 1363 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 1364 // to work for sizes wider than 128, early check and fallback to memory. 1365 // 1366 if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) { 1367 Lo = Memory; 1368 return; 1369 } 1370 // Note, skip this test for bit-fields, see below. 1371 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { 1372 Lo = Memory; 1373 return; 1374 } 1375 1376 // Classify this field. 1377 // 1378 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate 1379 // exceeds a single eightbyte, each is classified 1380 // separately. Each eightbyte gets initialized to class 1381 // NO_CLASS. 1382 Class FieldLo, FieldHi; 1383 1384 // Bit-fields require special handling, they do not force the 1385 // structure to be passed in memory even if unaligned, and 1386 // therefore they can straddle an eightbyte. 1387 if (BitField) { 1388 // Ignore padding bit-fields. 1389 if (i->isUnnamedBitfield()) 1390 continue; 1391 1392 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 1393 uint64_t Size = i->getBitWidthValue(getContext()); 1394 1395 uint64_t EB_Lo = Offset / 64; 1396 uint64_t EB_Hi = (Offset + Size - 1) / 64; 1397 FieldLo = FieldHi = NoClass; 1398 if (EB_Lo) { 1399 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 1400 FieldLo = NoClass; 1401 FieldHi = Integer; 1402 } else { 1403 FieldLo = Integer; 1404 FieldHi = EB_Hi ? Integer : NoClass; 1405 } 1406 } else 1407 classify(i->getType(), Offset, FieldLo, FieldHi); 1408 Lo = merge(Lo, FieldLo); 1409 Hi = merge(Hi, FieldHi); 1410 if (Lo == Memory || Hi == Memory) 1411 break; 1412 } 1413 1414 postMerge(Size, Lo, Hi); 1415 } 1416 } 1417 1418 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { 1419 // If this is a scalar LLVM value then assume LLVM will pass it in the right 1420 // place naturally. 1421 if (!isAggregateTypeForABI(Ty)) { 1422 // Treat an enum type as its underlying type. 1423 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1424 Ty = EnumTy->getDecl()->getIntegerType(); 1425 1426 return (Ty->isPromotableIntegerType() ? 1427 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1428 } 1429 1430 return ABIArgInfo::getIndirect(0); 1431 } 1432 1433 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { 1434 if (const VectorType *VecTy = Ty->getAs<VectorType>()) { 1435 uint64_t Size = getContext().getTypeSize(VecTy); 1436 unsigned LargestVector = HasAVX ? 256 : 128; 1437 if (Size <= 64 || Size > LargestVector) 1438 return true; 1439 } 1440 1441 return false; 1442 } 1443 1444 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const { 1445 // If this is a scalar LLVM value then assume LLVM will pass it in the right 1446 // place naturally. 1447 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) { 1448 // Treat an enum type as its underlying type. 1449 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1450 Ty = EnumTy->getDecl()->getIntegerType(); 1451 1452 return (Ty->isPromotableIntegerType() ? 1453 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1454 } 1455 1456 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 1457 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 1458 1459 // Compute the byval alignment. We specify the alignment of the byval in all 1460 // cases so that the mid-level optimizer knows the alignment of the byval. 1461 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); 1462 return ABIArgInfo::getIndirect(Align); 1463 } 1464 1465 /// GetByteVectorType - The ABI specifies that a value should be passed in an 1466 /// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a 1467 /// vector register. 1468 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const { 1469 llvm::Type *IRType = CGT.ConvertType(Ty); 1470 1471 // Wrapper structs that just contain vectors are passed just like vectors, 1472 // strip them off if present. 1473 llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType); 1474 while (STy && STy->getNumElements() == 1) { 1475 IRType = STy->getElementType(0); 1476 STy = dyn_cast<llvm::StructType>(IRType); 1477 } 1478 1479 // If the preferred type is a 16-byte vector, prefer to pass it. 1480 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){ 1481 llvm::Type *EltTy = VT->getElementType(); 1482 unsigned BitWidth = VT->getBitWidth(); 1483 if ((BitWidth >= 128 && BitWidth <= 256) && 1484 (EltTy->isFloatTy() || EltTy->isDoubleTy() || 1485 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) || 1486 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) || 1487 EltTy->isIntegerTy(128))) 1488 return VT; 1489 } 1490 1491 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2); 1492 } 1493 1494 /// BitsContainNoUserData - Return true if the specified [start,end) bit range 1495 /// is known to either be off the end of the specified type or being in 1496 /// alignment padding. The user type specified is known to be at most 128 bits 1497 /// in size, and have passed through X86_64ABIInfo::classify with a successful 1498 /// classification that put one of the two halves in the INTEGER class. 1499 /// 1500 /// It is conservatively correct to return false. 1501 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, 1502 unsigned EndBit, ASTContext &Context) { 1503 // If the bytes being queried are off the end of the type, there is no user 1504 // data hiding here. This handles analysis of builtins, vectors and other 1505 // types that don't contain interesting padding. 1506 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 1507 if (TySize <= StartBit) 1508 return true; 1509 1510 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 1511 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 1512 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 1513 1514 // Check each element to see if the element overlaps with the queried range. 1515 for (unsigned i = 0; i != NumElts; ++i) { 1516 // If the element is after the span we care about, then we're done.. 1517 unsigned EltOffset = i*EltSize; 1518 if (EltOffset >= EndBit) break; 1519 1520 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0; 1521 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 1522 EndBit-EltOffset, Context)) 1523 return false; 1524 } 1525 // If it overlaps no elements, then it is safe to process as padding. 1526 return true; 1527 } 1528 1529 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1530 const RecordDecl *RD = RT->getDecl(); 1531 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 1532 1533 // If this is a C++ record, check the bases first. 1534 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1535 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 1536 e = CXXRD->bases_end(); i != e; ++i) { 1537 assert(!i->isVirtual() && !i->getType()->isDependentType() && 1538 "Unexpected base class!"); 1539 const CXXRecordDecl *Base = 1540 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 1541 1542 // If the base is after the span we care about, ignore it. 1543 unsigned BaseOffset = (unsigned)Layout.getBaseClassOffsetInBits(Base); 1544 if (BaseOffset >= EndBit) continue; 1545 1546 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 1547 if (!BitsContainNoUserData(i->getType(), BaseStart, 1548 EndBit-BaseOffset, Context)) 1549 return false; 1550 } 1551 } 1552 1553 // Verify that no field has data that overlaps the region of interest. Yes 1554 // this could be sped up a lot by being smarter about queried fields, 1555 // however we're only looking at structs up to 16 bytes, so we don't care 1556 // much. 1557 unsigned idx = 0; 1558 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1559 i != e; ++i, ++idx) { 1560 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 1561 1562 // If we found a field after the region we care about, then we're done. 1563 if (FieldOffset >= EndBit) break; 1564 1565 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 1566 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 1567 Context)) 1568 return false; 1569 } 1570 1571 // If nothing in this record overlapped the area of interest, then we're 1572 // clean. 1573 return true; 1574 } 1575 1576 return false; 1577 } 1578 1579 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 1580 /// float member at the specified offset. For example, {int,{float}} has a 1581 /// float at offset 4. It is conservatively correct for this routine to return 1582 /// false. 1583 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 1584 const llvm::TargetData &TD) { 1585 // Base case if we find a float. 1586 if (IROffset == 0 && IRType->isFloatTy()) 1587 return true; 1588 1589 // If this is a struct, recurse into the field at the specified offset. 1590 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1591 const llvm::StructLayout *SL = TD.getStructLayout(STy); 1592 unsigned Elt = SL->getElementContainingOffset(IROffset); 1593 IROffset -= SL->getElementOffset(Elt); 1594 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 1595 } 1596 1597 // If this is an array, recurse into the field at the specified offset. 1598 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1599 llvm::Type *EltTy = ATy->getElementType(); 1600 unsigned EltSize = TD.getTypeAllocSize(EltTy); 1601 IROffset -= IROffset/EltSize*EltSize; 1602 return ContainsFloatAtOffset(EltTy, IROffset, TD); 1603 } 1604 1605 return false; 1606 } 1607 1608 1609 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 1610 /// low 8 bytes of an XMM register, corresponding to the SSE class. 1611 llvm::Type *X86_64ABIInfo:: 1612 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1613 QualType SourceTy, unsigned SourceOffset) const { 1614 // The only three choices we have are either double, <2 x float>, or float. We 1615 // pass as float if the last 4 bytes is just padding. This happens for 1616 // structs that contain 3 floats. 1617 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 1618 SourceOffset*8+64, getContext())) 1619 return llvm::Type::getFloatTy(getVMContext()); 1620 1621 // We want to pass as <2 x float> if the LLVM IR type contains a float at 1622 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 1623 // case. 1624 if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) && 1625 ContainsFloatAtOffset(IRType, IROffset+4, getTargetData())) 1626 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 1627 1628 return llvm::Type::getDoubleTy(getVMContext()); 1629 } 1630 1631 1632 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 1633 /// an 8-byte GPR. This means that we either have a scalar or we are talking 1634 /// about the high or low part of an up-to-16-byte struct. This routine picks 1635 /// the best LLVM IR type to represent this, which may be i64 or may be anything 1636 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 1637 /// etc). 1638 /// 1639 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 1640 /// the source type. IROffset is an offset in bytes into the LLVM IR type that 1641 /// the 8-byte value references. PrefType may be null. 1642 /// 1643 /// SourceTy is the source level type for the entire argument. SourceOffset is 1644 /// an offset into this that we're processing (which is always either 0 or 8). 1645 /// 1646 llvm::Type *X86_64ABIInfo:: 1647 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1648 QualType SourceTy, unsigned SourceOffset) const { 1649 // If we're dealing with an un-offset LLVM IR type, then it means that we're 1650 // returning an 8-byte unit starting with it. See if we can safely use it. 1651 if (IROffset == 0) { 1652 // Pointers and int64's always fill the 8-byte unit. 1653 if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64)) 1654 return IRType; 1655 1656 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 1657 // goodness in the source type is just tail padding. This is allowed to 1658 // kick in for struct {double,int} on the int, but not on 1659 // struct{double,int,int} because we wouldn't return the second int. We 1660 // have to do this analysis on the source type because we can't depend on 1661 // unions being lowered a specific way etc. 1662 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 1663 IRType->isIntegerTy(32)) { 1664 unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth(); 1665 1666 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 1667 SourceOffset*8+64, getContext())) 1668 return IRType; 1669 } 1670 } 1671 1672 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1673 // If this is a struct, recurse into the field at the specified offset. 1674 const llvm::StructLayout *SL = getTargetData().getStructLayout(STy); 1675 if (IROffset < SL->getSizeInBytes()) { 1676 unsigned FieldIdx = SL->getElementContainingOffset(IROffset); 1677 IROffset -= SL->getElementOffset(FieldIdx); 1678 1679 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, 1680 SourceTy, SourceOffset); 1681 } 1682 } 1683 1684 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1685 llvm::Type *EltTy = ATy->getElementType(); 1686 unsigned EltSize = getTargetData().getTypeAllocSize(EltTy); 1687 unsigned EltOffset = IROffset/EltSize*EltSize; 1688 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, 1689 SourceOffset); 1690 } 1691 1692 // Okay, we don't have any better idea of what to pass, so we pass this in an 1693 // integer register that isn't too big to fit the rest of the struct. 1694 unsigned TySizeInBytes = 1695 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); 1696 1697 assert(TySizeInBytes != SourceOffset && "Empty field?"); 1698 1699 // It is always safe to classify this as an integer type up to i64 that 1700 // isn't larger than the structure. 1701 return llvm::IntegerType::get(getVMContext(), 1702 std::min(TySizeInBytes-SourceOffset, 8U)*8); 1703 } 1704 1705 1706 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally 1707 /// be used as elements of a two register pair to pass or return, return a 1708 /// first class aggregate to represent them. For example, if the low part of 1709 /// a by-value argument should be passed as i32* and the high part as float, 1710 /// return {i32*, float}. 1711 static llvm::Type * 1712 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, 1713 const llvm::TargetData &TD) { 1714 // In order to correctly satisfy the ABI, we need to the high part to start 1715 // at offset 8. If the high and low parts we inferred are both 4-byte types 1716 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have 1717 // the second element at offset 8. Check for this: 1718 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo); 1719 unsigned HiAlign = TD.getABITypeAlignment(Hi); 1720 unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign); 1721 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!"); 1722 1723 // To handle this, we have to increase the size of the low part so that the 1724 // second element will start at an 8 byte offset. We can't increase the size 1725 // of the second element because it might make us access off the end of the 1726 // struct. 1727 if (HiStart != 8) { 1728 // There are only two sorts of types the ABI generation code can produce for 1729 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32. 1730 // Promote these to a larger type. 1731 if (Lo->isFloatTy()) 1732 Lo = llvm::Type::getDoubleTy(Lo->getContext()); 1733 else { 1734 assert(Lo->isIntegerTy() && "Invalid/unknown lo type"); 1735 Lo = llvm::Type::getInt64Ty(Lo->getContext()); 1736 } 1737 } 1738 1739 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL); 1740 1741 1742 // Verify that the second element is at an 8-byte offset. 1743 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 && 1744 "Invalid x86-64 argument pair!"); 1745 return Result; 1746 } 1747 1748 ABIArgInfo X86_64ABIInfo:: 1749 classifyReturnType(QualType RetTy) const { 1750 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the 1751 // classification algorithm. 1752 X86_64ABIInfo::Class Lo, Hi; 1753 classify(RetTy, 0, Lo, Hi); 1754 1755 // Check some invariants. 1756 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 1757 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 1758 1759 llvm::Type *ResType = 0; 1760 switch (Lo) { 1761 case NoClass: 1762 if (Hi == NoClass) 1763 return ABIArgInfo::getIgnore(); 1764 // If the low part is just padding, it takes no register, leave ResType 1765 // null. 1766 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 1767 "Unknown missing lo part"); 1768 break; 1769 1770 case SSEUp: 1771 case X87Up: 1772 llvm_unreachable("Invalid classification for lo word."); 1773 1774 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via 1775 // hidden argument. 1776 case Memory: 1777 return getIndirectReturnResult(RetTy); 1778 1779 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next 1780 // available register of the sequence %rax, %rdx is used. 1781 case Integer: 1782 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 1783 1784 // If we have a sign or zero extended integer, make sure to return Extend 1785 // so that the parameter gets the right LLVM IR attributes. 1786 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 1787 // Treat an enum type as its underlying type. 1788 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 1789 RetTy = EnumTy->getDecl()->getIntegerType(); 1790 1791 if (RetTy->isIntegralOrEnumerationType() && 1792 RetTy->isPromotableIntegerType()) 1793 return ABIArgInfo::getExtend(); 1794 } 1795 break; 1796 1797 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 1798 // available SSE register of the sequence %xmm0, %xmm1 is used. 1799 case SSE: 1800 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 1801 break; 1802 1803 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 1804 // returned on the X87 stack in %st0 as 80-bit x87 number. 1805 case X87: 1806 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 1807 break; 1808 1809 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 1810 // part of the value is returned in %st0 and the imaginary part in 1811 // %st1. 1812 case ComplexX87: 1813 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 1814 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), 1815 llvm::Type::getX86_FP80Ty(getVMContext()), 1816 NULL); 1817 break; 1818 } 1819 1820 llvm::Type *HighPart = 0; 1821 switch (Hi) { 1822 // Memory was handled previously and X87 should 1823 // never occur as a hi class. 1824 case Memory: 1825 case X87: 1826 llvm_unreachable("Invalid classification for hi word."); 1827 1828 case ComplexX87: // Previously handled. 1829 case NoClass: 1830 break; 1831 1832 case Integer: 1833 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 1834 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 1835 return ABIArgInfo::getDirect(HighPart, 8); 1836 break; 1837 case SSE: 1838 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 1839 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 1840 return ABIArgInfo::getDirect(HighPart, 8); 1841 break; 1842 1843 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 1844 // is passed in the next available eightbyte chunk if the last used 1845 // vector register. 1846 // 1847 // SSEUP should always be preceded by SSE, just widen. 1848 case SSEUp: 1849 assert(Lo == SSE && "Unexpected SSEUp classification."); 1850 ResType = GetByteVectorType(RetTy); 1851 break; 1852 1853 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 1854 // returned together with the previous X87 value in %st0. 1855 case X87Up: 1856 // If X87Up is preceded by X87, we don't need to do 1857 // anything. However, in some cases with unions it may not be 1858 // preceded by X87. In such situations we follow gcc and pass the 1859 // extra bits in an SSE reg. 1860 if (Lo != X87) { 1861 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 1862 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 1863 return ABIArgInfo::getDirect(HighPart, 8); 1864 } 1865 break; 1866 } 1867 1868 // If a high part was specified, merge it together with the low part. It is 1869 // known to pass in the high eightbyte of the result. We do this by forming a 1870 // first class struct aggregate with the high and low part: {low, high} 1871 if (HighPart) 1872 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData()); 1873 1874 return ABIArgInfo::getDirect(ResType); 1875 } 1876 1877 ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt, 1878 unsigned &neededSSE) const { 1879 X86_64ABIInfo::Class Lo, Hi; 1880 classify(Ty, 0, Lo, Hi); 1881 1882 // Check some invariants. 1883 // FIXME: Enforce these by construction. 1884 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 1885 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 1886 1887 neededInt = 0; 1888 neededSSE = 0; 1889 llvm::Type *ResType = 0; 1890 switch (Lo) { 1891 case NoClass: 1892 if (Hi == NoClass) 1893 return ABIArgInfo::getIgnore(); 1894 // If the low part is just padding, it takes no register, leave ResType 1895 // null. 1896 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 1897 "Unknown missing lo part"); 1898 break; 1899 1900 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 1901 // on the stack. 1902 case Memory: 1903 1904 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 1905 // COMPLEX_X87, it is passed in memory. 1906 case X87: 1907 case ComplexX87: 1908 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 1909 ++neededInt; 1910 return getIndirectResult(Ty); 1911 1912 case SSEUp: 1913 case X87Up: 1914 llvm_unreachable("Invalid classification for lo word."); 1915 1916 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 1917 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 1918 // and %r9 is used. 1919 case Integer: 1920 ++neededInt; 1921 1922 // Pick an 8-byte type based on the preferred type. 1923 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 1924 1925 // If we have a sign or zero extended integer, make sure to return Extend 1926 // so that the parameter gets the right LLVM IR attributes. 1927 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 1928 // Treat an enum type as its underlying type. 1929 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1930 Ty = EnumTy->getDecl()->getIntegerType(); 1931 1932 if (Ty->isIntegralOrEnumerationType() && 1933 Ty->isPromotableIntegerType()) 1934 return ABIArgInfo::getExtend(); 1935 } 1936 1937 break; 1938 1939 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 1940 // available SSE register is used, the registers are taken in the 1941 // order from %xmm0 to %xmm7. 1942 case SSE: { 1943 llvm::Type *IRType = CGT.ConvertType(Ty); 1944 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 1945 ++neededSSE; 1946 break; 1947 } 1948 } 1949 1950 llvm::Type *HighPart = 0; 1951 switch (Hi) { 1952 // Memory was handled previously, ComplexX87 and X87 should 1953 // never occur as hi classes, and X87Up must be preceded by X87, 1954 // which is passed in memory. 1955 case Memory: 1956 case X87: 1957 case ComplexX87: 1958 llvm_unreachable("Invalid classification for hi word."); 1959 1960 case NoClass: break; 1961 1962 case Integer: 1963 ++neededInt; 1964 // Pick an 8-byte type based on the preferred type. 1965 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 1966 1967 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 1968 return ABIArgInfo::getDirect(HighPart, 8); 1969 break; 1970 1971 // X87Up generally doesn't occur here (long double is passed in 1972 // memory), except in situations involving unions. 1973 case X87Up: 1974 case SSE: 1975 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 1976 1977 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 1978 return ABIArgInfo::getDirect(HighPart, 8); 1979 1980 ++neededSSE; 1981 break; 1982 1983 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 1984 // eightbyte is passed in the upper half of the last used SSE 1985 // register. This only happens when 128-bit vectors are passed. 1986 case SSEUp: 1987 assert(Lo == SSE && "Unexpected SSEUp classification"); 1988 ResType = GetByteVectorType(Ty); 1989 break; 1990 } 1991 1992 // If a high part was specified, merge it together with the low part. It is 1993 // known to pass in the high eightbyte of the result. We do this by forming a 1994 // first class struct aggregate with the high and low part: {low, high} 1995 if (HighPart) 1996 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData()); 1997 1998 return ABIArgInfo::getDirect(ResType); 1999 } 2000 2001 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2002 2003 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2004 2005 // Keep track of the number of assigned registers. 2006 unsigned freeIntRegs = 6, freeSSERegs = 8; 2007 2008 // If the return value is indirect, then the hidden argument is consuming one 2009 // integer register. 2010 if (FI.getReturnInfo().isIndirect()) 2011 --freeIntRegs; 2012 2013 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 2014 // get assigned (in left-to-right order) for passing as follows... 2015 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2016 it != ie; ++it) { 2017 unsigned neededInt, neededSSE; 2018 it->info = classifyArgumentType(it->type, neededInt, neededSSE); 2019 2020 // AMD64-ABI 3.2.3p3: If there are no registers available for any 2021 // eightbyte of an argument, the whole argument is passed on the 2022 // stack. If registers have already been assigned for some 2023 // eightbytes of such an argument, the assignments get reverted. 2024 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 2025 freeIntRegs -= neededInt; 2026 freeSSERegs -= neededSSE; 2027 } else { 2028 it->info = getIndirectResult(it->type); 2029 } 2030 } 2031 } 2032 2033 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 2034 QualType Ty, 2035 CodeGenFunction &CGF) { 2036 llvm::Value *overflow_arg_area_p = 2037 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 2038 llvm::Value *overflow_arg_area = 2039 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 2040 2041 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 2042 // byte boundary if alignment needed by type exceeds 8 byte boundary. 2043 // It isn't stated explicitly in the standard, but in practice we use 2044 // alignment greater than 16 where necessary. 2045 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 2046 if (Align > 8) { 2047 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 2048 llvm::Value *Offset = 2049 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 2050 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 2051 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 2052 CGF.Int64Ty); 2053 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); 2054 overflow_arg_area = 2055 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 2056 overflow_arg_area->getType(), 2057 "overflow_arg_area.align"); 2058 } 2059 2060 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 2061 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2062 llvm::Value *Res = 2063 CGF.Builder.CreateBitCast(overflow_arg_area, 2064 llvm::PointerType::getUnqual(LTy)); 2065 2066 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 2067 // l->overflow_arg_area + sizeof(type). 2068 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 2069 // an 8 byte boundary. 2070 2071 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 2072 llvm::Value *Offset = 2073 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 2074 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 2075 "overflow_arg_area.next"); 2076 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 2077 2078 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 2079 return Res; 2080 } 2081 2082 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2083 CodeGenFunction &CGF) const { 2084 // Assume that va_list type is correct; should be pointer to LLVM type: 2085 // struct { 2086 // i32 gp_offset; 2087 // i32 fp_offset; 2088 // i8* overflow_arg_area; 2089 // i8* reg_save_area; 2090 // }; 2091 unsigned neededInt, neededSSE; 2092 2093 Ty = CGF.getContext().getCanonicalType(Ty); 2094 ABIArgInfo AI = classifyArgumentType(Ty, neededInt, neededSSE); 2095 2096 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 2097 // in the registers. If not go to step 7. 2098 if (!neededInt && !neededSSE) 2099 return EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2100 2101 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 2102 // general purpose registers needed to pass type and num_fp to hold 2103 // the number of floating point registers needed. 2104 2105 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 2106 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 2107 // l->fp_offset > 304 - num_fp * 16 go to step 7. 2108 // 2109 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of 2110 // register save space). 2111 2112 llvm::Value *InRegs = 0; 2113 llvm::Value *gp_offset_p = 0, *gp_offset = 0; 2114 llvm::Value *fp_offset_p = 0, *fp_offset = 0; 2115 if (neededInt) { 2116 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); 2117 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); 2118 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); 2119 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); 2120 } 2121 2122 if (neededSSE) { 2123 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); 2124 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); 2125 llvm::Value *FitsInFP = 2126 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); 2127 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); 2128 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; 2129 } 2130 2131 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 2132 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 2133 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 2134 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 2135 2136 // Emit code to load the value if it was passed in registers. 2137 2138 CGF.EmitBlock(InRegBlock); 2139 2140 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with 2141 // an offset of l->gp_offset and/or l->fp_offset. This may require 2142 // copying to a temporary location in case the parameter is passed 2143 // in different register classes or requires an alignment greater 2144 // than 8 for general purpose registers and 16 for XMM registers. 2145 // 2146 // FIXME: This really results in shameful code when we end up needing to 2147 // collect arguments from different places; often what should result in a 2148 // simple assembling of a structure from scattered addresses has many more 2149 // loads than necessary. Can we clean this up? 2150 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2151 llvm::Value *RegAddr = 2152 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3), 2153 "reg_save_area"); 2154 if (neededInt && neededSSE) { 2155 // FIXME: Cleanup. 2156 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 2157 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 2158 llvm::Value *Tmp = CGF.CreateTempAlloca(ST); 2159 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 2160 llvm::Type *TyLo = ST->getElementType(0); 2161 llvm::Type *TyHi = ST->getElementType(1); 2162 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 2163 "Unexpected ABI info for mixed regs"); 2164 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 2165 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 2166 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2167 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2168 llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr; 2169 llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr; 2170 llvm::Value *V = 2171 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 2172 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2173 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 2174 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2175 2176 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2177 llvm::PointerType::getUnqual(LTy)); 2178 } else if (neededInt) { 2179 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2180 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2181 llvm::PointerType::getUnqual(LTy)); 2182 } else if (neededSSE == 1) { 2183 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2184 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2185 llvm::PointerType::getUnqual(LTy)); 2186 } else { 2187 assert(neededSSE == 2 && "Invalid number of needed registers!"); 2188 // SSE registers are spaced 16 bytes apart in the register save 2189 // area, we need to collect the two eightbytes together. 2190 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2191 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); 2192 llvm::Type *DoubleTy = CGF.DoubleTy; 2193 llvm::Type *DblPtrTy = 2194 llvm::PointerType::getUnqual(DoubleTy); 2195 llvm::StructType *ST = llvm::StructType::get(DoubleTy, 2196 DoubleTy, NULL); 2197 llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST); 2198 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 2199 DblPtrTy)); 2200 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2201 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 2202 DblPtrTy)); 2203 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2204 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2205 llvm::PointerType::getUnqual(LTy)); 2206 } 2207 2208 // AMD64-ABI 3.5.7p5: Step 5. Set: 2209 // l->gp_offset = l->gp_offset + num_gp * 8 2210 // l->fp_offset = l->fp_offset + num_fp * 16. 2211 if (neededInt) { 2212 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2213 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2214 gp_offset_p); 2215 } 2216 if (neededSSE) { 2217 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2218 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2219 fp_offset_p); 2220 } 2221 CGF.EmitBranch(ContBlock); 2222 2223 // Emit code to load the value if it was passed in memory. 2224 2225 CGF.EmitBlock(InMemBlock); 2226 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2227 2228 // Return the appropriate result. 2229 2230 CGF.EmitBlock(ContBlock); 2231 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2, 2232 "vaarg.addr"); 2233 ResAddr->addIncoming(RegAddr, InRegBlock); 2234 ResAddr->addIncoming(MemAddr, InMemBlock); 2235 return ResAddr; 2236 } 2237 2238 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const { 2239 2240 if (Ty->isVoidType()) 2241 return ABIArgInfo::getIgnore(); 2242 2243 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2244 Ty = EnumTy->getDecl()->getIntegerType(); 2245 2246 uint64_t Size = getContext().getTypeSize(Ty); 2247 2248 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2249 if (hasNonTrivialDestructorOrCopyConstructor(RT) || 2250 RT->getDecl()->hasFlexibleArrayMember()) 2251 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2252 2253 // FIXME: mingw-w64-gcc emits 128-bit struct as i128 2254 if (Size == 128 && 2255 getContext().getTargetInfo().getTriple().getOS() 2256 == llvm::Triple::MinGW32) 2257 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2258 Size)); 2259 2260 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 2261 // not 1, 2, 4, or 8 bytes, must be passed by reference." 2262 if (Size <= 64 && 2263 (Size & (Size - 1)) == 0) 2264 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2265 Size)); 2266 2267 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2268 } 2269 2270 if (Ty->isPromotableIntegerType()) 2271 return ABIArgInfo::getExtend(); 2272 2273 return ABIArgInfo::getDirect(); 2274 } 2275 2276 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2277 2278 QualType RetTy = FI.getReturnType(); 2279 FI.getReturnInfo() = classify(RetTy); 2280 2281 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2282 it != ie; ++it) 2283 it->info = classify(it->type); 2284 } 2285 2286 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2287 CodeGenFunction &CGF) const { 2288 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2289 2290 CGBuilderTy &Builder = CGF.Builder; 2291 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 2292 "ap"); 2293 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2294 llvm::Type *PTy = 2295 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2296 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2297 2298 uint64_t Offset = 2299 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8); 2300 llvm::Value *NextAddr = 2301 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2302 "ap.next"); 2303 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2304 2305 return AddrTyped; 2306 } 2307 2308 // PowerPC-32 2309 2310 namespace { 2311 class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2312 public: 2313 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2314 2315 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2316 // This is recovered from gcc output. 2317 return 1; // r1 is the dedicated stack pointer 2318 } 2319 2320 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2321 llvm::Value *Address) const; 2322 }; 2323 2324 } 2325 2326 bool 2327 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2328 llvm::Value *Address) const { 2329 // This is calculated from the LLVM and GCC tables and verified 2330 // against gcc output. AFAIK all ABIs use the same encoding. 2331 2332 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2333 2334 llvm::IntegerType *i8 = CGF.Int8Ty; 2335 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2336 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2337 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2338 2339 // 0-31: r0-31, the 4-byte general-purpose registers 2340 AssignToArrayRange(Builder, Address, Four8, 0, 31); 2341 2342 // 32-63: fp0-31, the 8-byte floating-point registers 2343 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2344 2345 // 64-76 are various 4-byte special-purpose registers: 2346 // 64: mq 2347 // 65: lr 2348 // 66: ctr 2349 // 67: ap 2350 // 68-75 cr0-7 2351 // 76: xer 2352 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2353 2354 // 77-108: v0-31, the 16-byte vector registers 2355 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2356 2357 // 109: vrsave 2358 // 110: vscr 2359 // 111: spe_acc 2360 // 112: spefscr 2361 // 113: sfp 2362 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2363 2364 return false; 2365 } 2366 2367 2368 //===----------------------------------------------------------------------===// 2369 // ARM ABI Implementation 2370 //===----------------------------------------------------------------------===// 2371 2372 namespace { 2373 2374 class ARMABIInfo : public ABIInfo { 2375 public: 2376 enum ABIKind { 2377 APCS = 0, 2378 AAPCS = 1, 2379 AAPCS_VFP 2380 }; 2381 2382 private: 2383 ABIKind Kind; 2384 2385 public: 2386 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {} 2387 2388 bool isEABI() const { 2389 StringRef Env = 2390 getContext().getTargetInfo().getTriple().getEnvironmentName(); 2391 return (Env == "gnueabi" || Env == "eabi" || Env == "androideabi"); 2392 } 2393 2394 private: 2395 ABIKind getABIKind() const { return Kind; } 2396 2397 ABIArgInfo classifyReturnType(QualType RetTy) const; 2398 ABIArgInfo classifyArgumentType(QualType RetTy) const; 2399 2400 virtual void computeInfo(CGFunctionInfo &FI) const; 2401 2402 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2403 CodeGenFunction &CGF) const; 2404 }; 2405 2406 class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 2407 public: 2408 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 2409 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 2410 2411 const ARMABIInfo &getABIInfo() const { 2412 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 2413 } 2414 2415 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2416 return 13; 2417 } 2418 2419 StringRef getARCRetainAutoreleasedReturnValueMarker() const { 2420 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; 2421 } 2422 2423 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2424 llvm::Value *Address) const { 2425 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 2426 2427 // 0-15 are the 16 integer registers. 2428 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); 2429 return false; 2430 } 2431 2432 unsigned getSizeOfUnwindException() const { 2433 if (getABIInfo().isEABI()) return 88; 2434 return TargetCodeGenInfo::getSizeOfUnwindException(); 2435 } 2436 }; 2437 2438 } 2439 2440 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 2441 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2442 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2443 it != ie; ++it) 2444 it->info = classifyArgumentType(it->type); 2445 2446 // Always honor user-specified calling convention. 2447 if (FI.getCallingConvention() != llvm::CallingConv::C) 2448 return; 2449 2450 // Calling convention as default by an ABI. 2451 llvm::CallingConv::ID DefaultCC; 2452 if (isEABI()) 2453 DefaultCC = llvm::CallingConv::ARM_AAPCS; 2454 else 2455 DefaultCC = llvm::CallingConv::ARM_APCS; 2456 2457 // If user did not ask for specific calling convention explicitly (e.g. via 2458 // pcs attribute), set effective calling convention if it's different than ABI 2459 // default. 2460 switch (getABIKind()) { 2461 case APCS: 2462 if (DefaultCC != llvm::CallingConv::ARM_APCS) 2463 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS); 2464 break; 2465 case AAPCS: 2466 if (DefaultCC != llvm::CallingConv::ARM_AAPCS) 2467 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS); 2468 break; 2469 case AAPCS_VFP: 2470 if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP) 2471 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP); 2472 break; 2473 } 2474 } 2475 2476 /// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous 2477 /// aggregate. If HAMembers is non-null, the number of base elements 2478 /// contained in the type is returned through it; this is used for the 2479 /// recursive calls that check aggregate component types. 2480 static bool isHomogeneousAggregate(QualType Ty, const Type *&Base, 2481 ASTContext &Context, 2482 uint64_t *HAMembers = 0) { 2483 uint64_t Members; 2484 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 2485 if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members)) 2486 return false; 2487 Members *= AT->getSize().getZExtValue(); 2488 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 2489 const RecordDecl *RD = RT->getDecl(); 2490 if (RD->isUnion() || RD->hasFlexibleArrayMember()) 2491 return false; 2492 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 2493 if (!CXXRD->isAggregate()) 2494 return false; 2495 } 2496 Members = 0; 2497 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2498 i != e; ++i) { 2499 const FieldDecl *FD = *i; 2500 uint64_t FldMembers; 2501 if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers)) 2502 return false; 2503 Members += FldMembers; 2504 } 2505 } else { 2506 Members = 1; 2507 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 2508 Members = 2; 2509 Ty = CT->getElementType(); 2510 } 2511 2512 // Homogeneous aggregates for AAPCS-VFP must have base types of float, 2513 // double, or 64-bit or 128-bit vectors. 2514 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 2515 if (BT->getKind() != BuiltinType::Float && 2516 BT->getKind() != BuiltinType::Double) 2517 return false; 2518 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 2519 unsigned VecSize = Context.getTypeSize(VT); 2520 if (VecSize != 64 && VecSize != 128) 2521 return false; 2522 } else { 2523 return false; 2524 } 2525 2526 // The base type must be the same for all members. Vector types of the 2527 // same total size are treated as being equivalent here. 2528 const Type *TyPtr = Ty.getTypePtr(); 2529 if (!Base) 2530 Base = TyPtr; 2531 if (Base != TyPtr && 2532 (!Base->isVectorType() || !TyPtr->isVectorType() || 2533 Context.getTypeSize(Base) != Context.getTypeSize(TyPtr))) 2534 return false; 2535 } 2536 2537 // Homogeneous Aggregates can have at most 4 members of the base type. 2538 if (HAMembers) 2539 *HAMembers = Members; 2540 return (Members <= 4); 2541 } 2542 2543 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const { 2544 if (!isAggregateTypeForABI(Ty)) { 2545 // Treat an enum type as its underlying type. 2546 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2547 Ty = EnumTy->getDecl()->getIntegerType(); 2548 2549 return (Ty->isPromotableIntegerType() ? 2550 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2551 } 2552 2553 // Ignore empty records. 2554 if (isEmptyRecord(getContext(), Ty, true)) 2555 return ABIArgInfo::getIgnore(); 2556 2557 // Structures with either a non-trivial destructor or a non-trivial 2558 // copy constructor are always indirect. 2559 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 2560 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2561 2562 if (getABIKind() == ARMABIInfo::AAPCS_VFP) { 2563 // Homogeneous Aggregates need to be expanded. 2564 const Type *Base = 0; 2565 if (isHomogeneousAggregate(Ty, Base, getContext())) 2566 return ABIArgInfo::getExpand(); 2567 } 2568 2569 // Otherwise, pass by coercing to a structure of the appropriate size. 2570 // 2571 // FIXME: This is kind of nasty... but there isn't much choice because the ARM 2572 // backend doesn't support byval. 2573 // FIXME: This doesn't handle alignment > 64 bits. 2574 llvm::Type* ElemTy; 2575 unsigned SizeRegs; 2576 if (getContext().getTypeAlign(Ty) > 32) { 2577 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 2578 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 2579 } else { 2580 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 2581 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 2582 } 2583 2584 llvm::Type *STy = 2585 llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL); 2586 return ABIArgInfo::getDirect(STy); 2587 } 2588 2589 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 2590 llvm::LLVMContext &VMContext) { 2591 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 2592 // is called integer-like if its size is less than or equal to one word, and 2593 // the offset of each of its addressable sub-fields is zero. 2594 2595 uint64_t Size = Context.getTypeSize(Ty); 2596 2597 // Check that the type fits in a word. 2598 if (Size > 32) 2599 return false; 2600 2601 // FIXME: Handle vector types! 2602 if (Ty->isVectorType()) 2603 return false; 2604 2605 // Float types are never treated as "integer like". 2606 if (Ty->isRealFloatingType()) 2607 return false; 2608 2609 // If this is a builtin or pointer type then it is ok. 2610 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 2611 return true; 2612 2613 // Small complex integer types are "integer like". 2614 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 2615 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 2616 2617 // Single element and zero sized arrays should be allowed, by the definition 2618 // above, but they are not. 2619 2620 // Otherwise, it must be a record type. 2621 const RecordType *RT = Ty->getAs<RecordType>(); 2622 if (!RT) return false; 2623 2624 // Ignore records with flexible arrays. 2625 const RecordDecl *RD = RT->getDecl(); 2626 if (RD->hasFlexibleArrayMember()) 2627 return false; 2628 2629 // Check that all sub-fields are at offset 0, and are themselves "integer 2630 // like". 2631 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 2632 2633 bool HadField = false; 2634 unsigned idx = 0; 2635 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2636 i != e; ++i, ++idx) { 2637 const FieldDecl *FD = *i; 2638 2639 // Bit-fields are not addressable, we only need to verify they are "integer 2640 // like". We still have to disallow a subsequent non-bitfield, for example: 2641 // struct { int : 0; int x } 2642 // is non-integer like according to gcc. 2643 if (FD->isBitField()) { 2644 if (!RD->isUnion()) 2645 HadField = true; 2646 2647 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 2648 return false; 2649 2650 continue; 2651 } 2652 2653 // Check if this field is at offset 0. 2654 if (Layout.getFieldOffset(idx) != 0) 2655 return false; 2656 2657 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 2658 return false; 2659 2660 // Only allow at most one field in a structure. This doesn't match the 2661 // wording above, but follows gcc in situations with a field following an 2662 // empty structure. 2663 if (!RD->isUnion()) { 2664 if (HadField) 2665 return false; 2666 2667 HadField = true; 2668 } 2669 } 2670 2671 return true; 2672 } 2673 2674 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const { 2675 if (RetTy->isVoidType()) 2676 return ABIArgInfo::getIgnore(); 2677 2678 // Large vector types should be returned via memory. 2679 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 2680 return ABIArgInfo::getIndirect(0); 2681 2682 if (!isAggregateTypeForABI(RetTy)) { 2683 // Treat an enum type as its underlying type. 2684 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 2685 RetTy = EnumTy->getDecl()->getIntegerType(); 2686 2687 return (RetTy->isPromotableIntegerType() ? 2688 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2689 } 2690 2691 // Structures with either a non-trivial destructor or a non-trivial 2692 // copy constructor are always indirect. 2693 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 2694 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2695 2696 // Are we following APCS? 2697 if (getABIKind() == APCS) { 2698 if (isEmptyRecord(getContext(), RetTy, false)) 2699 return ABIArgInfo::getIgnore(); 2700 2701 // Complex types are all returned as packed integers. 2702 // 2703 // FIXME: Consider using 2 x vector types if the back end handles them 2704 // correctly. 2705 if (RetTy->isAnyComplexType()) 2706 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2707 getContext().getTypeSize(RetTy))); 2708 2709 // Integer like structures are returned in r0. 2710 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 2711 // Return in the smallest viable integer type. 2712 uint64_t Size = getContext().getTypeSize(RetTy); 2713 if (Size <= 8) 2714 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 2715 if (Size <= 16) 2716 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 2717 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 2718 } 2719 2720 // Otherwise return in memory. 2721 return ABIArgInfo::getIndirect(0); 2722 } 2723 2724 // Otherwise this is an AAPCS variant. 2725 2726 if (isEmptyRecord(getContext(), RetTy, true)) 2727 return ABIArgInfo::getIgnore(); 2728 2729 // Check for homogeneous aggregates with AAPCS-VFP. 2730 if (getABIKind() == AAPCS_VFP) { 2731 const Type *Base = 0; 2732 if (isHomogeneousAggregate(RetTy, Base, getContext())) 2733 // Homogeneous Aggregates are returned directly. 2734 return ABIArgInfo::getDirect(); 2735 } 2736 2737 // Aggregates <= 4 bytes are returned in r0; other aggregates 2738 // are returned indirectly. 2739 uint64_t Size = getContext().getTypeSize(RetTy); 2740 if (Size <= 32) { 2741 // Return in the smallest viable integer type. 2742 if (Size <= 8) 2743 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 2744 if (Size <= 16) 2745 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 2746 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 2747 } 2748 2749 return ABIArgInfo::getIndirect(0); 2750 } 2751 2752 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2753 CodeGenFunction &CGF) const { 2754 llvm::Type *BP = CGF.Int8PtrTy; 2755 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2756 2757 CGBuilderTy &Builder = CGF.Builder; 2758 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 2759 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2760 // Handle address alignment for type alignment > 32 bits 2761 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; 2762 if (TyAlign > 4) { 2763 assert((TyAlign & (TyAlign - 1)) == 0 && 2764 "Alignment is not power of 2!"); 2765 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty); 2766 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1)); 2767 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1))); 2768 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 2769 } 2770 llvm::Type *PTy = 2771 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2772 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2773 2774 uint64_t Offset = 2775 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 2776 llvm::Value *NextAddr = 2777 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2778 "ap.next"); 2779 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2780 2781 return AddrTyped; 2782 } 2783 2784 //===----------------------------------------------------------------------===// 2785 // PTX ABI Implementation 2786 //===----------------------------------------------------------------------===// 2787 2788 namespace { 2789 2790 class PTXABIInfo : public ABIInfo { 2791 public: 2792 PTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 2793 2794 ABIArgInfo classifyReturnType(QualType RetTy) const; 2795 ABIArgInfo classifyArgumentType(QualType Ty) const; 2796 2797 virtual void computeInfo(CGFunctionInfo &FI) const; 2798 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2799 CodeGenFunction &CFG) const; 2800 }; 2801 2802 class PTXTargetCodeGenInfo : public TargetCodeGenInfo { 2803 public: 2804 PTXTargetCodeGenInfo(CodeGenTypes &CGT) 2805 : TargetCodeGenInfo(new PTXABIInfo(CGT)) {} 2806 2807 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2808 CodeGen::CodeGenModule &M) const; 2809 }; 2810 2811 ABIArgInfo PTXABIInfo::classifyReturnType(QualType RetTy) const { 2812 if (RetTy->isVoidType()) 2813 return ABIArgInfo::getIgnore(); 2814 if (isAggregateTypeForABI(RetTy)) 2815 return ABIArgInfo::getIndirect(0); 2816 return ABIArgInfo::getDirect(); 2817 } 2818 2819 ABIArgInfo PTXABIInfo::classifyArgumentType(QualType Ty) const { 2820 if (isAggregateTypeForABI(Ty)) 2821 return ABIArgInfo::getIndirect(0); 2822 2823 return ABIArgInfo::getDirect(); 2824 } 2825 2826 void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const { 2827 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2828 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2829 it != ie; ++it) 2830 it->info = classifyArgumentType(it->type); 2831 2832 // Always honor user-specified calling convention. 2833 if (FI.getCallingConvention() != llvm::CallingConv::C) 2834 return; 2835 2836 // Calling convention as default by an ABI. 2837 llvm::CallingConv::ID DefaultCC; 2838 const LangOptions &LangOpts = getContext().getLangOptions(); 2839 if (LangOpts.OpenCL || LangOpts.CUDA) { 2840 // If we are in OpenCL or CUDA mode, then default to device functions 2841 DefaultCC = llvm::CallingConv::PTX_Device; 2842 } else { 2843 // If we are in standard C/C++ mode, use the triple to decide on the default 2844 StringRef Env = 2845 getContext().getTargetInfo().getTriple().getEnvironmentName(); 2846 if (Env == "device") 2847 DefaultCC = llvm::CallingConv::PTX_Device; 2848 else 2849 DefaultCC = llvm::CallingConv::PTX_Kernel; 2850 } 2851 FI.setEffectiveCallingConvention(DefaultCC); 2852 2853 } 2854 2855 llvm::Value *PTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2856 CodeGenFunction &CFG) const { 2857 llvm_unreachable("PTX does not support varargs"); 2858 } 2859 2860 void PTXTargetCodeGenInfo::SetTargetAttributes(const Decl *D, 2861 llvm::GlobalValue *GV, 2862 CodeGen::CodeGenModule &M) const{ 2863 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 2864 if (!FD) return; 2865 2866 llvm::Function *F = cast<llvm::Function>(GV); 2867 2868 // Perform special handling in OpenCL mode 2869 if (M.getLangOptions().OpenCL) { 2870 // Use OpenCL function attributes to set proper calling conventions 2871 // By default, all functions are device functions 2872 if (FD->hasAttr<OpenCLKernelAttr>()) { 2873 // OpenCL __kernel functions get a kernel calling convention 2874 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 2875 // And kernel functions are not subject to inlining 2876 F->addFnAttr(llvm::Attribute::NoInline); 2877 } 2878 } 2879 2880 // Perform special handling in CUDA mode. 2881 if (M.getLangOptions().CUDA) { 2882 // CUDA __global__ functions get a kernel calling convention. Since 2883 // __global__ functions cannot be called from the device, we do not 2884 // need to set the noinline attribute. 2885 if (FD->getAttr<CUDAGlobalAttr>()) 2886 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 2887 } 2888 } 2889 2890 } 2891 2892 //===----------------------------------------------------------------------===// 2893 // MBlaze ABI Implementation 2894 //===----------------------------------------------------------------------===// 2895 2896 namespace { 2897 2898 class MBlazeABIInfo : public ABIInfo { 2899 public: 2900 MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 2901 2902 bool isPromotableIntegerType(QualType Ty) const; 2903 2904 ABIArgInfo classifyReturnType(QualType RetTy) const; 2905 ABIArgInfo classifyArgumentType(QualType RetTy) const; 2906 2907 virtual void computeInfo(CGFunctionInfo &FI) const { 2908 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2909 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2910 it != ie; ++it) 2911 it->info = classifyArgumentType(it->type); 2912 } 2913 2914 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2915 CodeGenFunction &CGF) const; 2916 }; 2917 2918 class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo { 2919 public: 2920 MBlazeTargetCodeGenInfo(CodeGenTypes &CGT) 2921 : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {} 2922 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2923 CodeGen::CodeGenModule &M) const; 2924 }; 2925 2926 } 2927 2928 bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const { 2929 // MBlaze ABI requires all 8 and 16 bit quantities to be extended. 2930 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 2931 switch (BT->getKind()) { 2932 case BuiltinType::Bool: 2933 case BuiltinType::Char_S: 2934 case BuiltinType::Char_U: 2935 case BuiltinType::SChar: 2936 case BuiltinType::UChar: 2937 case BuiltinType::Short: 2938 case BuiltinType::UShort: 2939 return true; 2940 default: 2941 return false; 2942 } 2943 return false; 2944 } 2945 2946 llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2947 CodeGenFunction &CGF) const { 2948 // FIXME: Implement 2949 return 0; 2950 } 2951 2952 2953 ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const { 2954 if (RetTy->isVoidType()) 2955 return ABIArgInfo::getIgnore(); 2956 if (isAggregateTypeForABI(RetTy)) 2957 return ABIArgInfo::getIndirect(0); 2958 2959 return (isPromotableIntegerType(RetTy) ? 2960 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2961 } 2962 2963 ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const { 2964 if (isAggregateTypeForABI(Ty)) 2965 return ABIArgInfo::getIndirect(0); 2966 2967 return (isPromotableIntegerType(Ty) ? 2968 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2969 } 2970 2971 void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D, 2972 llvm::GlobalValue *GV, 2973 CodeGen::CodeGenModule &M) 2974 const { 2975 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 2976 if (!FD) return; 2977 2978 llvm::CallingConv::ID CC = llvm::CallingConv::C; 2979 if (FD->hasAttr<MBlazeInterruptHandlerAttr>()) 2980 CC = llvm::CallingConv::MBLAZE_INTR; 2981 else if (FD->hasAttr<MBlazeSaveVolatilesAttr>()) 2982 CC = llvm::CallingConv::MBLAZE_SVOL; 2983 2984 if (CC != llvm::CallingConv::C) { 2985 // Handle 'interrupt_handler' attribute: 2986 llvm::Function *F = cast<llvm::Function>(GV); 2987 2988 // Step 1: Set ISR calling convention. 2989 F->setCallingConv(CC); 2990 2991 // Step 2: Add attributes goodness. 2992 F->addFnAttr(llvm::Attribute::NoInline); 2993 } 2994 2995 // Step 3: Emit _interrupt_handler alias. 2996 if (CC == llvm::CallingConv::MBLAZE_INTR) 2997 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 2998 "_interrupt_handler", GV, &M.getModule()); 2999 } 3000 3001 3002 //===----------------------------------------------------------------------===// 3003 // MSP430 ABI Implementation 3004 //===----------------------------------------------------------------------===// 3005 3006 namespace { 3007 3008 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 3009 public: 3010 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 3011 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 3012 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3013 CodeGen::CodeGenModule &M) const; 3014 }; 3015 3016 } 3017 3018 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3019 llvm::GlobalValue *GV, 3020 CodeGen::CodeGenModule &M) const { 3021 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 3022 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 3023 // Handle 'interrupt' attribute: 3024 llvm::Function *F = cast<llvm::Function>(GV); 3025 3026 // Step 1: Set ISR calling convention. 3027 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 3028 3029 // Step 2: Add attributes goodness. 3030 F->addFnAttr(llvm::Attribute::NoInline); 3031 3032 // Step 3: Emit ISR vector alias. 3033 unsigned Num = attr->getNumber() + 0xffe0; 3034 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 3035 "vector_" + Twine::utohexstr(Num), 3036 GV, &M.getModule()); 3037 } 3038 } 3039 } 3040 3041 //===----------------------------------------------------------------------===// 3042 // MIPS ABI Implementation. This works for both little-endian and 3043 // big-endian variants. 3044 //===----------------------------------------------------------------------===// 3045 3046 namespace { 3047 class MipsABIInfo : public ABIInfo { 3048 bool IsO32; 3049 unsigned MinABIStackAlignInBytes; 3050 llvm::Type* HandleAggregates(QualType Ty) const; 3051 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 3052 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 3053 public: 3054 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 3055 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8) {} 3056 3057 ABIArgInfo classifyReturnType(QualType RetTy) const; 3058 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 3059 virtual void computeInfo(CGFunctionInfo &FI) const; 3060 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3061 CodeGenFunction &CGF) const; 3062 }; 3063 3064 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 3065 unsigned SizeOfUnwindException; 3066 public: 3067 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 3068 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 3069 SizeOfUnwindException(IsO32 ? 24 : 32) {} 3070 3071 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 3072 return 29; 3073 } 3074 3075 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3076 llvm::Value *Address) const; 3077 3078 unsigned getSizeOfUnwindException() const { 3079 return SizeOfUnwindException; 3080 } 3081 }; 3082 } 3083 3084 // In N32/64, an aligned double precision floating point field is passed in 3085 // a register. 3086 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty) const { 3087 if (IsO32) 3088 return 0; 3089 3090 if (Ty->isComplexType()) 3091 return CGT.ConvertType(Ty); 3092 3093 const RecordType *RT = Ty->getAs<RecordType>(); 3094 3095 // Unions are passed in integer registers. 3096 if (!RT || !RT->isStructureOrClassType()) 3097 return 0; 3098 3099 const RecordDecl *RD = RT->getDecl(); 3100 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3101 uint64_t StructSize = getContext().getTypeSize(Ty); 3102 assert(!(StructSize % 8) && "Size of structure must be multiple of 8."); 3103 3104 uint64_t LastOffset = 0; 3105 unsigned idx = 0; 3106 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 3107 SmallVector<llvm::Type*, 8> ArgList; 3108 3109 // Iterate over fields in the struct/class and check if there are any aligned 3110 // double fields. 3111 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3112 i != e; ++i, ++idx) { 3113 const QualType Ty = (*i)->getType(); 3114 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 3115 3116 if (!BT || BT->getKind() != BuiltinType::Double) 3117 continue; 3118 3119 uint64_t Offset = Layout.getFieldOffset(idx); 3120 if (Offset % 64) // Ignore doubles that are not aligned. 3121 continue; 3122 3123 // Add ((Offset - LastOffset) / 64) args of type i64. 3124 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 3125 ArgList.push_back(I64); 3126 3127 // Add double type. 3128 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 3129 LastOffset = Offset + 64; 3130 } 3131 3132 // This struct/class doesn't have an aligned double field. 3133 if (!LastOffset) 3134 return 0; 3135 3136 // Add ((StructSize - LastOffset) / 64) args of type i64. 3137 for (unsigned N = (StructSize - LastOffset) / 64; N; --N) 3138 ArgList.push_back(I64); 3139 3140 // If the size of the remainder is not zero, add one more integer type to 3141 // ArgList. 3142 unsigned R = (StructSize - LastOffset) % 64; 3143 if (R) 3144 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 3145 3146 return llvm::StructType::get(getVMContext(), ArgList); 3147 } 3148 3149 llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const { 3150 // Padding is inserted only for N32/64. 3151 if (IsO32) 3152 return 0; 3153 3154 assert(Align <= 16 && "Alignment larger than 16 not handled."); 3155 return (Align == 16 && Offset & 0xf) ? 3156 llvm::IntegerType::get(getVMContext(), 64) : 0; 3157 } 3158 3159 ABIArgInfo 3160 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 3161 uint64_t OrigOffset = Offset; 3162 uint64_t TySize = 3163 llvm::RoundUpToAlignment(getContext().getTypeSize(Ty), 64) / 8; 3164 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 3165 Offset = llvm::RoundUpToAlignment(Offset, std::max(Align, (uint64_t)8)); 3166 Offset += TySize; 3167 3168 if (isAggregateTypeForABI(Ty)) { 3169 // Ignore empty aggregates. 3170 if (TySize == 0) 3171 return ABIArgInfo::getIgnore(); 3172 3173 // Records with non trivial destructors/constructors should not be passed 3174 // by value. 3175 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) { 3176 Offset = OrigOffset + 8; 3177 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3178 } 3179 3180 // If we have reached here, aggregates are passed either indirectly via a 3181 // byval pointer or directly by coercing to another structure type. In the 3182 // latter case, padding is inserted if the offset of the aggregate is 3183 // unaligned. 3184 llvm::Type *ResType = HandleAggregates(Ty); 3185 3186 if (!ResType) 3187 return ABIArgInfo::getIndirect(0); 3188 3189 return ABIArgInfo::getDirect(ResType, 0, getPaddingType(Align, OrigOffset)); 3190 } 3191 3192 // Treat an enum type as its underlying type. 3193 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3194 Ty = EnumTy->getDecl()->getIntegerType(); 3195 3196 if (Ty->isPromotableIntegerType()) 3197 return ABIArgInfo::getExtend(); 3198 3199 return ABIArgInfo::getDirect(0, 0, getPaddingType(Align, OrigOffset)); 3200 } 3201 3202 llvm::Type* 3203 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 3204 const RecordType *RT = RetTy->getAs<RecordType>(); 3205 SmallVector<llvm::Type*, 2> RTList; 3206 3207 if (RT && RT->isStructureOrClassType()) { 3208 const RecordDecl *RD = RT->getDecl(); 3209 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3210 unsigned FieldCnt = Layout.getFieldCount(); 3211 3212 // N32/64 returns struct/classes in floating point registers if the 3213 // following conditions are met: 3214 // 1. The size of the struct/class is no larger than 128-bit. 3215 // 2. The struct/class has one or two fields all of which are floating 3216 // point types. 3217 // 3. The offset of the first field is zero (this follows what gcc does). 3218 // 3219 // Any other composite results are returned in integer registers. 3220 // 3221 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 3222 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 3223 for (; b != e; ++b) { 3224 const BuiltinType *BT = (*b)->getType()->getAs<BuiltinType>(); 3225 3226 if (!BT || !BT->isFloatingPoint()) 3227 break; 3228 3229 RTList.push_back(CGT.ConvertType((*b)->getType())); 3230 } 3231 3232 if (b == e) 3233 return llvm::StructType::get(getVMContext(), RTList, 3234 RD->hasAttr<PackedAttr>()); 3235 3236 RTList.clear(); 3237 } 3238 } 3239 3240 RTList.push_back(llvm::IntegerType::get(getVMContext(), 3241 std::min(Size, (uint64_t)64))); 3242 if (Size > 64) 3243 RTList.push_back(llvm::IntegerType::get(getVMContext(), Size - 64)); 3244 3245 return llvm::StructType::get(getVMContext(), RTList); 3246 } 3247 3248 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 3249 uint64_t Size = getContext().getTypeSize(RetTy); 3250 3251 if (RetTy->isVoidType() || Size == 0) 3252 return ABIArgInfo::getIgnore(); 3253 3254 if (isAggregateTypeForABI(RetTy)) { 3255 if (Size <= 128) { 3256 if (RetTy->isAnyComplexType()) 3257 return ABIArgInfo::getDirect(); 3258 3259 if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 3260 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 3261 } 3262 3263 return ABIArgInfo::getIndirect(0); 3264 } 3265 3266 // Treat an enum type as its underlying type. 3267 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3268 RetTy = EnumTy->getDecl()->getIntegerType(); 3269 3270 return (RetTy->isPromotableIntegerType() ? 3271 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3272 } 3273 3274 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 3275 ABIArgInfo &RetInfo = FI.getReturnInfo(); 3276 RetInfo = classifyReturnType(FI.getReturnType()); 3277 3278 // Check if a pointer to an aggregate is passed as a hidden argument. 3279 uint64_t Offset = RetInfo.isIndirect() ? 8 : 0; 3280 3281 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3282 it != ie; ++it) 3283 it->info = classifyArgumentType(it->type, Offset); 3284 } 3285 3286 llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3287 CodeGenFunction &CGF) const { 3288 llvm::Type *BP = CGF.Int8PtrTy; 3289 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3290 3291 CGBuilderTy &Builder = CGF.Builder; 3292 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 3293 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3294 int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8; 3295 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3296 llvm::Value *AddrTyped; 3297 unsigned PtrWidth = getContext().getTargetInfo().getPointerWidth(0); 3298 llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty; 3299 3300 if (TypeAlign > MinABIStackAlignInBytes) { 3301 llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy); 3302 llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1); 3303 llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign); 3304 llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc); 3305 llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask); 3306 AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy); 3307 } 3308 else 3309 AddrTyped = Builder.CreateBitCast(Addr, PTy); 3310 3311 llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP); 3312 TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes); 3313 uint64_t Offset = 3314 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign); 3315 llvm::Value *NextAddr = 3316 Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset), 3317 "ap.next"); 3318 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3319 3320 return AddrTyped; 3321 } 3322 3323 bool 3324 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3325 llvm::Value *Address) const { 3326 // This information comes from gcc's implementation, which seems to 3327 // as canonical as it gets. 3328 3329 // Everything on MIPS is 4 bytes. Double-precision FP registers 3330 // are aliased to pairs of single-precision FP registers. 3331 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 3332 3333 // 0-31 are the general purpose registers, $0 - $31. 3334 // 32-63 are the floating-point registers, $f0 - $f31. 3335 // 64 and 65 are the multiply/divide registers, $hi and $lo. 3336 // 66 is the (notional, I think) register for signal-handler return. 3337 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65); 3338 3339 // 67-74 are the floating-point status registers, $fcc0 - $fcc7. 3340 // They are one bit wide and ignored here. 3341 3342 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31. 3343 // (coprocessor 1 is the FP unit) 3344 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31. 3345 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31. 3346 // 176-181 are the DSP accumulator registers. 3347 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181); 3348 return false; 3349 } 3350 3351 //===----------------------------------------------------------------------===// 3352 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults. 3353 // Currently subclassed only to implement custom OpenCL C function attribute 3354 // handling. 3355 //===----------------------------------------------------------------------===// 3356 3357 namespace { 3358 3359 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { 3360 public: 3361 TCETargetCodeGenInfo(CodeGenTypes &CGT) 3362 : DefaultTargetCodeGenInfo(CGT) {} 3363 3364 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3365 CodeGen::CodeGenModule &M) const; 3366 }; 3367 3368 void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3369 llvm::GlobalValue *GV, 3370 CodeGen::CodeGenModule &M) const { 3371 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3372 if (!FD) return; 3373 3374 llvm::Function *F = cast<llvm::Function>(GV); 3375 3376 if (M.getLangOptions().OpenCL) { 3377 if (FD->hasAttr<OpenCLKernelAttr>()) { 3378 // OpenCL C Kernel functions are not subject to inlining 3379 F->addFnAttr(llvm::Attribute::NoInline); 3380 3381 if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) { 3382 3383 // Convert the reqd_work_group_size() attributes to metadata. 3384 llvm::LLVMContext &Context = F->getContext(); 3385 llvm::NamedMDNode *OpenCLMetadata = 3386 M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info"); 3387 3388 SmallVector<llvm::Value*, 5> Operands; 3389 Operands.push_back(F); 3390 3391 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3392 llvm::APInt(32, 3393 FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim()))); 3394 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3395 llvm::APInt(32, 3396 FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim()))); 3397 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3398 llvm::APInt(32, 3399 FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim()))); 3400 3401 // Add a boolean constant operand for "required" (true) or "hint" (false) 3402 // for implementing the work_group_size_hint attr later. Currently 3403 // always true as the hint is not yet implemented. 3404 Operands.push_back(llvm::ConstantInt::getTrue(Context)); 3405 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); 3406 } 3407 } 3408 } 3409 } 3410 3411 } 3412 3413 //===----------------------------------------------------------------------===// 3414 // Hexagon ABI Implementation 3415 //===----------------------------------------------------------------------===// 3416 3417 namespace { 3418 3419 class HexagonABIInfo : public ABIInfo { 3420 3421 3422 public: 3423 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3424 3425 private: 3426 3427 ABIArgInfo classifyReturnType(QualType RetTy) const; 3428 ABIArgInfo classifyArgumentType(QualType RetTy) const; 3429 3430 virtual void computeInfo(CGFunctionInfo &FI) const; 3431 3432 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3433 CodeGenFunction &CGF) const; 3434 }; 3435 3436 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { 3437 public: 3438 HexagonTargetCodeGenInfo(CodeGenTypes &CGT) 3439 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {} 3440 3441 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 3442 return 29; 3443 } 3444 }; 3445 3446 } 3447 3448 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { 3449 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3450 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3451 it != ie; ++it) 3452 it->info = classifyArgumentType(it->type); 3453 } 3454 3455 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { 3456 if (!isAggregateTypeForABI(Ty)) { 3457 // Treat an enum type as its underlying type. 3458 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3459 Ty = EnumTy->getDecl()->getIntegerType(); 3460 3461 return (Ty->isPromotableIntegerType() ? 3462 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3463 } 3464 3465 // Ignore empty records. 3466 if (isEmptyRecord(getContext(), Ty, true)) 3467 return ABIArgInfo::getIgnore(); 3468 3469 // Structures with either a non-trivial destructor or a non-trivial 3470 // copy constructor are always indirect. 3471 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 3472 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3473 3474 uint64_t Size = getContext().getTypeSize(Ty); 3475 if (Size > 64) 3476 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 3477 // Pass in the smallest viable integer type. 3478 else if (Size > 32) 3479 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 3480 else if (Size > 16) 3481 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3482 else if (Size > 8) 3483 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3484 else 3485 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3486 } 3487 3488 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { 3489 if (RetTy->isVoidType()) 3490 return ABIArgInfo::getIgnore(); 3491 3492 // Large vector types should be returned via memory. 3493 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64) 3494 return ABIArgInfo::getIndirect(0); 3495 3496 if (!isAggregateTypeForABI(RetTy)) { 3497 // Treat an enum type as its underlying type. 3498 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3499 RetTy = EnumTy->getDecl()->getIntegerType(); 3500 3501 return (RetTy->isPromotableIntegerType() ? 3502 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3503 } 3504 3505 // Structures with either a non-trivial destructor or a non-trivial 3506 // copy constructor are always indirect. 3507 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 3508 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3509 3510 if (isEmptyRecord(getContext(), RetTy, true)) 3511 return ABIArgInfo::getIgnore(); 3512 3513 // Aggregates <= 8 bytes are returned in r0; other aggregates 3514 // are returned indirectly. 3515 uint64_t Size = getContext().getTypeSize(RetTy); 3516 if (Size <= 64) { 3517 // Return in the smallest viable integer type. 3518 if (Size <= 8) 3519 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3520 if (Size <= 16) 3521 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3522 if (Size <= 32) 3523 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3524 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 3525 } 3526 3527 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 3528 } 3529 3530 llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3531 CodeGenFunction &CGF) const { 3532 // FIXME: Need to handle alignment 3533 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3534 3535 CGBuilderTy &Builder = CGF.Builder; 3536 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 3537 "ap"); 3538 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3539 llvm::Type *PTy = 3540 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3541 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 3542 3543 uint64_t Offset = 3544 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 3545 llvm::Value *NextAddr = 3546 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 3547 "ap.next"); 3548 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3549 3550 return AddrTyped; 3551 } 3552 3553 3554 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { 3555 if (TheTargetCodeGenInfo) 3556 return *TheTargetCodeGenInfo; 3557 3558 const llvm::Triple &Triple = getContext().getTargetInfo().getTriple(); 3559 switch (Triple.getArch()) { 3560 default: 3561 return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types)); 3562 3563 case llvm::Triple::mips: 3564 case llvm::Triple::mipsel: 3565 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true)); 3566 3567 case llvm::Triple::mips64: 3568 case llvm::Triple::mips64el: 3569 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false)); 3570 3571 case llvm::Triple::arm: 3572 case llvm::Triple::thumb: 3573 { 3574 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; 3575 3576 if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0) 3577 Kind = ARMABIInfo::APCS; 3578 else if (CodeGenOpts.FloatABI == "hard") 3579 Kind = ARMABIInfo::AAPCS_VFP; 3580 3581 return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind)); 3582 } 3583 3584 case llvm::Triple::ppc: 3585 return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types)); 3586 3587 case llvm::Triple::ptx32: 3588 case llvm::Triple::ptx64: 3589 return *(TheTargetCodeGenInfo = new PTXTargetCodeGenInfo(Types)); 3590 3591 case llvm::Triple::mblaze: 3592 return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types)); 3593 3594 case llvm::Triple::msp430: 3595 return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types)); 3596 3597 case llvm::Triple::tce: 3598 return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types)); 3599 3600 case llvm::Triple::x86: { 3601 bool DisableMMX = strcmp(getContext().getTargetInfo().getABI(), "no-mmx") == 0; 3602 3603 if (Triple.isOSDarwin()) 3604 return *(TheTargetCodeGenInfo = 3605 new X86_32TargetCodeGenInfo( 3606 Types, true, true, DisableMMX, false)); 3607 3608 switch (Triple.getOS()) { 3609 case llvm::Triple::Cygwin: 3610 case llvm::Triple::MinGW32: 3611 case llvm::Triple::AuroraUX: 3612 case llvm::Triple::DragonFly: 3613 case llvm::Triple::FreeBSD: 3614 case llvm::Triple::OpenBSD: 3615 return *(TheTargetCodeGenInfo = 3616 new X86_32TargetCodeGenInfo( 3617 Types, false, true, DisableMMX, false)); 3618 3619 case llvm::Triple::Win32: 3620 return *(TheTargetCodeGenInfo = 3621 new X86_32TargetCodeGenInfo( 3622 Types, false, true, DisableMMX, true)); 3623 3624 default: 3625 return *(TheTargetCodeGenInfo = 3626 new X86_32TargetCodeGenInfo( 3627 Types, false, false, DisableMMX, false)); 3628 } 3629 } 3630 3631 case llvm::Triple::x86_64: { 3632 bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0; 3633 3634 switch (Triple.getOS()) { 3635 case llvm::Triple::Win32: 3636 case llvm::Triple::MinGW32: 3637 case llvm::Triple::Cygwin: 3638 return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types)); 3639 default: 3640 return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types, 3641 HasAVX)); 3642 } 3643 } 3644 case llvm::Triple::hexagon: 3645 return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types)); 3646 } 3647 } 3648