//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Type.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return CodeGenFunction::hasAggregateLLVMType(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::TargetData &ABIInfo::getTargetData() const {
  return CGT.getTargetData();
}


void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(
                                       const CodeGen::CGFunctionInfo &) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
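///
/// For example, in C code such as
///   struct Empty { };                            // GNU empty-struct extension
///   struct S { int : 0; struct Empty e[2]; };
/// both fields of S would be treated as empty here: the first is an unnamed
/// bit-field and the second is a constant array of empty records. (In C++,
/// fields of class type are never considered empty; see the check below.)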
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isEmptyRecord(Context, i->getType(), true))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i)
    if (!isEmptyField(Context, *i, AllowArrays))
      return false;
  return true;
}

/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
/// a non-trivial destructor or a non-trivial copy constructor.
static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return false;

  return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
}

/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
/// a record type with either a non-trivial destructor or a non-trivial copy
/// constructor.
static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;

  return hasNonTrivialDestructorOrCopyConstructor(RT);
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i) {
      // Ignore empty records.
      if (isEmptyRecord(Context, i->getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return 0;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(i->getType(), Context);
      if (!Found)
        return 0;
    }
  }

  // Check for single element.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return 0;

  return Found;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
      !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it
/// was expanded into separate arguments. If so, we prefer to do the latter to
/// avoid inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems: we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/constructors should not be passed
    // by value.
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// UseX86_MMXType - Return true if this is an MMX type that should use the
/// special x86_mmx type.
bool UseX86_MMXType(llvm::Type *IRType) {
  // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
  // special x86_mmx type.
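  // For example, <8 x i8>, <4 x i16>, and <2 x i32> qualify, while <1 x i64>
  // (a 64-bit scalar element) and <2 x float> (non-integer elements) do not.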
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
         cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
         IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  return Ty;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsMMXDisabled;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);

  /// getIndirectResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

public:

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsMMXDisabled(m) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.isTargetDarwin()) return 5;

    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }
};

}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }

  return true;
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. We apply a similar transformation for
      // pointer types to improve the quality of the generated IR.
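      // For instance, a struct such as
      //   struct S { double d; };
      // would, under this single-element rule, be returned directly as a
      // double rather than through a hidden sret pointer.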
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if (SeltTy->isRealFloatingType() || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isRecordWithSSEVectorType(Context, i->getType()))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    QualType FT = i->getType();

    if (FT->getAs<VectorType>() && Context.getTypeSize(FT) == 128)
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && isRecordWithSSEVectorType(getContext(), Ty))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
  if (!ByVal)
    return ABIArgInfo::getIndirect(0, false);

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  if (StackAlign < TypeAlign)
    return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
                                   /*Realign=*/true);

  return ABIArgInfo::getIndirect(StackAlign);
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
  // FIXME: Set alignment on indirect arguments.
  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return getIndirectResult(Ty, /*ByVal=*/false);

      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    // Expand small (<= 128-bit) record types when we know that the stack
    // layout of those arguments will match the struct. This is important
    // because the LLVM backend isn't smart enough to remove byval, which
    // inhibits many optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpand();

    return getIndirectResult(Ty);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory; we handle this by passing
    // them as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    llvm::Type *IRType = CGT.ConvertType(Ty);
    if (UseX86_MMXType(IRType)) {
      if (IsMMXDisabled)
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            64));
      ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
      AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
      return AAI;
    }

    return ABIArgInfo::getDirect();
  }


  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute if the address needs to be aligned.
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                           CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD =
        dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
                                               CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  llvm::LLVMContext &Context = CGF.getLLVMContext();

  llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.isTargetDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(i8, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//


namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getContext().getTargetInfo().getTriple().isOSDarwin();
  }

  bool HasAVX;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx) {}

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  ABIArgInfo classify(QualType Ty) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
    : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    CodeGen::CGBuilderTy &Builder = CGF.Builder;
    llvm::LLVMContext &Context = CGF.getLLVMContext();

    llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
    llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(Builder, Address, Eight8, 0, 16);

    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CodeGen::CGFunctionInfo &FI) const {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved.
    if (FI.getCallingConvention() == llvm::CallingConv::C) {
      bool HasAVXType = false;
      for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
             ie = FI.arg_end();
           it != ie; ++it) {
        if (it->info.isDirect()) {
          llvm::Type *Ty = it->info.getCoerceToType();
          if (llvm::VectorType *VTy = dyn_cast_or_null<llvm::VectorType>(Ty)) {
            if (VTy->getBitWidth() > 128) {
              HasAVXType = true;
              break;
            }
          }
        }
      }
      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(FI);
  }
};

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    CodeGen::CGBuilderTy &Builder = CGF.Builder;
    llvm::LLVMContext &Context = CGF.getLLVMContext();

    llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
    llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(Builder, Address, Eight8, 0, 16);

    return false;
  }
};

}

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  //   (a) If one of the classes is Memory, the whole argument is passed in
  //       memory.
  //
  //   (b) If X87UP is not preceded by X87, the whole argument is passed in
  //       memory.
  //
  //   (c) If the size of the aggregate exceeds two eightbytes and the first
  //       eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //       argument is passed in memory. NOTE: This is necessary to keep the
  //       ABI working for processors that don't support the __m256 type.
  //
  //   (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic. Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered.
  // The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  //   (a) If both classes are equal, this is the resulting class.
  //
  //   (b) If one of the classes is NO_CLASS, the resulting class is
  //       the other class.
  //
  //   (c) If one of the classes is MEMORY, the result is the MEMORY
  //       class.
  //
  //   (d) If one of the classes is INTEGER, the result is the
  //       INTEGER.
  //
  //   (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  //       MEMORY is used as class.
  //
  //   (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}

void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class
  // for Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType())
      Lo = Hi = Integer;
    else
      Current = Integer;
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 || (HasAVX && Size == 256)) {
      // Arguments of 256-bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to
      // class SSEUP. The original Lo and Hi design considers that types can't
      // be greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
      // This design isn't correct for 256-bits, but since there're no cases
      // where the upper parts would need to be inspected, avoid adding
      // complexity and just consider Hi to match the 64-256 part.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy)
      Current = SSE;
    else if (ET == getContext().DoubleTy)
      Lo = Hi = SSE;
    else if (ET == getContext().LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since Lo and Hi logic isn't extended
    // to work for sizes wider than 128, early check and fallback to memory.
    if (Size > 128 && EltSize != 256)
      return;

    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (hasNonTrivialDestructorOrCopyConstructor(RT))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset = OffsetBase + Layout.getBaseClassOffsetInBits(Base);
        classify(i->getType(), Offset, FieldLo, FieldHi);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory)
          break;
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // four eightbytes, or it contains unaligned fields, it has class MEMORY.
      //
      // The only case a 256-bit wide vector could be used is when the struct
      // contains a single 256-bit element. Since Lo and Hi logic isn't
      // extended to work for sizes wider than 128, early check and fallback
      // to memory.
      //
      if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
        Lo = Memory;
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling: they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  return ABIArgInfo::getIndirect(0);
}

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = HasAVX ? 256 : 128;
    if (Size <= 64 || Size > LargestVector)
      return true;
  }

  return false;
}

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Compute the byval alignment. We specify the alignment of the byval in all
  // cases so that the mid-level optimizer knows the alignment of the byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
  return ABIArgInfo::getIndirect(Align);
}

/// GetByteVectorType - The ABI specifies that a value should be passed in a
/// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as
/// a vector register.
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  llvm::Type *IRType = CGT.ConvertType(Ty);

  // Wrapper structs that just contain vectors are passed just like vectors,
  // strip them off if present.
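  // For example, an IR type such as { <4 x float> } (or even
  // { { <4 x float> } }) is unwrapped to the inner <4 x float> by the loop
  // below.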
  llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
  while (STy && STy->getNumElements() == 1) {
    IRType = STy->getElementType(0);
    STy = dyn_cast<llvm::StructType>(IRType);
  }

  // If the preferred type is a 16-byte vector, prefer to pass it.
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)) {
    llvm::Type *EltTy = VT->getElementType();
    unsigned BitWidth = VT->getBitWidth();
    if ((BitWidth >= 128 && BitWidth <= 256) &&
        (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
         EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
         EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
         EltTy->isIntegerTy(128)))
      return VT;
  }

  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
}

/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or be in
/// alignment padding. The user type specified is known to be at most 128 bits
/// in size, and to have passed through X86_64ABIInfo::classify with a
/// successful classification that put one of the two halves in the INTEGER
/// class.
///
/// It is conservatively correct to return false.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here. This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    // Check each element to see if the element overlaps with the queried
    // range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;

      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = (unsigned)Layout.getBaseClassOffsetInBits(Base);
        if (BaseOffset >= EndBit) continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0;
        if (!BitsContainNoUserData(i->getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest.
    // Yes, this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // much.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit) break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset : 0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}

/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
/// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
                                  const llvm::TargetData &TD) {
  // Base case if we find a float.
  if (IROffset == 0 && IRType->isFloatTy())
    return true;

  // If this is a struct, recurse into the field at the specified offset.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);
    return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
  }

  // If this is an array, recurse into the element at the specified offset.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset/EltSize*EltSize;
    return ContainsFloatAtOffset(EltTy, IROffset, TD);
  }

  return false;
}


/// GetSSETypeAtOffset - Return a type that will be passed by the backend in
/// the low 8 bytes of an XMM register, corresponding to the SSE class.
llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  // The only three choices we have are either double, <2 x float>, or float.
  // We pass as float if the last 4 bytes are just padding. This happens for
  // structs that contain 3 floats.
  if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
                            SourceOffset*8+64, getContext()))
    return llvm::Type::getFloatTy(getVMContext());

  // We want to pass as <2 x float> if the LLVM IR type contains a float at
  // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
  // case.
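  // For example, a source struct like
  //   struct { float x, y; };
  // lowers to the IR type {float, float}, which has floats at byte offsets 0
  // and 4, so it would be passed in a single XMM register as <2 x float>.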
1606 if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) && 1607 ContainsFloatAtOffset(IRType, IROffset+4, getTargetData())) 1608 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 1609 1610 return llvm::Type::getDoubleTy(getVMContext()); 1611 } 1612 1613 1614 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 1615 /// an 8-byte GPR. This means that we either have a scalar or we are talking 1616 /// about the high or low part of an up-to-16-byte struct. This routine picks 1617 /// the best LLVM IR type to represent this, which may be i64 or may be anything 1618 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 1619 /// etc). 1620 /// 1621 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 1622 /// the source type. IROffset is an offset in bytes into the LLVM IR type that 1623 /// the 8-byte value references. PrefType may be null. 1624 /// 1625 /// SourceTy is the source level type for the entire argument. SourceOffset is 1626 /// an offset into this that we're processing (which is always either 0 or 8). 1627 /// 1628 llvm::Type *X86_64ABIInfo:: 1629 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1630 QualType SourceTy, unsigned SourceOffset) const { 1631 // If we're dealing with an un-offset LLVM IR type, then it means that we're 1632 // returning an 8-byte unit starting with it. See if we can safely use it. 1633 if (IROffset == 0) { 1634 // Pointers and int64's always fill the 8-byte unit. 1635 if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64)) 1636 return IRType; 1637 1638 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 1639 // goodness in the source type is just tail padding. This is allowed to 1640 // kick in for struct {double,int} on the int, but not on 1641 // struct{double,int,int} because we wouldn't return the second int. We 1642 // have to do this analysis on the source type because we can't depend on 1643 // unions being lowered a specific way etc. 1644 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 1645 IRType->isIntegerTy(32)) { 1646 unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth(); 1647 1648 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 1649 SourceOffset*8+64, getContext())) 1650 return IRType; 1651 } 1652 } 1653 1654 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1655 // If this is a struct, recurse into the field at the specified offset. 1656 const llvm::StructLayout *SL = getTargetData().getStructLayout(STy); 1657 if (IROffset < SL->getSizeInBytes()) { 1658 unsigned FieldIdx = SL->getElementContainingOffset(IROffset); 1659 IROffset -= SL->getElementOffset(FieldIdx); 1660 1661 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, 1662 SourceTy, SourceOffset); 1663 } 1664 } 1665 1666 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1667 llvm::Type *EltTy = ATy->getElementType(); 1668 unsigned EltSize = getTargetData().getTypeAllocSize(EltTy); 1669 unsigned EltOffset = IROffset/EltSize*EltSize; 1670 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, 1671 SourceOffset); 1672 } 1673 1674 // Okay, we don't have any better idea of what to pass, so we pass this in an 1675 // integer register that isn't too big to fit the rest of the struct. 
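  // For illustration (not an exhaustive description): a 5-byte struct such as
  // struct { char c[5]; } is classified INTEGER; with SourceOffset == 0 the
  // computation below yields std::min(5, 8) * 8 == 40, so the value is handed
  // to the backend as an i40.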
  unsigned TySizeInBytes =
    (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
}


/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
/// be used as elements of a two register pair to pass or return, return a
/// first class aggregate to represent them.  For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                           const llvm::TargetData &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
  // at offset 8.  If the high and low parts we inferred are both 4-byte types
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
  // the second element at offset 8.  Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // To handle this, we have to increase the size of the low part so that the
  // second element will start at an 8 byte offset.  We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
    // There are only two sorts of types the ABI generation code can produce
    // for the low part of a pair that aren't 8 bytes in size: float or
    // i8/i16/i32.  Promote these to a larger type.
    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);

  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}

ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

    // AMD64-ABI 3.2.3p4: Rule 3.
If the class is INTEGER, the next 1762 // available register of the sequence %rax, %rdx is used. 1763 case Integer: 1764 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 1765 1766 // If we have a sign or zero extended integer, make sure to return Extend 1767 // so that the parameter gets the right LLVM IR attributes. 1768 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 1769 // Treat an enum type as its underlying type. 1770 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 1771 RetTy = EnumTy->getDecl()->getIntegerType(); 1772 1773 if (RetTy->isIntegralOrEnumerationType() && 1774 RetTy->isPromotableIntegerType()) 1775 return ABIArgInfo::getExtend(); 1776 } 1777 break; 1778 1779 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 1780 // available SSE register of the sequence %xmm0, %xmm1 is used. 1781 case SSE: 1782 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 1783 break; 1784 1785 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 1786 // returned on the X87 stack in %st0 as 80-bit x87 number. 1787 case X87: 1788 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 1789 break; 1790 1791 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 1792 // part of the value is returned in %st0 and the imaginary part in 1793 // %st1. 1794 case ComplexX87: 1795 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 1796 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), 1797 llvm::Type::getX86_FP80Ty(getVMContext()), 1798 NULL); 1799 break; 1800 } 1801 1802 llvm::Type *HighPart = 0; 1803 switch (Hi) { 1804 // Memory was handled previously and X87 should 1805 // never occur as a hi class. 1806 case Memory: 1807 case X87: 1808 llvm_unreachable("Invalid classification for hi word."); 1809 1810 case ComplexX87: // Previously handled. 1811 case NoClass: 1812 break; 1813 1814 case Integer: 1815 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 1816 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 1817 return ABIArgInfo::getDirect(HighPart, 8); 1818 break; 1819 case SSE: 1820 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 1821 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 1822 return ABIArgInfo::getDirect(HighPart, 8); 1823 break; 1824 1825 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 1826 // is passed in the next available eightbyte chunk if the last used 1827 // vector register. 1828 // 1829 // SSEUP should always be preceded by SSE, just widen. 1830 case SSEUp: 1831 assert(Lo == SSE && "Unexpected SSEUp classification."); 1832 ResType = GetByteVectorType(RetTy); 1833 break; 1834 1835 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 1836 // returned together with the previous X87 value in %st0. 1837 case X87Up: 1838 // If X87Up is preceded by X87, we don't need to do 1839 // anything. However, in some cases with unions it may not be 1840 // preceded by X87. In such situations we follow gcc and pass the 1841 // extra bits in an SSE reg. 1842 if (Lo != X87) { 1843 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 1844 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 1845 return ABIArgInfo::getDirect(HighPart, 8); 1846 } 1847 break; 1848 } 1849 1850 // If a high part was specified, merge it together with the low part. It is 1851 // known to pass in the high eightbyte of the result. 
We do this by forming a 1852 // first class struct aggregate with the high and low part: {low, high} 1853 if (HighPart) 1854 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData()); 1855 1856 return ABIArgInfo::getDirect(ResType); 1857 } 1858 1859 ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt, 1860 unsigned &neededSSE) const { 1861 X86_64ABIInfo::Class Lo, Hi; 1862 classify(Ty, 0, Lo, Hi); 1863 1864 // Check some invariants. 1865 // FIXME: Enforce these by construction. 1866 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 1867 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 1868 1869 neededInt = 0; 1870 neededSSE = 0; 1871 llvm::Type *ResType = 0; 1872 switch (Lo) { 1873 case NoClass: 1874 if (Hi == NoClass) 1875 return ABIArgInfo::getIgnore(); 1876 // If the low part is just padding, it takes no register, leave ResType 1877 // null. 1878 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 1879 "Unknown missing lo part"); 1880 break; 1881 1882 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 1883 // on the stack. 1884 case Memory: 1885 1886 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 1887 // COMPLEX_X87, it is passed in memory. 1888 case X87: 1889 case ComplexX87: 1890 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 1891 ++neededInt; 1892 return getIndirectResult(Ty); 1893 1894 case SSEUp: 1895 case X87Up: 1896 llvm_unreachable("Invalid classification for lo word."); 1897 1898 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 1899 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 1900 // and %r9 is used. 1901 case Integer: 1902 ++neededInt; 1903 1904 // Pick an 8-byte type based on the preferred type. 1905 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 1906 1907 // If we have a sign or zero extended integer, make sure to return Extend 1908 // so that the parameter gets the right LLVM IR attributes. 1909 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 1910 // Treat an enum type as its underlying type. 1911 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1912 Ty = EnumTy->getDecl()->getIntegerType(); 1913 1914 if (Ty->isIntegralOrEnumerationType() && 1915 Ty->isPromotableIntegerType()) 1916 return ABIArgInfo::getExtend(); 1917 } 1918 1919 break; 1920 1921 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 1922 // available SSE register is used, the registers are taken in the 1923 // order from %xmm0 to %xmm7. 1924 case SSE: { 1925 llvm::Type *IRType = CGT.ConvertType(Ty); 1926 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 1927 ++neededSSE; 1928 break; 1929 } 1930 } 1931 1932 llvm::Type *HighPart = 0; 1933 switch (Hi) { 1934 // Memory was handled previously, ComplexX87 and X87 should 1935 // never occur as hi classes, and X87Up must be preceded by X87, 1936 // which is passed in memory. 1937 case Memory: 1938 case X87: 1939 case ComplexX87: 1940 llvm_unreachable("Invalid classification for hi word."); 1941 1942 case NoClass: break; 1943 1944 case Integer: 1945 ++neededInt; 1946 // Pick an 8-byte type based on the preferred type. 1947 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 1948 1949 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 1950 return ABIArgInfo::getDirect(HighPart, 8); 1951 break; 1952 1953 // X87Up generally doesn't occur here (long double is passed in 1954 // memory), except in situations involving unions. 
1955 case X87Up: 1956 case SSE: 1957 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 1958 1959 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 1960 return ABIArgInfo::getDirect(HighPart, 8); 1961 1962 ++neededSSE; 1963 break; 1964 1965 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 1966 // eightbyte is passed in the upper half of the last used SSE 1967 // register. This only happens when 128-bit vectors are passed. 1968 case SSEUp: 1969 assert(Lo == SSE && "Unexpected SSEUp classification"); 1970 ResType = GetByteVectorType(Ty); 1971 break; 1972 } 1973 1974 // If a high part was specified, merge it together with the low part. It is 1975 // known to pass in the high eightbyte of the result. We do this by forming a 1976 // first class struct aggregate with the high and low part: {low, high} 1977 if (HighPart) 1978 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData()); 1979 1980 return ABIArgInfo::getDirect(ResType); 1981 } 1982 1983 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 1984 1985 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 1986 1987 // Keep track of the number of assigned registers. 1988 unsigned freeIntRegs = 6, freeSSERegs = 8; 1989 1990 // If the return value is indirect, then the hidden argument is consuming one 1991 // integer register. 1992 if (FI.getReturnInfo().isIndirect()) 1993 --freeIntRegs; 1994 1995 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 1996 // get assigned (in left-to-right order) for passing as follows... 1997 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 1998 it != ie; ++it) { 1999 unsigned neededInt, neededSSE; 2000 it->info = classifyArgumentType(it->type, neededInt, neededSSE); 2001 2002 // AMD64-ABI 3.2.3p3: If there are no registers available for any 2003 // eightbyte of an argument, the whole argument is passed on the 2004 // stack. If registers have already been assigned for some 2005 // eightbytes of such an argument, the assignments get reverted. 2006 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 2007 freeIntRegs -= neededInt; 2008 freeSSERegs -= neededSSE; 2009 } else { 2010 it->info = getIndirectResult(it->type); 2011 } 2012 } 2013 } 2014 2015 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 2016 QualType Ty, 2017 CodeGenFunction &CGF) { 2018 llvm::Value *overflow_arg_area_p = 2019 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 2020 llvm::Value *overflow_arg_area = 2021 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 2022 2023 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 2024 // byte boundary if alignment needed by type exceeds 8 byte boundary. 2025 // It isn't stated explicitly in the standard, but in practice we use 2026 // alignment greater than 16 where necessary. 
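  // For illustration, assuming a 16-byte-aligned type such as __m128: if
  // overflow_arg_area currently points at an address ending in 0x28, the
  // rounding below ((addr + 15) & ~15) advances it to 0x30 before the
  // argument is fetched.  Types with alignment <= 8 skip this adjustment.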
2027 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 2028 if (Align > 8) { 2029 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 2030 llvm::Value *Offset = 2031 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 2032 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 2033 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 2034 CGF.Int64Ty); 2035 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); 2036 overflow_arg_area = 2037 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 2038 overflow_arg_area->getType(), 2039 "overflow_arg_area.align"); 2040 } 2041 2042 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 2043 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2044 llvm::Value *Res = 2045 CGF.Builder.CreateBitCast(overflow_arg_area, 2046 llvm::PointerType::getUnqual(LTy)); 2047 2048 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 2049 // l->overflow_arg_area + sizeof(type). 2050 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 2051 // an 8 byte boundary. 2052 2053 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 2054 llvm::Value *Offset = 2055 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 2056 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 2057 "overflow_arg_area.next"); 2058 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 2059 2060 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 2061 return Res; 2062 } 2063 2064 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2065 CodeGenFunction &CGF) const { 2066 llvm::LLVMContext &VMContext = CGF.getLLVMContext(); 2067 2068 // Assume that va_list type is correct; should be pointer to LLVM type: 2069 // struct { 2070 // i32 gp_offset; 2071 // i32 fp_offset; 2072 // i8* overflow_arg_area; 2073 // i8* reg_save_area; 2074 // }; 2075 unsigned neededInt, neededSSE; 2076 2077 Ty = CGF.getContext().getCanonicalType(Ty); 2078 ABIArgInfo AI = classifyArgumentType(Ty, neededInt, neededSSE); 2079 2080 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 2081 // in the registers. If not go to step 7. 2082 if (!neededInt && !neededSSE) 2083 return EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2084 2085 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 2086 // general purpose registers needed to pass type and num_fp to hold 2087 // the number of floating point registers needed. 2088 2089 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 2090 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 2091 // l->fp_offset > 304 - num_fp * 16 go to step 7. 2092 // 2093 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of 2094 // register save space). 
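  // Sketch of the layout assumed below: the register save area holds the six
  // GP argument registers (6 * 8 = 48 bytes, so gp_offset ranges over [0,48])
  // followed by the eight SSE argument registers (8 * 16 = 128 bytes, so
  // fp_offset ranges over [48,176]).  Hence the "fits in registers" checks
  // against 48 - num_gp * 8 and 176 - num_fp * 16 just below.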
2095 2096 llvm::Value *InRegs = 0; 2097 llvm::Value *gp_offset_p = 0, *gp_offset = 0; 2098 llvm::Value *fp_offset_p = 0, *fp_offset = 0; 2099 if (neededInt) { 2100 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); 2101 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); 2102 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); 2103 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); 2104 } 2105 2106 if (neededSSE) { 2107 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); 2108 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); 2109 llvm::Value *FitsInFP = 2110 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); 2111 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); 2112 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; 2113 } 2114 2115 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 2116 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 2117 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 2118 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 2119 2120 // Emit code to load the value if it was passed in registers. 2121 2122 CGF.EmitBlock(InRegBlock); 2123 2124 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with 2125 // an offset of l->gp_offset and/or l->fp_offset. This may require 2126 // copying to a temporary location in case the parameter is passed 2127 // in different register classes or requires an alignment greater 2128 // than 8 for general purpose registers and 16 for XMM registers. 2129 // 2130 // FIXME: This really results in shameful code when we end up needing to 2131 // collect arguments from different places; often what should result in a 2132 // simple assembling of a structure from scattered addresses has many more 2133 // loads than necessary. Can we clean this up? 2134 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2135 llvm::Value *RegAddr = 2136 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3), 2137 "reg_save_area"); 2138 if (neededInt && neededSSE) { 2139 // FIXME: Cleanup. 2140 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 2141 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 2142 llvm::Value *Tmp = CGF.CreateTempAlloca(ST); 2143 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 2144 llvm::Type *TyLo = ST->getElementType(0); 2145 llvm::Type *TyHi = ST->getElementType(1); 2146 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 2147 "Unexpected ABI info for mixed regs"); 2148 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 2149 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 2150 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2151 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2152 llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr; 2153 llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? 
GPAddr : FPAddr; 2154 llvm::Value *V = 2155 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 2156 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2157 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 2158 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2159 2160 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2161 llvm::PointerType::getUnqual(LTy)); 2162 } else if (neededInt) { 2163 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2164 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2165 llvm::PointerType::getUnqual(LTy)); 2166 } else if (neededSSE == 1) { 2167 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2168 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2169 llvm::PointerType::getUnqual(LTy)); 2170 } else { 2171 assert(neededSSE == 2 && "Invalid number of needed registers!"); 2172 // SSE registers are spaced 16 bytes apart in the register save 2173 // area, we need to collect the two eightbytes together. 2174 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2175 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); 2176 llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext); 2177 llvm::Type *DblPtrTy = 2178 llvm::PointerType::getUnqual(DoubleTy); 2179 llvm::StructType *ST = llvm::StructType::get(DoubleTy, 2180 DoubleTy, NULL); 2181 llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST); 2182 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 2183 DblPtrTy)); 2184 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2185 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 2186 DblPtrTy)); 2187 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2188 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2189 llvm::PointerType::getUnqual(LTy)); 2190 } 2191 2192 // AMD64-ABI 3.5.7p5: Step 5. Set: 2193 // l->gp_offset = l->gp_offset + num_gp * 8 2194 // l->fp_offset = l->fp_offset + num_fp * 16. 2195 if (neededInt) { 2196 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2197 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2198 gp_offset_p); 2199 } 2200 if (neededSSE) { 2201 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2202 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2203 fp_offset_p); 2204 } 2205 CGF.EmitBranch(ContBlock); 2206 2207 // Emit code to load the value if it was passed in memory. 2208 2209 CGF.EmitBlock(InMemBlock); 2210 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2211 2212 // Return the appropriate result. 
2213 2214 CGF.EmitBlock(ContBlock); 2215 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2, 2216 "vaarg.addr"); 2217 ResAddr->addIncoming(RegAddr, InRegBlock); 2218 ResAddr->addIncoming(MemAddr, InMemBlock); 2219 return ResAddr; 2220 } 2221 2222 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const { 2223 2224 if (Ty->isVoidType()) 2225 return ABIArgInfo::getIgnore(); 2226 2227 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2228 Ty = EnumTy->getDecl()->getIntegerType(); 2229 2230 uint64_t Size = getContext().getTypeSize(Ty); 2231 2232 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2233 if (hasNonTrivialDestructorOrCopyConstructor(RT) || 2234 RT->getDecl()->hasFlexibleArrayMember()) 2235 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2236 2237 // FIXME: mingw-w64-gcc emits 128-bit struct as i128 2238 if (Size == 128 && 2239 getContext().getTargetInfo().getTriple().getOS() == llvm::Triple::MinGW32) 2240 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2241 Size)); 2242 2243 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 2244 // not 1, 2, 4, or 8 bytes, must be passed by reference." 2245 if (Size <= 64 && 2246 (Size & (Size - 1)) == 0) 2247 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2248 Size)); 2249 2250 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2251 } 2252 2253 if (Ty->isPromotableIntegerType()) 2254 return ABIArgInfo::getExtend(); 2255 2256 return ABIArgInfo::getDirect(); 2257 } 2258 2259 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2260 2261 QualType RetTy = FI.getReturnType(); 2262 FI.getReturnInfo() = classify(RetTy); 2263 2264 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2265 it != ie; ++it) 2266 it->info = classify(it->type); 2267 } 2268 2269 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2270 CodeGenFunction &CGF) const { 2271 llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); 2272 llvm::Type *BPP = llvm::PointerType::getUnqual(BP); 2273 2274 CGBuilderTy &Builder = CGF.Builder; 2275 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 2276 "ap"); 2277 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2278 llvm::Type *PTy = 2279 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2280 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2281 2282 uint64_t Offset = 2283 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8); 2284 llvm::Value *NextAddr = 2285 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2286 "ap.next"); 2287 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2288 2289 return AddrTyped; 2290 } 2291 2292 // PowerPC-32 2293 2294 namespace { 2295 class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2296 public: 2297 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2298 2299 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2300 // This is recovered from gcc output. 2301 return 1; // r1 is the dedicated stack pointer 2302 } 2303 2304 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2305 llvm::Value *Address) const; 2306 }; 2307 2308 } 2309 2310 bool 2311 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2312 llvm::Value *Address) const { 2313 // This is calculated from the LLVM and GCC tables and verified 2314 // against gcc output. AFAIK all ABIs use the same encoding. 
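  // Roughly speaking, each AssignToArrayRange call below stores a constant
  // byte width into a contiguous slice of the EH register-size table, e.g.
  // AssignToArrayRange(Builder, Address, Four8, 0, 31) marks DWARF registers
  // 0-31 (the GPRs) as 4 bytes wide.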
2315 2316 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2317 llvm::LLVMContext &Context = CGF.getLLVMContext(); 2318 2319 llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context); 2320 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2321 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2322 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2323 2324 // 0-31: r0-31, the 4-byte general-purpose registers 2325 AssignToArrayRange(Builder, Address, Four8, 0, 31); 2326 2327 // 32-63: fp0-31, the 8-byte floating-point registers 2328 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2329 2330 // 64-76 are various 4-byte special-purpose registers: 2331 // 64: mq 2332 // 65: lr 2333 // 66: ctr 2334 // 67: ap 2335 // 68-75 cr0-7 2336 // 76: xer 2337 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2338 2339 // 77-108: v0-31, the 16-byte vector registers 2340 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2341 2342 // 109: vrsave 2343 // 110: vscr 2344 // 111: spe_acc 2345 // 112: spefscr 2346 // 113: sfp 2347 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2348 2349 return false; 2350 } 2351 2352 2353 //===----------------------------------------------------------------------===// 2354 // ARM ABI Implementation 2355 //===----------------------------------------------------------------------===// 2356 2357 namespace { 2358 2359 class ARMABIInfo : public ABIInfo { 2360 public: 2361 enum ABIKind { 2362 APCS = 0, 2363 AAPCS = 1, 2364 AAPCS_VFP 2365 }; 2366 2367 private: 2368 ABIKind Kind; 2369 2370 public: 2371 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {} 2372 2373 bool isEABI() const { 2374 StringRef Env = getContext().getTargetInfo().getTriple().getEnvironmentName(); 2375 return (Env == "gnueabi" || Env == "eabi"); 2376 } 2377 2378 private: 2379 ABIKind getABIKind() const { return Kind; } 2380 2381 ABIArgInfo classifyReturnType(QualType RetTy) const; 2382 ABIArgInfo classifyArgumentType(QualType RetTy) const; 2383 2384 virtual void computeInfo(CGFunctionInfo &FI) const; 2385 2386 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2387 CodeGenFunction &CGF) const; 2388 }; 2389 2390 class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 2391 public: 2392 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 2393 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 2394 2395 const ARMABIInfo &getABIInfo() const { 2396 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 2397 } 2398 2399 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2400 return 13; 2401 } 2402 2403 StringRef getARCRetainAutoreleasedReturnValueMarker() const { 2404 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; 2405 } 2406 2407 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2408 llvm::Value *Address) const { 2409 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2410 llvm::LLVMContext &Context = CGF.getLLVMContext(); 2411 2412 llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context); 2413 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2414 2415 // 0-15 are the 16 integer registers. 
2416 AssignToArrayRange(Builder, Address, Four8, 0, 15); 2417 2418 return false; 2419 } 2420 2421 unsigned getSizeOfUnwindException() const { 2422 if (getABIInfo().isEABI()) return 88; 2423 return TargetCodeGenInfo::getSizeOfUnwindException(); 2424 } 2425 }; 2426 2427 } 2428 2429 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 2430 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2431 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2432 it != ie; ++it) 2433 it->info = classifyArgumentType(it->type); 2434 2435 // Always honor user-specified calling convention. 2436 if (FI.getCallingConvention() != llvm::CallingConv::C) 2437 return; 2438 2439 // Calling convention as default by an ABI. 2440 llvm::CallingConv::ID DefaultCC; 2441 if (isEABI()) 2442 DefaultCC = llvm::CallingConv::ARM_AAPCS; 2443 else 2444 DefaultCC = llvm::CallingConv::ARM_APCS; 2445 2446 // If user did not ask for specific calling convention explicitly (e.g. via 2447 // pcs attribute), set effective calling convention if it's different than ABI 2448 // default. 2449 switch (getABIKind()) { 2450 case APCS: 2451 if (DefaultCC != llvm::CallingConv::ARM_APCS) 2452 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS); 2453 break; 2454 case AAPCS: 2455 if (DefaultCC != llvm::CallingConv::ARM_AAPCS) 2456 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS); 2457 break; 2458 case AAPCS_VFP: 2459 if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP) 2460 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP); 2461 break; 2462 } 2463 } 2464 2465 /// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous 2466 /// aggregate. If HAMembers is non-null, the number of base elements 2467 /// contained in the type is returned through it; this is used for the 2468 /// recursive calls that check aggregate component types. 2469 static bool isHomogeneousAggregate(QualType Ty, const Type *&Base, 2470 ASTContext &Context, 2471 uint64_t *HAMembers = 0) { 2472 uint64_t Members; 2473 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 2474 if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members)) 2475 return false; 2476 Members *= AT->getSize().getZExtValue(); 2477 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 2478 const RecordDecl *RD = RT->getDecl(); 2479 if (RD->isUnion() || RD->hasFlexibleArrayMember()) 2480 return false; 2481 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 2482 if (!CXXRD->isAggregate()) 2483 return false; 2484 } 2485 Members = 0; 2486 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2487 i != e; ++i) { 2488 const FieldDecl *FD = *i; 2489 uint64_t FldMembers; 2490 if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers)) 2491 return false; 2492 Members += FldMembers; 2493 } 2494 } else { 2495 Members = 1; 2496 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 2497 Members = 2; 2498 Ty = CT->getElementType(); 2499 } 2500 2501 // Homogeneous aggregates for AAPCS-VFP must have base types of float, 2502 // double, or 64-bit or 128-bit vectors. 
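    // For example (informally): struct { float x, y, z, w; } is a homogeneous
    // aggregate with Base == float and four members, while
    // struct { float f; double d; } is not, because its members have
    // differing base types.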
2503 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 2504 if (BT->getKind() != BuiltinType::Float && 2505 BT->getKind() != BuiltinType::Double) 2506 return false; 2507 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 2508 unsigned VecSize = Context.getTypeSize(VT); 2509 if (VecSize != 64 && VecSize != 128) 2510 return false; 2511 } else { 2512 return false; 2513 } 2514 2515 // The base type must be the same for all members. Vector types of the 2516 // same total size are treated as being equivalent here. 2517 const Type *TyPtr = Ty.getTypePtr(); 2518 if (!Base) 2519 Base = TyPtr; 2520 if (Base != TyPtr && 2521 (!Base->isVectorType() || !TyPtr->isVectorType() || 2522 Context.getTypeSize(Base) != Context.getTypeSize(TyPtr))) 2523 return false; 2524 } 2525 2526 // Homogeneous Aggregates can have at most 4 members of the base type. 2527 if (HAMembers) 2528 *HAMembers = Members; 2529 return (Members <= 4); 2530 } 2531 2532 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const { 2533 if (!isAggregateTypeForABI(Ty)) { 2534 // Treat an enum type as its underlying type. 2535 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2536 Ty = EnumTy->getDecl()->getIntegerType(); 2537 2538 return (Ty->isPromotableIntegerType() ? 2539 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2540 } 2541 2542 // Ignore empty records. 2543 if (isEmptyRecord(getContext(), Ty, true)) 2544 return ABIArgInfo::getIgnore(); 2545 2546 // Structures with either a non-trivial destructor or a non-trivial 2547 // copy constructor are always indirect. 2548 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 2549 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2550 2551 if (getABIKind() == ARMABIInfo::AAPCS_VFP) { 2552 // Homogeneous Aggregates need to be expanded. 2553 const Type *Base = 0; 2554 if (isHomogeneousAggregate(Ty, Base, getContext())) 2555 return ABIArgInfo::getExpand(); 2556 } 2557 2558 // Otherwise, pass by coercing to a structure of the appropriate size. 2559 // 2560 // FIXME: This is kind of nasty... but there isn't much choice because the ARM 2561 // backend doesn't support byval. 2562 // FIXME: This doesn't handle alignment > 64 bits. 2563 llvm::Type* ElemTy; 2564 unsigned SizeRegs; 2565 if (getContext().getTypeAlign(Ty) > 32) { 2566 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 2567 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 2568 } else { 2569 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 2570 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 2571 } 2572 2573 llvm::Type *STy = 2574 llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL); 2575 return ABIArgInfo::getDirect(STy); 2576 } 2577 2578 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 2579 llvm::LLVMContext &VMContext) { 2580 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 2581 // is called integer-like if its size is less than or equal to one word, and 2582 // the offset of each of its addressable sub-fields is zero. 2583 2584 uint64_t Size = Context.getTypeSize(Ty); 2585 2586 // Check that the type fits in a word. 2587 if (Size > 32) 2588 return false; 2589 2590 // FIXME: Handle vector types! 2591 if (Ty->isVectorType()) 2592 return false; 2593 2594 // Float types are never treated as "integer like". 2595 if (Ty->isRealFloatingType()) 2596 return false; 2597 2598 // If this is a builtin or pointer type then it is ok. 
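  // (So plain integer, character and pointer types qualify immediately.  For
  // record types the field checks further down apply; e.g. struct { char a, b; }
  // is rejected because 'b' sits at a non-zero offset.)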
2599 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 2600 return true; 2601 2602 // Small complex integer types are "integer like". 2603 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 2604 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 2605 2606 // Single element and zero sized arrays should be allowed, by the definition 2607 // above, but they are not. 2608 2609 // Otherwise, it must be a record type. 2610 const RecordType *RT = Ty->getAs<RecordType>(); 2611 if (!RT) return false; 2612 2613 // Ignore records with flexible arrays. 2614 const RecordDecl *RD = RT->getDecl(); 2615 if (RD->hasFlexibleArrayMember()) 2616 return false; 2617 2618 // Check that all sub-fields are at offset 0, and are themselves "integer 2619 // like". 2620 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 2621 2622 bool HadField = false; 2623 unsigned idx = 0; 2624 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2625 i != e; ++i, ++idx) { 2626 const FieldDecl *FD = *i; 2627 2628 // Bit-fields are not addressable, we only need to verify they are "integer 2629 // like". We still have to disallow a subsequent non-bitfield, for example: 2630 // struct { int : 0; int x } 2631 // is non-integer like according to gcc. 2632 if (FD->isBitField()) { 2633 if (!RD->isUnion()) 2634 HadField = true; 2635 2636 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 2637 return false; 2638 2639 continue; 2640 } 2641 2642 // Check if this field is at offset 0. 2643 if (Layout.getFieldOffset(idx) != 0) 2644 return false; 2645 2646 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 2647 return false; 2648 2649 // Only allow at most one field in a structure. This doesn't match the 2650 // wording above, but follows gcc in situations with a field following an 2651 // empty structure. 2652 if (!RD->isUnion()) { 2653 if (HadField) 2654 return false; 2655 2656 HadField = true; 2657 } 2658 } 2659 2660 return true; 2661 } 2662 2663 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const { 2664 if (RetTy->isVoidType()) 2665 return ABIArgInfo::getIgnore(); 2666 2667 // Large vector types should be returned via memory. 2668 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 2669 return ABIArgInfo::getIndirect(0); 2670 2671 if (!isAggregateTypeForABI(RetTy)) { 2672 // Treat an enum type as its underlying type. 2673 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 2674 RetTy = EnumTy->getDecl()->getIntegerType(); 2675 2676 return (RetTy->isPromotableIntegerType() ? 2677 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2678 } 2679 2680 // Structures with either a non-trivial destructor or a non-trivial 2681 // copy constructor are always indirect. 2682 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 2683 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2684 2685 // Are we following APCS? 2686 if (getABIKind() == APCS) { 2687 if (isEmptyRecord(getContext(), RetTy, false)) 2688 return ABIArgInfo::getIgnore(); 2689 2690 // Complex types are all returned as packed integers. 2691 // 2692 // FIXME: Consider using 2 x vector types if the back end handles them 2693 // correctly. 2694 if (RetTy->isAnyComplexType()) 2695 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2696 getContext().getTypeSize(RetTy))); 2697 2698 // Integer like structures are returned in r0. 2699 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 2700 // Return in the smallest viable integer type. 
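      // For instance, an integer-like struct { char c; } comes back as i8 and
      // struct { short s; } as i16; anything wider (up to the 32-bit limit
      // enforced by isIntegerLikeType) is returned as i32.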
2701 uint64_t Size = getContext().getTypeSize(RetTy); 2702 if (Size <= 8) 2703 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 2704 if (Size <= 16) 2705 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 2706 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 2707 } 2708 2709 // Otherwise return in memory. 2710 return ABIArgInfo::getIndirect(0); 2711 } 2712 2713 // Otherwise this is an AAPCS variant. 2714 2715 if (isEmptyRecord(getContext(), RetTy, true)) 2716 return ABIArgInfo::getIgnore(); 2717 2718 // Check for homogeneous aggregates with AAPCS-VFP. 2719 if (getABIKind() == AAPCS_VFP) { 2720 const Type *Base = 0; 2721 if (isHomogeneousAggregate(RetTy, Base, getContext())) 2722 // Homogeneous Aggregates are returned directly. 2723 return ABIArgInfo::getDirect(); 2724 } 2725 2726 // Aggregates <= 4 bytes are returned in r0; other aggregates 2727 // are returned indirectly. 2728 uint64_t Size = getContext().getTypeSize(RetTy); 2729 if (Size <= 32) { 2730 // Return in the smallest viable integer type. 2731 if (Size <= 8) 2732 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 2733 if (Size <= 16) 2734 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 2735 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 2736 } 2737 2738 return ABIArgInfo::getIndirect(0); 2739 } 2740 2741 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2742 CodeGenFunction &CGF) const { 2743 llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); 2744 llvm::Type *BPP = llvm::PointerType::getUnqual(BP); 2745 2746 CGBuilderTy &Builder = CGF.Builder; 2747 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 2748 "ap"); 2749 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2750 // Handle address alignment for type alignment > 32 bits 2751 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; 2752 if (TyAlign > 4) { 2753 assert((TyAlign & (TyAlign - 1)) == 0 && 2754 "Alignment is not power of 2!"); 2755 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty); 2756 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1)); 2757 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1))); 2758 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 2759 } 2760 llvm::Type *PTy = 2761 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2762 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2763 2764 uint64_t Offset = 2765 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 2766 llvm::Value *NextAddr = 2767 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2768 "ap.next"); 2769 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2770 2771 return AddrTyped; 2772 } 2773 2774 //===----------------------------------------------------------------------===// 2775 // PTX ABI Implementation 2776 //===----------------------------------------------------------------------===// 2777 2778 namespace { 2779 2780 class PTXABIInfo : public ABIInfo { 2781 public: 2782 PTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 2783 2784 ABIArgInfo classifyReturnType(QualType RetTy) const; 2785 ABIArgInfo classifyArgumentType(QualType Ty) const; 2786 2787 virtual void computeInfo(CGFunctionInfo &FI) const; 2788 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2789 CodeGenFunction &CFG) const; 2790 }; 2791 2792 class PTXTargetCodeGenInfo : public TargetCodeGenInfo { 2793 public: 2794 
PTXTargetCodeGenInfo(CodeGenTypes &CGT) 2795 : TargetCodeGenInfo(new PTXABIInfo(CGT)) {} 2796 2797 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2798 CodeGen::CodeGenModule &M) const; 2799 }; 2800 2801 ABIArgInfo PTXABIInfo::classifyReturnType(QualType RetTy) const { 2802 if (RetTy->isVoidType()) 2803 return ABIArgInfo::getIgnore(); 2804 if (isAggregateTypeForABI(RetTy)) 2805 return ABIArgInfo::getIndirect(0); 2806 return ABIArgInfo::getDirect(); 2807 } 2808 2809 ABIArgInfo PTXABIInfo::classifyArgumentType(QualType Ty) const { 2810 if (isAggregateTypeForABI(Ty)) 2811 return ABIArgInfo::getIndirect(0); 2812 2813 return ABIArgInfo::getDirect(); 2814 } 2815 2816 void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const { 2817 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2818 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2819 it != ie; ++it) 2820 it->info = classifyArgumentType(it->type); 2821 2822 // Always honor user-specified calling convention. 2823 if (FI.getCallingConvention() != llvm::CallingConv::C) 2824 return; 2825 2826 // Calling convention as default by an ABI. 2827 llvm::CallingConv::ID DefaultCC; 2828 const LangOptions &LangOpts = getContext().getLangOptions(); 2829 if (LangOpts.OpenCL || LangOpts.CUDA) { 2830 // If we are in OpenCL or CUDA mode, then default to device functions 2831 DefaultCC = llvm::CallingConv::PTX_Device; 2832 } else { 2833 // If we are in standard C/C++ mode, use the triple to decide on the default 2834 StringRef Env = 2835 getContext().getTargetInfo().getTriple().getEnvironmentName(); 2836 if (Env == "device") 2837 DefaultCC = llvm::CallingConv::PTX_Device; 2838 else 2839 DefaultCC = llvm::CallingConv::PTX_Kernel; 2840 } 2841 FI.setEffectiveCallingConvention(DefaultCC); 2842 2843 } 2844 2845 llvm::Value *PTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2846 CodeGenFunction &CFG) const { 2847 llvm_unreachable("PTX does not support varargs"); 2848 return 0; 2849 } 2850 2851 void PTXTargetCodeGenInfo::SetTargetAttributes(const Decl *D, 2852 llvm::GlobalValue *GV, 2853 CodeGen::CodeGenModule &M) const{ 2854 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 2855 if (!FD) return; 2856 2857 llvm::Function *F = cast<llvm::Function>(GV); 2858 2859 // Perform special handling in OpenCL mode 2860 if (M.getLangOptions().OpenCL) { 2861 // Use OpenCL function attributes to set proper calling conventions 2862 // By default, all functions are device functions 2863 if (FD->hasAttr<OpenCLKernelAttr>()) { 2864 // OpenCL __kernel functions get a kernel calling convention 2865 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 2866 // And kernel functions are not subject to inlining 2867 F->addFnAttr(llvm::Attribute::NoInline); 2868 } 2869 } 2870 2871 // Perform special handling in CUDA mode. 2872 if (M.getLangOptions().CUDA) { 2873 // CUDA __global__ functions get a kernel calling convention. Since 2874 // __global__ functions cannot be called from the device, we do not 2875 // need to set the noinline attribute. 
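    // For example, a function declared __global__ void kern(float *p) in CUDA
    // source is emitted with the PTX_Kernel calling convention here, while
    // ordinary __device__ functions keep the default device convention.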
2876 if (FD->getAttr<CUDAGlobalAttr>()) 2877 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 2878 } 2879 } 2880 2881 } 2882 2883 //===----------------------------------------------------------------------===// 2884 // MBlaze ABI Implementation 2885 //===----------------------------------------------------------------------===// 2886 2887 namespace { 2888 2889 class MBlazeABIInfo : public ABIInfo { 2890 public: 2891 MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 2892 2893 bool isPromotableIntegerType(QualType Ty) const; 2894 2895 ABIArgInfo classifyReturnType(QualType RetTy) const; 2896 ABIArgInfo classifyArgumentType(QualType RetTy) const; 2897 2898 virtual void computeInfo(CGFunctionInfo &FI) const { 2899 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2900 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2901 it != ie; ++it) 2902 it->info = classifyArgumentType(it->type); 2903 } 2904 2905 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2906 CodeGenFunction &CGF) const; 2907 }; 2908 2909 class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo { 2910 public: 2911 MBlazeTargetCodeGenInfo(CodeGenTypes &CGT) 2912 : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {} 2913 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2914 CodeGen::CodeGenModule &M) const; 2915 }; 2916 2917 } 2918 2919 bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const { 2920 // MBlaze ABI requires all 8 and 16 bit quantities to be extended. 2921 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 2922 switch (BT->getKind()) { 2923 case BuiltinType::Bool: 2924 case BuiltinType::Char_S: 2925 case BuiltinType::Char_U: 2926 case BuiltinType::SChar: 2927 case BuiltinType::UChar: 2928 case BuiltinType::Short: 2929 case BuiltinType::UShort: 2930 return true; 2931 default: 2932 return false; 2933 } 2934 return false; 2935 } 2936 2937 llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2938 CodeGenFunction &CGF) const { 2939 // FIXME: Implement 2940 return 0; 2941 } 2942 2943 2944 ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const { 2945 if (RetTy->isVoidType()) 2946 return ABIArgInfo::getIgnore(); 2947 if (isAggregateTypeForABI(RetTy)) 2948 return ABIArgInfo::getIndirect(0); 2949 2950 return (isPromotableIntegerType(RetTy) ? 2951 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2952 } 2953 2954 ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const { 2955 if (isAggregateTypeForABI(Ty)) 2956 return ABIArgInfo::getIndirect(0); 2957 2958 return (isPromotableIntegerType(Ty) ? 2959 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2960 } 2961 2962 void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D, 2963 llvm::GlobalValue *GV, 2964 CodeGen::CodeGenModule &M) 2965 const { 2966 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 2967 if (!FD) return; 2968 2969 llvm::CallingConv::ID CC = llvm::CallingConv::C; 2970 if (FD->hasAttr<MBlazeInterruptHandlerAttr>()) 2971 CC = llvm::CallingConv::MBLAZE_INTR; 2972 else if (FD->hasAttr<MBlazeSaveVolatilesAttr>()) 2973 CC = llvm::CallingConv::MBLAZE_SVOL; 2974 2975 if (CC != llvm::CallingConv::C) { 2976 // Handle 'interrupt_handler' attribute: 2977 llvm::Function *F = cast<llvm::Function>(GV); 2978 2979 // Step 1: Set ISR calling convention. 2980 F->setCallingConv(CC); 2981 2982 // Step 2: Add attributes goodness. 2983 F->addFnAttr(llvm::Attribute::NoInline); 2984 } 2985 2986 // Step 3: Emit _interrupt_handler alias. 
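  // (Presumably so that startup/vector code which references the fixed symbol
  // name "_interrupt_handler" resolves to the user's annotated function.)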
2987 if (CC == llvm::CallingConv::MBLAZE_INTR) 2988 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 2989 "_interrupt_handler", GV, &M.getModule()); 2990 } 2991 2992 2993 //===----------------------------------------------------------------------===// 2994 // MSP430 ABI Implementation 2995 //===----------------------------------------------------------------------===// 2996 2997 namespace { 2998 2999 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 3000 public: 3001 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 3002 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 3003 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3004 CodeGen::CodeGenModule &M) const; 3005 }; 3006 3007 } 3008 3009 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3010 llvm::GlobalValue *GV, 3011 CodeGen::CodeGenModule &M) const { 3012 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 3013 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 3014 // Handle 'interrupt' attribute: 3015 llvm::Function *F = cast<llvm::Function>(GV); 3016 3017 // Step 1: Set ISR calling convention. 3018 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 3019 3020 // Step 2: Add attributes goodness. 3021 F->addFnAttr(llvm::Attribute::NoInline); 3022 3023 // Step 3: Emit ISR vector alias. 3024 unsigned Num = attr->getNumber() + 0xffe0; 3025 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 3026 "vector_" + Twine::utohexstr(Num), 3027 GV, &M.getModule()); 3028 } 3029 } 3030 } 3031 3032 //===----------------------------------------------------------------------===// 3033 // MIPS ABI Implementation. This works for both little-endian and 3034 // big-endian variants. 3035 //===----------------------------------------------------------------------===// 3036 3037 namespace { 3038 class MipsABIInfo : public ABIInfo { 3039 bool IsO32; 3040 unsigned MinABIStackAlignInBytes; 3041 llvm::Type* HandleStructTy(QualType Ty) const; 3042 public: 3043 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 3044 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8) {} 3045 3046 ABIArgInfo classifyReturnType(QualType RetTy) const; 3047 ABIArgInfo classifyArgumentType(QualType RetTy) const; 3048 virtual void computeInfo(CGFunctionInfo &FI) const; 3049 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3050 CodeGenFunction &CGF) const; 3051 }; 3052 3053 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 3054 unsigned SizeOfUnwindException; 3055 public: 3056 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 3057 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 3058 SizeOfUnwindException(IsO32 ? 24 : 32) {} 3059 3060 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 3061 return 29; 3062 } 3063 3064 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3065 llvm::Value *Address) const; 3066 3067 unsigned getSizeOfUnwindException() const { 3068 return SizeOfUnwindException; 3069 } 3070 }; 3071 } 3072 3073 // In N32/64, an aligned double precision floating point field is passed in 3074 // a register. 
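// For illustration, under N64 a type such as struct { int i; double d; } has
// 'd' at a 64-bit-aligned offset, so HandleStructTy below would coerce it to
// the LLVM type { i64, double }, letting the double travel in an FP register.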
3075 llvm::Type* MipsABIInfo::HandleStructTy(QualType Ty) const { 3076 if (IsO32) 3077 return 0; 3078 3079 const RecordType *RT = Ty->getAsStructureType(); 3080 3081 if (!RT) 3082 return 0; 3083 3084 const RecordDecl *RD = RT->getDecl(); 3085 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3086 uint64_t StructSize = getContext().getTypeSize(Ty); 3087 assert(!(StructSize % 8) && "Size of structure must be multiple of 8."); 3088 3089 SmallVector<llvm::Type*, 8> ArgList; 3090 uint64_t LastOffset = 0; 3091 unsigned idx = 0; 3092 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 3093 3094 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3095 i != e; ++i, ++idx) { 3096 const QualType Ty = (*i)->getType(); 3097 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 3098 3099 if (!BT || BT->getKind() != BuiltinType::Double) 3100 continue; 3101 3102 uint64_t Offset = Layout.getFieldOffset(idx); 3103 if (Offset % 64) // Ignore doubles that are not aligned. 3104 continue; 3105 3106 // Add ((Offset - LastOffset) / 64) args of type i64. 3107 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 3108 ArgList.push_back(I64); 3109 3110 // Add double type. 3111 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 3112 LastOffset = Offset + 64; 3113 } 3114 3115 // This structure doesn't have an aligned double field. 3116 if (!LastOffset) 3117 return 0; 3118 3119 // Add ((StructSize - LastOffset) / 64) args of type i64. 3120 for (unsigned N = (StructSize - LastOffset) / 64; N; --N) 3121 ArgList.push_back(I64); 3122 3123 // If the size of the remainder is not zero, add one more integer type to 3124 // ArgList. 3125 unsigned R = (StructSize - LastOffset) % 64; 3126 if (R) 3127 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 3128 3129 return llvm::StructType::get(getVMContext(), ArgList); 3130 } 3131 3132 ABIArgInfo MipsABIInfo::classifyArgumentType(QualType Ty) const { 3133 if (isAggregateTypeForABI(Ty)) { 3134 // Ignore empty aggregates. 3135 if (getContext().getTypeSize(Ty) == 0) 3136 return ABIArgInfo::getIgnore(); 3137 3138 // Records with non trivial destructors/constructors should not be passed 3139 // by value. 3140 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 3141 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3142 3143 llvm::Type *ResType; 3144 if ((ResType = HandleStructTy(Ty))) 3145 return ABIArgInfo::getDirect(ResType); 3146 3147 return ABIArgInfo::getIndirect(0); 3148 } 3149 3150 // Treat an enum type as its underlying type. 3151 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3152 Ty = EnumTy->getDecl()->getIntegerType(); 3153 3154 return (Ty->isPromotableIntegerType() ? 3155 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3156 } 3157 3158 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 3159 if (RetTy->isVoidType()) 3160 return ABIArgInfo::getIgnore(); 3161 3162 if (isAggregateTypeForABI(RetTy)) { 3163 if ((IsO32 && RetTy->isAnyComplexType()) || 3164 (!IsO32 && (getContext().getTypeSize(RetTy) <= 128))) 3165 return ABIArgInfo::getDirect(); 3166 3167 return ABIArgInfo::getIndirect(0); 3168 } 3169 3170 // Treat an enum type as its underlying type. 3171 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3172 RetTy = EnumTy->getDecl()->getIntegerType(); 3173 3174 return (RetTy->isPromotableIntegerType() ? 

void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type);
}

llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                    CodeGenFunction &CGF) const {
  llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped;

  if (TypeAlign > MinABIStackAlignInBytes) {
    llvm::Value *AddrAsInt32 = CGF.Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
    llvm::Value *Inc = llvm::ConstantInt::get(CGF.Int32Ty, TypeAlign - 1);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -TypeAlign);
    llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt32, Inc);
    llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
    AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
  }
  else
    AddrTyped = Builder.CreateBitCast(Addr, PTy);

  llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
  TypeAlign = std::max(TypeAlign, MinABIStackAlignInBytes);
  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
  llvm::Value *NextAddr =
    Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be
  // as canonical as it gets.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  llvm::LLVMContext &Context = CGF.getLLVMContext();

  // Everything on MIPS is 4 bytes.  Double-precision FP registers
  // are aliased to pairs of single-precision FP registers.
  llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);

  // 0-31 are the general purpose registers, $0 - $31.
  // 32-63 are the floating-point registers, $f0 - $f31.
  // 64 and 65 are the multiply/divide registers, $hi and $lo.
  // 66 is the (notional, I think) register for signal-handler return.
  AssignToArrayRange(Builder, Address, Four8, 0, 65);

  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
  // They are one bit wide and ignored here.

  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
  // (coprocessor 1 is the FP unit)
  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
  // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(Builder, Address, Four8, 80, 181);

  return false;
}
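
// Editor's note (illustration only, not part of the original source): the
// over-alignment step in EmitVAArg above rounds the va_list cursor up with
// (addr + align - 1) & -align.  For a hypothetical ap.cur of 0x7fff0004 and
// an 8-byte-aligned type on O32, that yields 0x7fff0008, and ap.next then
// advances by the value's size rounded up to the same alignment.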

//===----------------------------------------------------------------------===//
// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
//===----------------------------------------------------------------------===//

namespace {

class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
    : DefaultTargetCodeGenInfo(CGT) {}

  virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                                   CodeGen::CodeGenModule &M) const;
};

void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                               llvm::GlobalValue *GV,
                                               CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  if (M.getLangOptions().OpenCL) {
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL C Kernel functions are not subject to inlining
      F->addFnAttr(llvm::Attribute::NoInline);

      if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {

        // Convert the reqd_work_group_size() attributes to metadata.
        llvm::LLVMContext &Context = F->getContext();
        llvm::NamedMDNode *OpenCLMetadata =
          M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");

        SmallVector<llvm::Value*, 5> Operands;
        Operands.push_back(F);

        Operands.push_back(llvm::Constant::getIntegerValue(
                             llvm::Type::getInt32Ty(Context),
                             llvm::APInt(
                               32,
                               FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
        Operands.push_back(llvm::Constant::getIntegerValue(
                             llvm::Type::getInt32Ty(Context),
                             llvm::APInt(
                               32,
                               FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim())));
        Operands.push_back(llvm::Constant::getIntegerValue(
                             llvm::Type::getInt32Ty(Context),
                             llvm::APInt(
                               32,
                               FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim())));

        // Add a boolean constant operand for "required" (true) or "hint"
        // (false) for implementing the work_group_size_hint attr later.
        // Currently always true as the hint is not yet implemented.
        Operands.push_back(llvm::ConstantInt::getTrue(llvm::Type::getInt1Ty(Context)));

        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}

}

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  const llvm::Triple &Triple = getContext().getTargetInfo().getTriple();
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::arm:
  case llvm::Triple::thumb:
    {
      ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;

      if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0)
        Kind = ARMABIInfo::APCS;
      else if (CodeGenOpts.FloatABI == "hard")
        Kind = ARMABIInfo::AAPCS_VFP;

      return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
    }

  case llvm::Triple::ppc:
    return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));

  case llvm::Triple::ptx32:
  case llvm::Triple::ptx64:
    return *(TheTargetCodeGenInfo = new PTXTargetCodeGenInfo(Types));

  case llvm::Triple::mblaze:
    return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::tce:
    return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool DisableMMX = strcmp(getContext().getTargetInfo().getABI(), "no-mmx") == 0;

    if (Triple.isOSDarwin())
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX));

    switch (Triple.getOS()) {
    case llvm::Triple::Cygwin:
    case llvm::Triple::MinGW32:
    case llvm::Triple::AuroraUX:
    case llvm::Triple::DragonFly:
    case llvm::Triple::FreeBSD:
    case llvm::Triple::OpenBSD:
    case llvm::Triple::NetBSD:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX));

    default:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX));
    }
  }

  case llvm::Triple::x86_64: {
    bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0;

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
    case llvm::Triple::MinGW32:
    case llvm::Triple::Cygwin:
      return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
    default:
      return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
                                                                  HasAVX));
    }
  }
  }
}
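
// Editor's note (illustration only, not part of the original source): as a
// concrete example of the dispatch above, a mips64el triple selects
// MIPSTargetCodeGenInfo with IsO32 == false (the N32/N64 path), while an ARM
// triple whose ABI string is not "apcs-gnu" but whose float ABI is "hard"
// gets ARMTargetCodeGenInfo with the AAPCS_VFP kind.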