//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Type.h"
#include "llvm/DataLayout.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return CodeGenFunction::hasAggregateLLVMType(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}


void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64 FreeBSD, Linux, Darwin
  //   x86-32 FreeBSD, Linux, Darwin
  //   PowerPC Linux, Darwin
  //   ARM Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
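/// For example, a field of type 'struct Empty {}' is empty, and so is a
/// field of type 'struct Empty[4]' when AllowArrays is true.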
114 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, 115 bool AllowArrays) { 116 if (FD->isUnnamedBitfield()) 117 return true; 118 119 QualType FT = FD->getType(); 120 121 // Constant arrays of empty records count as empty, strip them off. 122 // Constant arrays of zero length always count as empty. 123 if (AllowArrays) 124 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { 125 if (AT->getSize() == 0) 126 return true; 127 FT = AT->getElementType(); 128 } 129 130 const RecordType *RT = FT->getAs<RecordType>(); 131 if (!RT) 132 return false; 133 134 // C++ record fields are never empty, at least in the Itanium ABI. 135 // 136 // FIXME: We should use a predicate for whether this behavior is true in the 137 // current ABI. 138 if (isa<CXXRecordDecl>(RT->getDecl())) 139 return false; 140 141 return isEmptyRecord(Context, FT, AllowArrays); 142 } 143 144 /// isEmptyRecord - Return true iff a structure contains only empty 145 /// fields. Note that a structure with a flexible array member is not 146 /// considered empty. 147 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) { 148 const RecordType *RT = T->getAs<RecordType>(); 149 if (!RT) 150 return 0; 151 const RecordDecl *RD = RT->getDecl(); 152 if (RD->hasFlexibleArrayMember()) 153 return false; 154 155 // If this is a C++ record, check the bases first. 156 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 157 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 158 e = CXXRD->bases_end(); i != e; ++i) 159 if (!isEmptyRecord(Context, i->getType(), true)) 160 return false; 161 162 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 163 i != e; ++i) 164 if (!isEmptyField(Context, *i, AllowArrays)) 165 return false; 166 return true; 167 } 168 169 /// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either 170 /// a non-trivial destructor or a non-trivial copy constructor. 171 static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) { 172 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 173 if (!RD) 174 return false; 175 176 return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor(); 177 } 178 179 /// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is 180 /// a record type with either a non-trivial destructor or a non-trivial copy 181 /// constructor. 182 static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) { 183 const RecordType *RT = T->getAs<RecordType>(); 184 if (!RT) 185 return false; 186 187 return hasNonTrivialDestructorOrCopyConstructor(RT); 188 } 189 190 /// isSingleElementStruct - Determine if a structure is a "single 191 /// element struct", i.e. it has exactly one non-empty field or 192 /// exactly one field which is itself a single element 193 /// struct. Structures with flexible array members are never 194 /// considered single element structs. 195 /// 196 /// \return The field declaration for the single non-empty field, if 197 /// it exists. 198 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) { 199 const RecordType *RT = T->getAsStructureType(); 200 if (!RT) 201 return 0; 202 203 const RecordDecl *RD = RT->getDecl(); 204 if (RD->hasFlexibleArrayMember()) 205 return 0; 206 207 const Type *Found = 0; 208 209 // If this is a C++ record, check the bases first. 
210 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 211 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 212 e = CXXRD->bases_end(); i != e; ++i) { 213 // Ignore empty records. 214 if (isEmptyRecord(Context, i->getType(), true)) 215 continue; 216 217 // If we already found an element then this isn't a single-element struct. 218 if (Found) 219 return 0; 220 221 // If this is non-empty and not a single element struct, the composite 222 // cannot be a single element struct. 223 Found = isSingleElementStruct(i->getType(), Context); 224 if (!Found) 225 return 0; 226 } 227 } 228 229 // Check for single element. 230 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 231 i != e; ++i) { 232 const FieldDecl *FD = *i; 233 QualType FT = FD->getType(); 234 235 // Ignore empty fields. 236 if (isEmptyField(Context, FD, true)) 237 continue; 238 239 // If we already found an element then this isn't a single-element 240 // struct. 241 if (Found) 242 return 0; 243 244 // Treat single element arrays as the element. 245 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { 246 if (AT->getSize().getZExtValue() != 1) 247 break; 248 FT = AT->getElementType(); 249 } 250 251 if (!isAggregateTypeForABI(FT)) { 252 Found = FT.getTypePtr(); 253 } else { 254 Found = isSingleElementStruct(FT, Context); 255 if (!Found) 256 return 0; 257 } 258 } 259 260 // We don't consider a struct a single-element struct if it has 261 // padding beyond the element type. 262 if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T)) 263 return 0; 264 265 return Found; 266 } 267 268 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) { 269 if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() && 270 !Ty->isAnyComplexType() && !Ty->isEnumeralType() && 271 !Ty->isBlockPointerType()) 272 return false; 273 274 uint64_t Size = Context.getTypeSize(Ty); 275 return Size == 32 || Size == 64; 276 } 277 278 /// canExpandIndirectArgument - Test whether an argument type which is to be 279 /// passed indirectly (on the stack) would have the equivalent layout if it was 280 /// expanded into separate arguments. If so, we prefer to do the latter to avoid 281 /// inhibiting optimizations. 282 /// 283 // FIXME: This predicate is missing many cases, currently it just follows 284 // llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We 285 // should probably make this smarter, or better yet make the LLVM backend 286 // capable of handling it. 287 static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) { 288 // We can only expand structure types. 289 const RecordType *RT = Ty->getAs<RecordType>(); 290 if (!RT) 291 return false; 292 293 // We can only expand (C) structures. 294 // 295 // FIXME: This needs to be generalized to handle classes as well. 296 const RecordDecl *RD = RT->getDecl(); 297 if (!RD->isStruct() || isa<CXXRecordDecl>(RD)) 298 return false; 299 300 uint64_t Size = 0; 301 302 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 303 i != e; ++i) { 304 const FieldDecl *FD = *i; 305 306 if (!is32Or64BitBasicType(FD->getType(), Context)) 307 return false; 308 309 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know 310 // how to expand them yet, and the predicate for telling if a bitfield still 311 // counts as "basic" is more complicated than what we were doing previously. 
312 if (FD->isBitField()) 313 return false; 314 315 Size += Context.getTypeSize(FD->getType()); 316 } 317 318 // Make sure there are not any holes in the struct. 319 if (Size != Context.getTypeSize(Ty)) 320 return false; 321 322 return true; 323 } 324 325 namespace { 326 /// DefaultABIInfo - The default implementation for ABI specific 327 /// details. This implementation provides information which results in 328 /// self-consistent and sensible LLVM IR generation, but does not 329 /// conform to any particular ABI. 330 class DefaultABIInfo : public ABIInfo { 331 public: 332 DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 333 334 ABIArgInfo classifyReturnType(QualType RetTy) const; 335 ABIArgInfo classifyArgumentType(QualType RetTy) const; 336 337 virtual void computeInfo(CGFunctionInfo &FI) const { 338 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 339 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 340 it != ie; ++it) 341 it->info = classifyArgumentType(it->type); 342 } 343 344 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 345 CodeGenFunction &CGF) const; 346 }; 347 348 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo { 349 public: 350 DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 351 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 352 }; 353 354 llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 355 CodeGenFunction &CGF) const { 356 return 0; 357 } 358 359 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const { 360 if (isAggregateTypeForABI(Ty)) { 361 // Records with non trivial destructors/constructors should not be passed 362 // by value. 363 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 364 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 365 366 return ABIArgInfo::getIndirect(0); 367 } 368 369 // Treat an enum type as its underlying type. 370 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 371 Ty = EnumTy->getDecl()->getIntegerType(); 372 373 return (Ty->isPromotableIntegerType() ? 374 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 375 } 376 377 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const { 378 if (RetTy->isVoidType()) 379 return ABIArgInfo::getIgnore(); 380 381 if (isAggregateTypeForABI(RetTy)) 382 return ABIArgInfo::getIndirect(0); 383 384 // Treat an enum type as its underlying type. 385 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 386 RetTy = EnumTy->getDecl()->getIntegerType(); 387 388 return (RetTy->isPromotableIntegerType() ? 
389 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 390 } 391 392 //===----------------------------------------------------------------------===// 393 // le32/PNaCl bitcode ABI Implementation 394 //===----------------------------------------------------------------------===// 395 396 class PNaClABIInfo : public ABIInfo { 397 public: 398 PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 399 400 ABIArgInfo classifyReturnType(QualType RetTy) const; 401 ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs) const; 402 403 virtual void computeInfo(CGFunctionInfo &FI) const; 404 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 405 CodeGenFunction &CGF) const; 406 }; 407 408 class PNaClTargetCodeGenInfo : public TargetCodeGenInfo { 409 public: 410 PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 411 : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {} 412 }; 413 414 void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const { 415 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 416 417 unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() : 0; 418 419 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 420 it != ie; ++it) 421 it->info = classifyArgumentType(it->type, FreeRegs); 422 } 423 424 llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 425 CodeGenFunction &CGF) const { 426 return 0; 427 } 428 429 ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty, 430 unsigned &FreeRegs) const { 431 if (isAggregateTypeForABI(Ty)) { 432 // Records with non trivial destructors/constructors should not be passed 433 // by value. 434 FreeRegs = 0; 435 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 436 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 437 438 return ABIArgInfo::getIndirect(0); 439 } 440 441 // Treat an enum type as its underlying type. 442 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 443 Ty = EnumTy->getDecl()->getIntegerType(); 444 445 ABIArgInfo BaseInfo = (Ty->isPromotableIntegerType() ? 446 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 447 448 // Regparm regs hold 32 bits. 449 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; 450 if (SizeInRegs == 0) return BaseInfo; 451 if (SizeInRegs > FreeRegs) { 452 FreeRegs = 0; 453 return BaseInfo; 454 } 455 FreeRegs -= SizeInRegs; 456 return BaseInfo.isDirect() ? 457 ABIArgInfo::getDirectInReg(BaseInfo.getCoerceToType()) : 458 ABIArgInfo::getExtendInReg(BaseInfo.getCoerceToType()); 459 } 460 461 ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const { 462 if (RetTy->isVoidType()) 463 return ABIArgInfo::getIgnore(); 464 465 if (isAggregateTypeForABI(RetTy)) 466 return ABIArgInfo::getIndirect(0); 467 468 // Treat an enum type as its underlying type. 469 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 470 RetTy = EnumTy->getDecl()->getIntegerType(); 471 472 return (RetTy->isPromotableIntegerType() ? 473 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 474 } 475 476 /// UseX86_MMXType - Return true if this is an MMX type that should use the 477 /// special x86_mmx type. 478 bool UseX86_MMXType(llvm::Type *IRType) { 479 // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the 480 // special x86_mmx type. 
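  // Note that <1 x i64> is excluded: its scalar size is 64, so it is left as
  // a plain 64-bit vector rather than being coerced to x86_mmx.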
481 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 && 482 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() && 483 IRType->getScalarSizeInBits() != 64; 484 } 485 486 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 487 StringRef Constraint, 488 llvm::Type* Ty) { 489 if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) 490 return llvm::Type::getX86_MMXTy(CGF.getLLVMContext()); 491 return Ty; 492 } 493 494 //===----------------------------------------------------------------------===// 495 // X86-32 ABI Implementation 496 //===----------------------------------------------------------------------===// 497 498 /// X86_32ABIInfo - The X86-32 ABI information. 499 class X86_32ABIInfo : public ABIInfo { 500 enum Class { 501 Integer, 502 Float 503 }; 504 505 static const unsigned MinABIStackAlignInBytes = 4; 506 507 bool IsDarwinVectorABI; 508 bool IsSmallStructInRegABI; 509 bool IsMMXDisabled; 510 bool IsWin32FloatStructABI; 511 unsigned DefaultNumRegisterParameters; 512 513 static bool isRegisterSize(unsigned Size) { 514 return (Size == 8 || Size == 16 || Size == 32 || Size == 64); 515 } 516 517 static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context, 518 unsigned callingConvention); 519 520 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 521 /// such that the argument will be passed in memory. 522 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, 523 unsigned &FreeRegs) const; 524 525 /// \brief Return the alignment to use for the given type on the stack. 526 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const; 527 528 Class classify(QualType Ty) const; 529 ABIArgInfo classifyReturnType(QualType RetTy, 530 unsigned callingConvention) const; 531 ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs, 532 bool IsFastCall) const; 533 bool shouldUseInReg(QualType Ty, unsigned &FreeRegs, 534 bool IsFastCall, bool &NeedsPadding) const; 535 536 public: 537 538 virtual void computeInfo(CGFunctionInfo &FI) const; 539 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 540 CodeGenFunction &CGF) const; 541 542 X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w, 543 unsigned r) 544 : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p), 545 IsMMXDisabled(m), IsWin32FloatStructABI(w), 546 DefaultNumRegisterParameters(r) {} 547 }; 548 549 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo { 550 public: 551 X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 552 bool d, bool p, bool m, bool w, unsigned r) 553 :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w, r)) {} 554 555 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 556 CodeGen::CodeGenModule &CGM) const; 557 558 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 559 // Darwin uses different dwarf register numbers for EH. 560 if (CGM.isTargetDarwin()) return 5; 561 562 return 4; 563 } 564 565 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 566 llvm::Value *Address) const; 567 568 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 569 StringRef Constraint, 570 llvm::Type* Ty) const { 571 return X86AdjustInlineAsmType(CGF, Constraint, Ty); 572 } 573 574 }; 575 576 } 577 578 /// shouldReturnTypeInRegister - Determine if the given type should be 579 /// passed in a register (for the Darwin ABI). 
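/// For example, a 32-bit struct of two 'short' fields qualifies, while a
/// 12-byte struct does not (it is not register sized).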
580 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, 581 ASTContext &Context, 582 unsigned callingConvention) { 583 uint64_t Size = Context.getTypeSize(Ty); 584 585 // Type must be register sized. 586 if (!isRegisterSize(Size)) 587 return false; 588 589 if (Ty->isVectorType()) { 590 // 64- and 128- bit vectors inside structures are not returned in 591 // registers. 592 if (Size == 64 || Size == 128) 593 return false; 594 595 return true; 596 } 597 598 // If this is a builtin, pointer, enum, complex type, member pointer, or 599 // member function pointer it is ok. 600 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() || 601 Ty->isAnyComplexType() || Ty->isEnumeralType() || 602 Ty->isBlockPointerType() || Ty->isMemberPointerType()) 603 return true; 604 605 // Arrays are treated like records. 606 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) 607 return shouldReturnTypeInRegister(AT->getElementType(), Context, 608 callingConvention); 609 610 // Otherwise, it must be a record type. 611 const RecordType *RT = Ty->getAs<RecordType>(); 612 if (!RT) return false; 613 614 // FIXME: Traverse bases here too. 615 616 // For thiscall conventions, structures will never be returned in 617 // a register. This is for compatibility with the MSVC ABI 618 if (callingConvention == llvm::CallingConv::X86_ThisCall && 619 RT->isStructureType()) { 620 return false; 621 } 622 623 // Structure types are passed in register if all fields would be 624 // passed in a register. 625 for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(), 626 e = RT->getDecl()->field_end(); i != e; ++i) { 627 const FieldDecl *FD = *i; 628 629 // Empty fields are ignored. 630 if (isEmptyField(Context, FD, true)) 631 continue; 632 633 // Check fields recursively. 634 if (!shouldReturnTypeInRegister(FD->getType(), Context, 635 callingConvention)) 636 return false; 637 } 638 return true; 639 } 640 641 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, 642 unsigned callingConvention) const { 643 if (RetTy->isVoidType()) 644 return ABIArgInfo::getIgnore(); 645 646 if (const VectorType *VT = RetTy->getAs<VectorType>()) { 647 // On Darwin, some vectors are returned in registers. 648 if (IsDarwinVectorABI) { 649 uint64_t Size = getContext().getTypeSize(RetTy); 650 651 // 128-bit vectors are a special case; they are returned in 652 // registers and we need to make sure to pick a type the LLVM 653 // backend will like. 654 if (Size == 128) 655 return ABIArgInfo::getDirect(llvm::VectorType::get( 656 llvm::Type::getInt64Ty(getVMContext()), 2)); 657 658 // Always return in register if it fits in a general purpose 659 // register, or if it is 64 bits and has a single element. 660 if ((Size == 8 || Size == 16 || Size == 32) || 661 (Size == 64 && VT->getNumElements() == 1)) 662 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 663 Size)); 664 665 return ABIArgInfo::getIndirect(0); 666 } 667 668 return ABIArgInfo::getDirect(); 669 } 670 671 if (isAggregateTypeForABI(RetTy)) { 672 if (const RecordType *RT = RetTy->getAs<RecordType>()) { 673 // Structures with either a non-trivial destructor or a non-trivial 674 // copy constructor are always indirect. 675 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 676 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 677 678 // Structures with flexible arrays are always indirect. 
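      // e.g. struct S { int len; char data[]; };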
679 if (RT->getDecl()->hasFlexibleArrayMember()) 680 return ABIArgInfo::getIndirect(0); 681 } 682 683 // If specified, structs and unions are always indirect. 684 if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType()) 685 return ABIArgInfo::getIndirect(0); 686 687 // Small structures which are register sized are generally returned 688 // in a register. 689 if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(), 690 callingConvention)) { 691 uint64_t Size = getContext().getTypeSize(RetTy); 692 693 // As a special-case, if the struct is a "single-element" struct, and 694 // the field is of type "float" or "double", return it in a 695 // floating-point register. (MSVC does not apply this special case.) 696 // We apply a similar transformation for pointer types to improve the 697 // quality of the generated IR. 698 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) 699 if ((!IsWin32FloatStructABI && SeltTy->isRealFloatingType()) 700 || SeltTy->hasPointerRepresentation()) 701 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 702 703 // FIXME: We should be able to narrow this integer in cases with dead 704 // padding. 705 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size)); 706 } 707 708 return ABIArgInfo::getIndirect(0); 709 } 710 711 // Treat an enum type as its underlying type. 712 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 713 RetTy = EnumTy->getDecl()->getIntegerType(); 714 715 return (RetTy->isPromotableIntegerType() ? 716 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 717 } 718 719 static bool isSSEVectorType(ASTContext &Context, QualType Ty) { 720 return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128; 721 } 722 723 static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) { 724 const RecordType *RT = Ty->getAs<RecordType>(); 725 if (!RT) 726 return 0; 727 const RecordDecl *RD = RT->getDecl(); 728 729 // If this is a C++ record, check the bases first. 730 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 731 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 732 e = CXXRD->bases_end(); i != e; ++i) 733 if (!isRecordWithSSEVectorType(Context, i->getType())) 734 return false; 735 736 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 737 i != e; ++i) { 738 QualType FT = i->getType(); 739 740 if (isSSEVectorType(Context, FT)) 741 return true; 742 743 if (isRecordWithSSEVectorType(Context, FT)) 744 return true; 745 } 746 747 return false; 748 } 749 750 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty, 751 unsigned Align) const { 752 // Otherwise, if the alignment is less than or equal to the minimum ABI 753 // alignment, just use the default; the backend will handle this. 754 if (Align <= MinABIStackAlignInBytes) 755 return 0; // Use default alignment. 756 757 // On non-Darwin, the stack type alignment is always 4. 758 if (!IsDarwinVectorABI) { 759 // Set explicit alignment, since we may need to realign the top. 760 return MinABIStackAlignInBytes; 761 } 762 763 // Otherwise, if the type contains an SSE vector type, the alignment is 16. 764 if (Align >= 16 && (isSSEVectorType(getContext(), Ty) || 765 isRecordWithSSEVectorType(getContext(), Ty))) 766 return 16; 767 768 return MinABIStackAlignInBytes; 769 } 770 771 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal, 772 unsigned &FreeRegs) const { 773 if (!ByVal) { 774 if (FreeRegs) { 775 --FreeRegs; // Non byval indirects just use one pointer. 
776 return ABIArgInfo::getIndirectInReg(0, false); 777 } 778 return ABIArgInfo::getIndirect(0, false); 779 } 780 781 // Compute the byval alignment. 782 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 783 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign); 784 if (StackAlign == 0) 785 return ABIArgInfo::getIndirect(4); 786 787 // If the stack alignment is less than the type alignment, realign the 788 // argument. 789 if (StackAlign < TypeAlign) 790 return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, 791 /*Realign=*/true); 792 793 return ABIArgInfo::getIndirect(StackAlign); 794 } 795 796 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const { 797 const Type *T = isSingleElementStruct(Ty, getContext()); 798 if (!T) 799 T = Ty.getTypePtr(); 800 801 if (const BuiltinType *BT = T->getAs<BuiltinType>()) { 802 BuiltinType::Kind K = BT->getKind(); 803 if (K == BuiltinType::Float || K == BuiltinType::Double) 804 return Float; 805 } 806 return Integer; 807 } 808 809 bool X86_32ABIInfo::shouldUseInReg(QualType Ty, unsigned &FreeRegs, 810 bool IsFastCall, bool &NeedsPadding) const { 811 NeedsPadding = false; 812 Class C = classify(Ty); 813 if (C == Float) 814 return false; 815 816 unsigned Size = getContext().getTypeSize(Ty); 817 unsigned SizeInRegs = (Size + 31) / 32; 818 819 if (SizeInRegs == 0) 820 return false; 821 822 if (SizeInRegs > FreeRegs) { 823 FreeRegs = 0; 824 return false; 825 } 826 827 FreeRegs -= SizeInRegs; 828 829 if (IsFastCall) { 830 if (Size > 32) 831 return false; 832 833 if (Ty->isIntegralOrEnumerationType()) 834 return true; 835 836 if (Ty->isPointerType()) 837 return true; 838 839 if (Ty->isReferenceType()) 840 return true; 841 842 if (FreeRegs) 843 NeedsPadding = true; 844 845 return false; 846 } 847 848 return true; 849 } 850 851 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, 852 unsigned &FreeRegs, 853 bool IsFastCall) const { 854 // FIXME: Set alignment on indirect arguments. 855 if (isAggregateTypeForABI(Ty)) { 856 // Structures with flexible arrays are always indirect. 857 if (const RecordType *RT = Ty->getAs<RecordType>()) { 858 // Structures with either a non-trivial destructor or a non-trivial 859 // copy constructor are always indirect. 860 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 861 return getIndirectResult(Ty, false, FreeRegs); 862 863 if (RT->getDecl()->hasFlexibleArrayMember()) 864 return getIndirectResult(Ty, true, FreeRegs); 865 } 866 867 // Ignore empty structs/unions. 868 if (isEmptyRecord(getContext(), Ty, true)) 869 return ABIArgInfo::getIgnore(); 870 871 llvm::LLVMContext &LLVMContext = getVMContext(); 872 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 873 bool NeedsPadding; 874 if (shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding)) { 875 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; 876 SmallVector<llvm::Type*, 3> Elements; 877 for (unsigned I = 0; I < SizeInRegs; ++I) 878 Elements.push_back(Int32); 879 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 880 return ABIArgInfo::getDirectInReg(Result); 881 } 882 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : 0; 883 884 // Expand small (<= 128-bit) record types when we know that the stack layout 885 // of those arguments will match the struct. This is important because the 886 // LLVM backend isn't smart enough to remove byval, which inhibits many 887 // optimizations. 
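    // For example, 'struct { int x; float y; }' can be expanded into two
    // separate 32-bit arguments instead of being passed byval.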
888 if (getContext().getTypeSize(Ty) <= 4*32 && 889 canExpandIndirectArgument(Ty, getContext())) 890 return ABIArgInfo::getExpandWithPadding(IsFastCall, PaddingType); 891 892 return getIndirectResult(Ty, true, FreeRegs); 893 } 894 895 if (const VectorType *VT = Ty->getAs<VectorType>()) { 896 // On Darwin, some vectors are passed in memory, we handle this by passing 897 // it as an i8/i16/i32/i64. 898 if (IsDarwinVectorABI) { 899 uint64_t Size = getContext().getTypeSize(Ty); 900 if ((Size == 8 || Size == 16 || Size == 32) || 901 (Size == 64 && VT->getNumElements() == 1)) 902 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 903 Size)); 904 } 905 906 llvm::Type *IRType = CGT.ConvertType(Ty); 907 if (UseX86_MMXType(IRType)) { 908 if (IsMMXDisabled) 909 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 910 64)); 911 ABIArgInfo AAI = ABIArgInfo::getDirect(IRType); 912 AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext())); 913 return AAI; 914 } 915 916 return ABIArgInfo::getDirect(); 917 } 918 919 920 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 921 Ty = EnumTy->getDecl()->getIntegerType(); 922 923 bool NeedsPadding; 924 bool InReg = shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding); 925 926 if (Ty->isPromotableIntegerType()) { 927 if (InReg) 928 return ABIArgInfo::getExtendInReg(); 929 return ABIArgInfo::getExtend(); 930 } 931 if (InReg) 932 return ABIArgInfo::getDirectInReg(); 933 return ABIArgInfo::getDirect(); 934 } 935 936 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const { 937 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), 938 FI.getCallingConvention()); 939 940 unsigned CC = FI.getCallingConvention(); 941 bool IsFastCall = CC == llvm::CallingConv::X86_FastCall; 942 unsigned FreeRegs; 943 if (IsFastCall) 944 FreeRegs = 2; 945 else if (FI.getHasRegParm()) 946 FreeRegs = FI.getRegParm(); 947 else 948 FreeRegs = DefaultNumRegisterParameters; 949 950 // If the return value is indirect, then the hidden argument is consuming one 951 // integer register. 
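  // For example, with -mregparm=3 an indirectly returned struct leaves only
  // two registers for the integer arguments that follow.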
952 if (FI.getReturnInfo().isIndirect() && FreeRegs) { 953 --FreeRegs; 954 ABIArgInfo &Old = FI.getReturnInfo(); 955 Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(), 956 Old.getIndirectByVal(), 957 Old.getIndirectRealign()); 958 } 959 960 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 961 it != ie; ++it) 962 it->info = classifyArgumentType(it->type, FreeRegs, IsFastCall); 963 } 964 965 llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 966 CodeGenFunction &CGF) const { 967 llvm::Type *BPP = CGF.Int8PtrPtrTy; 968 969 CGBuilderTy &Builder = CGF.Builder; 970 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 971 "ap"); 972 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 973 974 // Compute if the address needs to be aligned 975 unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity(); 976 Align = getTypeStackAlignInBytes(Ty, Align); 977 Align = std::max(Align, 4U); 978 if (Align > 4) { 979 // addr = (addr + align - 1) & -align; 980 llvm::Value *Offset = 981 llvm::ConstantInt::get(CGF.Int32Ty, Align - 1); 982 Addr = CGF.Builder.CreateGEP(Addr, Offset); 983 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr, 984 CGF.Int32Ty); 985 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align); 986 Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 987 Addr->getType(), 988 "ap.cur.aligned"); 989 } 990 991 llvm::Type *PTy = 992 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 993 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 994 995 uint64_t Offset = 996 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align); 997 llvm::Value *NextAddr = 998 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 999 "ap.next"); 1000 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 1001 1002 return AddrTyped; 1003 } 1004 1005 void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 1006 llvm::GlobalValue *GV, 1007 CodeGen::CodeGenModule &CGM) const { 1008 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 1009 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 1010 // Get the LLVM function. 1011 llvm::Function *Fn = cast<llvm::Function>(GV); 1012 1013 // Now add the 'alignstack' attribute with a value of 16. 1014 llvm::AttrBuilder B; 1015 B.addStackAlignmentAttr(16); 1016 Fn->addAttribute(llvm::AttrListPtr::FunctionIndex, 1017 llvm::Attributes::get(CGM.getLLVMContext(), B)); 1018 } 1019 } 1020 } 1021 1022 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( 1023 CodeGen::CodeGenFunction &CGF, 1024 llvm::Value *Address) const { 1025 CodeGen::CGBuilderTy &Builder = CGF.Builder; 1026 1027 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 1028 1029 // 0-7 are the eight integer registers; the order is different 1030 // on Darwin (for EH), but the range is the same. 1031 // 8 is %eip. 1032 AssignToArrayRange(Builder, Address, Four8, 0, 8); 1033 1034 if (CGF.CGM.isTargetDarwin()) { 1035 // 12-16 are st(0..4). Not sure why we stop at 4. 1036 // These have size 16, which is sizeof(long double) on 1037 // platforms with 8-byte alignment for that type. 1038 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16); 1039 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16); 1040 1041 } else { 1042 // 9 is %eflags, which doesn't get a size on Darwin for some 1043 // reason. 1044 Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9)); 1045 1046 // 11-16 are st(0..5). Not sure why we stop at 5. 
1047 // These have size 12, which is sizeof(long double) on 1048 // platforms with 4-byte alignment for that type. 1049 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12); 1050 AssignToArrayRange(Builder, Address, Twelve8, 11, 16); 1051 } 1052 1053 return false; 1054 } 1055 1056 //===----------------------------------------------------------------------===// 1057 // X86-64 ABI Implementation 1058 //===----------------------------------------------------------------------===// 1059 1060 1061 namespace { 1062 /// X86_64ABIInfo - The X86_64 ABI information. 1063 class X86_64ABIInfo : public ABIInfo { 1064 enum Class { 1065 Integer = 0, 1066 SSE, 1067 SSEUp, 1068 X87, 1069 X87Up, 1070 ComplexX87, 1071 NoClass, 1072 Memory 1073 }; 1074 1075 /// merge - Implement the X86_64 ABI merging algorithm. 1076 /// 1077 /// Merge an accumulating classification \arg Accum with a field 1078 /// classification \arg Field. 1079 /// 1080 /// \param Accum - The accumulating classification. This should 1081 /// always be either NoClass or the result of a previous merge 1082 /// call. In addition, this should never be Memory (the caller 1083 /// should just return Memory for the aggregate). 1084 static Class merge(Class Accum, Class Field); 1085 1086 /// postMerge - Implement the X86_64 ABI post merging algorithm. 1087 /// 1088 /// Post merger cleanup, reduces a malformed Hi and Lo pair to 1089 /// final MEMORY or SSE classes when necessary. 1090 /// 1091 /// \param AggregateSize - The size of the current aggregate in 1092 /// the classification process. 1093 /// 1094 /// \param Lo - The classification for the parts of the type 1095 /// residing in the low word of the containing object. 1096 /// 1097 /// \param Hi - The classification for the parts of the type 1098 /// residing in the higher words of the containing object. 1099 /// 1100 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const; 1101 1102 /// classify - Determine the x86_64 register classes in which the 1103 /// given type T should be passed. 1104 /// 1105 /// \param Lo - The classification for the parts of the type 1106 /// residing in the low word of the containing object. 1107 /// 1108 /// \param Hi - The classification for the parts of the type 1109 /// residing in the high word of the containing object. 1110 /// 1111 /// \param OffsetBase - The bit offset of this type in the 1112 /// containing object. Some parameters are classified different 1113 /// depending on whether they straddle an eightbyte boundary. 1114 /// 1115 /// If a word is unused its result will be NoClass; if a type should 1116 /// be passed in Memory then at least the classification of \arg Lo 1117 /// will be Memory. 1118 /// 1119 /// The \arg Lo class will be NoClass iff the argument is ignored. 1120 /// 1121 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will 1122 /// also be ComplexX87. 1123 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const; 1124 1125 llvm::Type *GetByteVectorType(QualType Ty) const; 1126 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType, 1127 unsigned IROffset, QualType SourceTy, 1128 unsigned SourceOffset) const; 1129 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType, 1130 unsigned IROffset, QualType SourceTy, 1131 unsigned SourceOffset) const; 1132 1133 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 1134 /// such that the argument will be returned in memory. 
1135 ABIArgInfo getIndirectReturnResult(QualType Ty) const; 1136 1137 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 1138 /// such that the argument will be passed in memory. 1139 /// 1140 /// \param freeIntRegs - The number of free integer registers remaining 1141 /// available. 1142 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const; 1143 1144 ABIArgInfo classifyReturnType(QualType RetTy) const; 1145 1146 ABIArgInfo classifyArgumentType(QualType Ty, 1147 unsigned freeIntRegs, 1148 unsigned &neededInt, 1149 unsigned &neededSSE) const; 1150 1151 bool IsIllegalVectorType(QualType Ty) const; 1152 1153 /// The 0.98 ABI revision clarified a lot of ambiguities, 1154 /// unfortunately in ways that were not always consistent with 1155 /// certain previous compilers. In particular, platforms which 1156 /// required strict binary compatibility with older versions of GCC 1157 /// may need to exempt themselves. 1158 bool honorsRevision0_98() const { 1159 return !getContext().getTargetInfo().getTriple().isOSDarwin(); 1160 } 1161 1162 bool HasAVX; 1163 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on 1164 // 64-bit hardware. 1165 bool Has64BitPointers; 1166 1167 public: 1168 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) : 1169 ABIInfo(CGT), HasAVX(hasavx), 1170 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) { 1171 } 1172 1173 bool isPassedUsingAVXType(QualType type) const { 1174 unsigned neededInt, neededSSE; 1175 // The freeIntRegs argument doesn't matter here. 1176 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE); 1177 if (info.isDirect()) { 1178 llvm::Type *ty = info.getCoerceToType(); 1179 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty)) 1180 return (vectorTy->getBitWidth() > 128); 1181 } 1182 return false; 1183 } 1184 1185 virtual void computeInfo(CGFunctionInfo &FI) const; 1186 1187 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 1188 CodeGenFunction &CGF) const; 1189 }; 1190 1191 /// WinX86_64ABIInfo - The Windows X86_64 ABI information. 1192 class WinX86_64ABIInfo : public ABIInfo { 1193 1194 ABIArgInfo classify(QualType Ty) const; 1195 1196 public: 1197 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 1198 1199 virtual void computeInfo(CGFunctionInfo &FI) const; 1200 1201 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 1202 CodeGenFunction &CGF) const; 1203 }; 1204 1205 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo { 1206 public: 1207 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) 1208 : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {} 1209 1210 const X86_64ABIInfo &getABIInfo() const { 1211 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo()); 1212 } 1213 1214 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 1215 return 7; 1216 } 1217 1218 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 1219 llvm::Value *Address) const { 1220 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 1221 1222 // 0-15 are the 16 integer registers. 1223 // 16 is %rip. 
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

};

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }
};

}

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
  //
  // (c) If the size of the aggregate exceeds two eightbytes and the first
  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //     argument is passed in memory. NOTE: This is necessary to keep the
  //     ABI working for processors that don't support the __m256 type.
  //
  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic. Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
1323 // 1324 // (b) If one of the classes is NO_CLASS, the resulting class is 1325 // the other class. 1326 // 1327 // (c) If one of the classes is MEMORY, the result is the MEMORY 1328 // class. 1329 // 1330 // (d) If one of the classes is INTEGER, the result is the 1331 // INTEGER. 1332 // 1333 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 1334 // MEMORY is used as class. 1335 // 1336 // (f) Otherwise class SSE is used. 1337 1338 // Accum should never be memory (we should have returned) or 1339 // ComplexX87 (because this cannot be passed in a structure). 1340 assert((Accum != Memory && Accum != ComplexX87) && 1341 "Invalid accumulated classification during merge."); 1342 if (Accum == Field || Field == NoClass) 1343 return Accum; 1344 if (Field == Memory) 1345 return Memory; 1346 if (Accum == NoClass) 1347 return Field; 1348 if (Accum == Integer || Field == Integer) 1349 return Integer; 1350 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 1351 Accum == X87 || Accum == X87Up) 1352 return Memory; 1353 return SSE; 1354 } 1355 1356 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, 1357 Class &Lo, Class &Hi) const { 1358 // FIXME: This code can be simplified by introducing a simple value class for 1359 // Class pairs with appropriate constructor methods for the various 1360 // situations. 1361 1362 // FIXME: Some of the split computations are wrong; unaligned vectors 1363 // shouldn't be passed in registers for example, so there is no chance they 1364 // can straddle an eightbyte. Verify & simplify. 1365 1366 Lo = Hi = NoClass; 1367 1368 Class &Current = OffsetBase < 64 ? Lo : Hi; 1369 Current = Memory; 1370 1371 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 1372 BuiltinType::Kind k = BT->getKind(); 1373 1374 if (k == BuiltinType::Void) { 1375 Current = NoClass; 1376 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 1377 Lo = Integer; 1378 Hi = Integer; 1379 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 1380 Current = Integer; 1381 } else if ((k == BuiltinType::Float || k == BuiltinType::Double) || 1382 (k == BuiltinType::LongDouble && 1383 getContext().getTargetInfo().getTriple().getOS() == 1384 llvm::Triple::NativeClient)) { 1385 Current = SSE; 1386 } else if (k == BuiltinType::LongDouble) { 1387 Lo = X87; 1388 Hi = X87Up; 1389 } 1390 // FIXME: _Decimal32 and _Decimal64 are SSE. 1391 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 1392 return; 1393 } 1394 1395 if (const EnumType *ET = Ty->getAs<EnumType>()) { 1396 // Classify the underlying integer type. 1397 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi); 1398 return; 1399 } 1400 1401 if (Ty->hasPointerRepresentation()) { 1402 Current = Integer; 1403 return; 1404 } 1405 1406 if (Ty->isMemberPointerType()) { 1407 if (Ty->isMemberFunctionPointerType() && Has64BitPointers) 1408 Lo = Hi = Integer; 1409 else 1410 Current = Integer; 1411 return; 1412 } 1413 1414 if (const VectorType *VT = Ty->getAs<VectorType>()) { 1415 uint64_t Size = getContext().getTypeSize(VT); 1416 if (Size == 32) { 1417 // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x 1418 // float> as integer. 1419 Current = Integer; 1420 1421 // If this type crosses an eightbyte boundary, it should be 1422 // split. 1423 uint64_t EB_Real = (OffsetBase) / 64; 1424 uint64_t EB_Imag = (OffsetBase + Size - 1) / 64; 1425 if (EB_Real != EB_Imag) 1426 Hi = Lo; 1427 } else if (Size == 64) { 1428 // gcc passes <1 x double> in memory. 
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 || (HasAVX && Size == 256)) {
      // Arguments of 256-bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to class
      // SSEUP. The original Lo and Hi design considers that types can't be
      // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
      // This design isn't correct for 256-bits, but since there are no cases
      // where the upper parts would need to be inspected, avoid adding
      // complexity and just consider Hi to match the 64-256 part.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy)
      Current = SSE;
    else if (ET == getContext().DoubleTy ||
             (ET == getContext().LongDoubleTy &&
              getContext().getTargetInfo().getTriple().getOS() ==
              llvm::Triple::NativeClient))
      Lo = Hi = SSE;
    else if (ET == getContext().LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since Lo and Hi logic isn't extended
    // to work for sizes wider than 128, early check and fall back to memory.
1514 if (Size > 128 && EltSize != 256) 1515 return; 1516 1517 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 1518 Class FieldLo, FieldHi; 1519 classify(AT->getElementType(), Offset, FieldLo, FieldHi); 1520 Lo = merge(Lo, FieldLo); 1521 Hi = merge(Hi, FieldHi); 1522 if (Lo == Memory || Hi == Memory) 1523 break; 1524 } 1525 1526 postMerge(Size, Lo, Hi); 1527 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 1528 return; 1529 } 1530 1531 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1532 uint64_t Size = getContext().getTypeSize(Ty); 1533 1534 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 1535 // than four eightbytes, ..., it has class MEMORY. 1536 if (Size > 256) 1537 return; 1538 1539 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial 1540 // copy constructor or a non-trivial destructor, it is passed by invisible 1541 // reference. 1542 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 1543 return; 1544 1545 const RecordDecl *RD = RT->getDecl(); 1546 1547 // Assume variable sized types are passed in memory. 1548 if (RD->hasFlexibleArrayMember()) 1549 return; 1550 1551 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 1552 1553 // Reset Lo class, this will be recomputed. 1554 Current = NoClass; 1555 1556 // If this is a C++ record, classify the bases first. 1557 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1558 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 1559 e = CXXRD->bases_end(); i != e; ++i) { 1560 assert(!i->isVirtual() && !i->getType()->isDependentType() && 1561 "Unexpected base class!"); 1562 const CXXRecordDecl *Base = 1563 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 1564 1565 // Classify this field. 1566 // 1567 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 1568 // single eightbyte, each is classified separately. Each eightbyte gets 1569 // initialized to class NO_CLASS. 1570 Class FieldLo, FieldHi; 1571 uint64_t Offset = 1572 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); 1573 classify(i->getType(), Offset, FieldLo, FieldHi); 1574 Lo = merge(Lo, FieldLo); 1575 Hi = merge(Hi, FieldHi); 1576 if (Lo == Memory || Hi == Memory) 1577 break; 1578 } 1579 } 1580 1581 // Classify the fields one at a time, merging the results. 1582 unsigned idx = 0; 1583 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1584 i != e; ++i, ++idx) { 1585 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 1586 bool BitField = i->isBitField(); 1587 1588 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than 1589 // four eightbytes, or it contains unaligned fields, it has class MEMORY. 1590 // 1591 // The only case a 256-bit wide vector could be used is when the struct 1592 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 1593 // to work for sizes wider than 128, early check and fallback to memory. 1594 // 1595 if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) { 1596 Lo = Memory; 1597 return; 1598 } 1599 // Note, skip this test for bit-fields, see below. 1600 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { 1601 Lo = Memory; 1602 return; 1603 } 1604 1605 // Classify this field. 1606 // 1607 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate 1608 // exceeds a single eightbyte, each is classified 1609 // separately. 
Each eightbyte gets initialized to class 1610 // NO_CLASS. 1611 Class FieldLo, FieldHi; 1612 1613 // Bit-fields require special handling, they do not force the 1614 // structure to be passed in memory even if unaligned, and 1615 // therefore they can straddle an eightbyte. 1616 if (BitField) { 1617 // Ignore padding bit-fields. 1618 if (i->isUnnamedBitfield()) 1619 continue; 1620 1621 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 1622 uint64_t Size = i->getBitWidthValue(getContext()); 1623 1624 uint64_t EB_Lo = Offset / 64; 1625 uint64_t EB_Hi = (Offset + Size - 1) / 64; 1626 FieldLo = FieldHi = NoClass; 1627 if (EB_Lo) { 1628 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 1629 FieldLo = NoClass; 1630 FieldHi = Integer; 1631 } else { 1632 FieldLo = Integer; 1633 FieldHi = EB_Hi ? Integer : NoClass; 1634 } 1635 } else 1636 classify(i->getType(), Offset, FieldLo, FieldHi); 1637 Lo = merge(Lo, FieldLo); 1638 Hi = merge(Hi, FieldHi); 1639 if (Lo == Memory || Hi == Memory) 1640 break; 1641 } 1642 1643 postMerge(Size, Lo, Hi); 1644 } 1645 } 1646 1647 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { 1648 // If this is a scalar LLVM value then assume LLVM will pass it in the right 1649 // place naturally. 1650 if (!isAggregateTypeForABI(Ty)) { 1651 // Treat an enum type as its underlying type. 1652 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1653 Ty = EnumTy->getDecl()->getIntegerType(); 1654 1655 return (Ty->isPromotableIntegerType() ? 1656 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1657 } 1658 1659 return ABIArgInfo::getIndirect(0); 1660 } 1661 1662 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { 1663 if (const VectorType *VecTy = Ty->getAs<VectorType>()) { 1664 uint64_t Size = getContext().getTypeSize(VecTy); 1665 unsigned LargestVector = HasAVX ? 256 : 128; 1666 if (Size <= 64 || Size > LargestVector) 1667 return true; 1668 } 1669 1670 return false; 1671 } 1672 1673 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, 1674 unsigned freeIntRegs) const { 1675 // If this is a scalar LLVM value then assume LLVM will pass it in the right 1676 // place naturally. 1677 // 1678 // This assumption is optimistic, as there could be free registers available 1679 // when we need to pass this argument in memory, and LLVM could try to pass 1680 // the argument in the free register. This does not seem to happen currently, 1681 // but this code would be much safer if we could mark the argument with 1682 // 'onstack'. See PR12193. 1683 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) { 1684 // Treat an enum type as its underlying type. 1685 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1686 Ty = EnumTy->getDecl()->getIntegerType(); 1687 1688 return (Ty->isPromotableIntegerType() ? 1689 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1690 } 1691 1692 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 1693 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 1694 1695 // Compute the byval alignment. We specify the alignment of the byval in all 1696 // cases so that the mid-level optimizer knows the alignment of the byval. 1697 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); 1698 1699 // Attempt to avoid passing indirect results using byval when possible. This 1700 // is important for good codegen. 1701 // 1702 // We do this by coercing the value into a scalar type which the backend can 1703 // handle naturally (i.e., without using byval). 
1704 //
1705 // For simplicity, we currently only do this when we have exhausted all of the
1706 // free integer registers. Doing this when there are free integer registers
1707 // would require more care, as we would have to ensure that the coerced value
1708 // did not claim the unused register. That would require either reordering the
1709 // arguments to the function (so that any subsequent inreg values came first),
1710 // or only doing this optimization when there were no following arguments that
1711 // might be inreg.
1712 //
1713 // We currently expect it to be rare (particularly in well written code) for
1714 // arguments to be passed on the stack when there are still free integer
1715 // registers available (this would typically imply large structs being passed
1716 // by value), so this seems like a fair tradeoff for now.
1717 //
1718 // We can revisit this if the backend grows support for 'onstack' parameter
1719 // attributes. See PR12193.
1720 if (freeIntRegs == 0) {
1721 uint64_t Size = getContext().getTypeSize(Ty);
1722
1723 // If this type fits in an eightbyte, coerce it into the matching integral
1724 // type, which will end up on the stack (with alignment 8).
1725 if (Align == 8 && Size <= 64)
1726 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1727 Size));
1728 }
1729
1730 return ABIArgInfo::getIndirect(Align);
1731 }
1732
1733 /// GetByteVectorType - The ABI specifies that a value should be passed in a
1734 /// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a
1735 /// vector register.
1736 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
1737 llvm::Type *IRType = CGT.ConvertType(Ty);
1738
1739 // Wrapper structs that just contain vectors are passed just like vectors,
1740 // strip them off if present.
1741 llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
1742 while (STy && STy->getNumElements() == 1) {
1743 IRType = STy->getElementType(0);
1744 STy = dyn_cast<llvm::StructType>(IRType);
1745 }
1746
1747 // If the preferred type is a 16-byte or 32-byte vector, prefer to pass it.
1748 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
1749 llvm::Type *EltTy = VT->getElementType();
1750 unsigned BitWidth = VT->getBitWidth();
1751 if ((BitWidth >= 128 && BitWidth <= 256) &&
1752 (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
1753 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
1754 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
1755 EltTy->isIntegerTy(128)))
1756 return VT;
1757 }
1758
1759 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
1760 }
1761
1762 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
1763 /// is known to either be off the end of the specified type or be in
1764 /// alignment padding. The user type specified is known to be at most 128 bits
1765 /// in size and to have passed through X86_64ABIInfo::classify with a successful
1766 /// classification that put one of the two halves in the INTEGER class.
1767 ///
1768 /// It is conservatively correct to return false.
1769 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
1770 unsigned EndBit, ASTContext &Context) {
1771 // If the bytes being queried are off the end of the type, there is no user
1772 // data hiding here. This handles analysis of builtins, vectors and other
1773 // types that don't contain interesting padding.
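  // Illustrative examples (not from the ABI document): for struct { float f; }
  // a query of bits [32, 64) lies entirely off the end of the type and returns
  // true, while for struct { float f; float g; } the same range overlaps 'g'
  // and must return false.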
1774 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 1775 if (TySize <= StartBit) 1776 return true; 1777 1778 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 1779 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 1780 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 1781 1782 // Check each element to see if the element overlaps with the queried range. 1783 for (unsigned i = 0; i != NumElts; ++i) { 1784 // If the element is after the span we care about, then we're done.. 1785 unsigned EltOffset = i*EltSize; 1786 if (EltOffset >= EndBit) break; 1787 1788 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0; 1789 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 1790 EndBit-EltOffset, Context)) 1791 return false; 1792 } 1793 // If it overlaps no elements, then it is safe to process as padding. 1794 return true; 1795 } 1796 1797 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1798 const RecordDecl *RD = RT->getDecl(); 1799 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 1800 1801 // If this is a C++ record, check the bases first. 1802 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1803 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 1804 e = CXXRD->bases_end(); i != e; ++i) { 1805 assert(!i->isVirtual() && !i->getType()->isDependentType() && 1806 "Unexpected base class!"); 1807 const CXXRecordDecl *Base = 1808 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 1809 1810 // If the base is after the span we care about, ignore it. 1811 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 1812 if (BaseOffset >= EndBit) continue; 1813 1814 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 1815 if (!BitsContainNoUserData(i->getType(), BaseStart, 1816 EndBit-BaseOffset, Context)) 1817 return false; 1818 } 1819 } 1820 1821 // Verify that no field has data that overlaps the region of interest. Yes 1822 // this could be sped up a lot by being smarter about queried fields, 1823 // however we're only looking at structs up to 16 bytes, so we don't care 1824 // much. 1825 unsigned idx = 0; 1826 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1827 i != e; ++i, ++idx) { 1828 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 1829 1830 // If we found a field after the region we care about, then we're done. 1831 if (FieldOffset >= EndBit) break; 1832 1833 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 1834 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 1835 Context)) 1836 return false; 1837 } 1838 1839 // If nothing in this record overlapped the area of interest, then we're 1840 // clean. 1841 return true; 1842 } 1843 1844 return false; 1845 } 1846 1847 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 1848 /// float member at the specified offset. For example, {int,{float}} has a 1849 /// float at offset 4. It is conservatively correct for this routine to return 1850 /// false. 1851 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 1852 const llvm::DataLayout &TD) { 1853 // Base case if we find a float. 1854 if (IROffset == 0 && IRType->isFloatTy()) 1855 return true; 1856 1857 // If this is a struct, recurse into the field at the specified offset. 
1858 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1859 const llvm::StructLayout *SL = TD.getStructLayout(STy); 1860 unsigned Elt = SL->getElementContainingOffset(IROffset); 1861 IROffset -= SL->getElementOffset(Elt); 1862 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 1863 } 1864 1865 // If this is an array, recurse into the field at the specified offset. 1866 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1867 llvm::Type *EltTy = ATy->getElementType(); 1868 unsigned EltSize = TD.getTypeAllocSize(EltTy); 1869 IROffset -= IROffset/EltSize*EltSize; 1870 return ContainsFloatAtOffset(EltTy, IROffset, TD); 1871 } 1872 1873 return false; 1874 } 1875 1876 1877 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 1878 /// low 8 bytes of an XMM register, corresponding to the SSE class. 1879 llvm::Type *X86_64ABIInfo:: 1880 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1881 QualType SourceTy, unsigned SourceOffset) const { 1882 // The only three choices we have are either double, <2 x float>, or float. We 1883 // pass as float if the last 4 bytes is just padding. This happens for 1884 // structs that contain 3 floats. 1885 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 1886 SourceOffset*8+64, getContext())) 1887 return llvm::Type::getFloatTy(getVMContext()); 1888 1889 // We want to pass as <2 x float> if the LLVM IR type contains a float at 1890 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 1891 // case. 1892 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && 1893 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) 1894 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 1895 1896 return llvm::Type::getDoubleTy(getVMContext()); 1897 } 1898 1899 1900 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 1901 /// an 8-byte GPR. This means that we either have a scalar or we are talking 1902 /// about the high or low part of an up-to-16-byte struct. This routine picks 1903 /// the best LLVM IR type to represent this, which may be i64 or may be anything 1904 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 1905 /// etc). 1906 /// 1907 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 1908 /// the source type. IROffset is an offset in bytes into the LLVM IR type that 1909 /// the 8-byte value references. PrefType may be null. 1910 /// 1911 /// SourceTy is the source level type for the entire argument. SourceOffset is 1912 /// an offset into this that we're processing (which is always either 0 or 8). 1913 /// 1914 llvm::Type *X86_64ABIInfo:: 1915 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1916 QualType SourceTy, unsigned SourceOffset) const { 1917 // If we're dealing with an un-offset LLVM IR type, then it means that we're 1918 // returning an 8-byte unit starting with it. See if we can safely use it. 1919 if (IROffset == 0) { 1920 // Pointers and int64's always fill the 8-byte unit. 1921 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || 1922 IRType->isIntegerTy(64)) 1923 return IRType; 1924 1925 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 1926 // goodness in the source type is just tail padding. This is allowed to 1927 // kick in for struct {double,int} on the int, but not on 1928 // struct{double,int,int} because we wouldn't return the second int. 
We 1929 // have to do this analysis on the source type because we can't depend on
1930 // unions being lowered a specific way etc.
1931 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
1932 IRType->isIntegerTy(32) ||
1933 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
1934 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
1935 cast<llvm::IntegerType>(IRType)->getBitWidth();
1936
1937 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
1938 SourceOffset*8+64, getContext()))
1939 return IRType;
1940 }
1941 }
1942
1943 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
1944 // If this is a struct, recurse into the field at the specified offset.
1945 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
1946 if (IROffset < SL->getSizeInBytes()) {
1947 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
1948 IROffset -= SL->getElementOffset(FieldIdx);
1949
1950 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
1951 SourceTy, SourceOffset);
1952 }
1953 }
1954
1955 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
1956 llvm::Type *EltTy = ATy->getElementType();
1957 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
1958 unsigned EltOffset = IROffset/EltSize*EltSize;
1959 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
1960 SourceOffset);
1961 }
1962
1963 // Okay, we don't have any better idea of what to pass, so we pass this in an
1964 // integer type that is no bigger than what is left of the struct.
1965 unsigned TySizeInBytes =
1966 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
1967
1968 assert(TySizeInBytes != SourceOffset && "Empty field?");
1969
1970 // It is always safe to classify this as an integer type up to i64 that
1971 // isn't larger than the structure.
1972 return llvm::IntegerType::get(getVMContext(),
1973 std::min(TySizeInBytes-SourceOffset, 8U)*8);
1974 }
1975
1976
1977 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
1978 /// be used as elements of a two register pair to pass or return, return a
1979 /// first class aggregate to represent them. For example, if the low part of
1980 /// a by-value argument should be passed as i32* and the high part as float,
1981 /// return {i32*, float}.
1982 static llvm::Type *
1983 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
1984 const llvm::DataLayout &TD) {
1985 // In order to correctly satisfy the ABI, we need the high part to start
1986 // at offset 8. If the high and low parts we inferred are both 4-byte types
1987 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
1988 // the second element at offset 8. Check for this:
1989 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
1990 unsigned HiAlign = TD.getABITypeAlignment(Hi);
1991 unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
1992 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
1993
1994 // To handle this, we have to increase the size of the low part so that the
1995 // second element will start at an 8 byte offset. We can't increase the size
1996 // of the second element because it might make us access off the end of the
1997 // struct.
1998 if (HiStart != 8) {
1999 // There are only two sorts of types the ABI generation code can produce for
2000 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
2001 // Promote these to a larger type.
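  // For example, if Lo is i32 and Hi is float, HiStart computes to 4, so Lo is
  // widened to i64 below and the pair becomes { i64, float } with the float at
  // offset 8; an 8-byte Lo such as double or i32* needs no promotion.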
2002 if (Lo->isFloatTy()) 2003 Lo = llvm::Type::getDoubleTy(Lo->getContext()); 2004 else { 2005 assert(Lo->isIntegerTy() && "Invalid/unknown lo type"); 2006 Lo = llvm::Type::getInt64Ty(Lo->getContext()); 2007 } 2008 } 2009 2010 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL); 2011 2012 2013 // Verify that the second element is at an 8-byte offset. 2014 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 && 2015 "Invalid x86-64 argument pair!"); 2016 return Result; 2017 } 2018 2019 ABIArgInfo X86_64ABIInfo:: 2020 classifyReturnType(QualType RetTy) const { 2021 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the 2022 // classification algorithm. 2023 X86_64ABIInfo::Class Lo, Hi; 2024 classify(RetTy, 0, Lo, Hi); 2025 2026 // Check some invariants. 2027 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2028 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2029 2030 llvm::Type *ResType = 0; 2031 switch (Lo) { 2032 case NoClass: 2033 if (Hi == NoClass) 2034 return ABIArgInfo::getIgnore(); 2035 // If the low part is just padding, it takes no register, leave ResType 2036 // null. 2037 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2038 "Unknown missing lo part"); 2039 break; 2040 2041 case SSEUp: 2042 case X87Up: 2043 llvm_unreachable("Invalid classification for lo word."); 2044 2045 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via 2046 // hidden argument. 2047 case Memory: 2048 return getIndirectReturnResult(RetTy); 2049 2050 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next 2051 // available register of the sequence %rax, %rdx is used. 2052 case Integer: 2053 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 2054 2055 // If we have a sign or zero extended integer, make sure to return Extend 2056 // so that the parameter gets the right LLVM IR attributes. 2057 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2058 // Treat an enum type as its underlying type. 2059 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 2060 RetTy = EnumTy->getDecl()->getIntegerType(); 2061 2062 if (RetTy->isIntegralOrEnumerationType() && 2063 RetTy->isPromotableIntegerType()) 2064 return ABIArgInfo::getExtend(); 2065 } 2066 break; 2067 2068 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 2069 // available SSE register of the sequence %xmm0, %xmm1 is used. 2070 case SSE: 2071 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 2072 break; 2073 2074 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 2075 // returned on the X87 stack in %st0 as 80-bit x87 number. 2076 case X87: 2077 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 2078 break; 2079 2080 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 2081 // part of the value is returned in %st0 and the imaginary part in 2082 // %st1. 2083 case ComplexX87: 2084 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 2085 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), 2086 llvm::Type::getX86_FP80Ty(getVMContext()), 2087 NULL); 2088 break; 2089 } 2090 2091 llvm::Type *HighPart = 0; 2092 switch (Hi) { 2093 // Memory was handled previously and X87 should 2094 // never occur as a hi class. 2095 case Memory: 2096 case X87: 2097 llvm_unreachable("Invalid classification for hi word."); 2098 2099 case ComplexX87: // Previously handled. 
2100 case NoClass: 2101 break; 2102 2103 case Integer: 2104 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2105 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2106 return ABIArgInfo::getDirect(HighPart, 8); 2107 break; 2108 case SSE: 2109 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2110 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2111 return ABIArgInfo::getDirect(HighPart, 8); 2112 break; 2113 2114 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 2115 // is passed in the next available eightbyte chunk if the last used 2116 // vector register. 2117 // 2118 // SSEUP should always be preceded by SSE, just widen. 2119 case SSEUp: 2120 assert(Lo == SSE && "Unexpected SSEUp classification."); 2121 ResType = GetByteVectorType(RetTy); 2122 break; 2123 2124 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 2125 // returned together with the previous X87 value in %st0. 2126 case X87Up: 2127 // If X87Up is preceded by X87, we don't need to do 2128 // anything. However, in some cases with unions it may not be 2129 // preceded by X87. In such situations we follow gcc and pass the 2130 // extra bits in an SSE reg. 2131 if (Lo != X87) { 2132 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2133 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2134 return ABIArgInfo::getDirect(HighPart, 8); 2135 } 2136 break; 2137 } 2138 2139 // If a high part was specified, merge it together with the low part. It is 2140 // known to pass in the high eightbyte of the result. We do this by forming a 2141 // first class struct aggregate with the high and low part: {low, high} 2142 if (HighPart) 2143 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2144 2145 return ABIArgInfo::getDirect(ResType); 2146 } 2147 2148 ABIArgInfo X86_64ABIInfo::classifyArgumentType( 2149 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE) 2150 const 2151 { 2152 X86_64ABIInfo::Class Lo, Hi; 2153 classify(Ty, 0, Lo, Hi); 2154 2155 // Check some invariants. 2156 // FIXME: Enforce these by construction. 2157 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2158 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2159 2160 neededInt = 0; 2161 neededSSE = 0; 2162 llvm::Type *ResType = 0; 2163 switch (Lo) { 2164 case NoClass: 2165 if (Hi == NoClass) 2166 return ABIArgInfo::getIgnore(); 2167 // If the low part is just padding, it takes no register, leave ResType 2168 // null. 2169 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2170 "Unknown missing lo part"); 2171 break; 2172 2173 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 2174 // on the stack. 2175 case Memory: 2176 2177 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 2178 // COMPLEX_X87, it is passed in memory. 2179 case X87: 2180 case ComplexX87: 2181 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 2182 ++neededInt; 2183 return getIndirectResult(Ty, freeIntRegs); 2184 2185 case SSEUp: 2186 case X87Up: 2187 llvm_unreachable("Invalid classification for lo word."); 2188 2189 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 2190 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 2191 // and %r9 is used. 2192 case Integer: 2193 ++neededInt; 2194 2195 // Pick an 8-byte type based on the preferred type. 
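    // For example, a plain 'int' stays i32 here, a 'short' becomes i16 and is
    // then marked Extend below, and a struct { char c[6]; } is coerced to an
    // i48 covering just its six bytes.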
2196 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 2197 2198 // If we have a sign or zero extended integer, make sure to return Extend 2199 // so that the parameter gets the right LLVM IR attributes. 2200 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2201 // Treat an enum type as its underlying type. 2202 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2203 Ty = EnumTy->getDecl()->getIntegerType(); 2204 2205 if (Ty->isIntegralOrEnumerationType() && 2206 Ty->isPromotableIntegerType()) 2207 return ABIArgInfo::getExtend(); 2208 } 2209 2210 break; 2211 2212 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 2213 // available SSE register is used, the registers are taken in the 2214 // order from %xmm0 to %xmm7. 2215 case SSE: { 2216 llvm::Type *IRType = CGT.ConvertType(Ty); 2217 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 2218 ++neededSSE; 2219 break; 2220 } 2221 } 2222 2223 llvm::Type *HighPart = 0; 2224 switch (Hi) { 2225 // Memory was handled previously, ComplexX87 and X87 should 2226 // never occur as hi classes, and X87Up must be preceded by X87, 2227 // which is passed in memory. 2228 case Memory: 2229 case X87: 2230 case ComplexX87: 2231 llvm_unreachable("Invalid classification for hi word."); 2232 2233 case NoClass: break; 2234 2235 case Integer: 2236 ++neededInt; 2237 // Pick an 8-byte type based on the preferred type. 2238 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2239 2240 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2241 return ABIArgInfo::getDirect(HighPart, 8); 2242 break; 2243 2244 // X87Up generally doesn't occur here (long double is passed in 2245 // memory), except in situations involving unions. 2246 case X87Up: 2247 case SSE: 2248 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2249 2250 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2251 return ABIArgInfo::getDirect(HighPart, 8); 2252 2253 ++neededSSE; 2254 break; 2255 2256 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 2257 // eightbyte is passed in the upper half of the last used SSE 2258 // register. This only happens when 128-bit vectors are passed. 2259 case SSEUp: 2260 assert(Lo == SSE && "Unexpected SSEUp classification"); 2261 ResType = GetByteVectorType(Ty); 2262 break; 2263 } 2264 2265 // If a high part was specified, merge it together with the low part. It is 2266 // known to pass in the high eightbyte of the result. We do this by forming a 2267 // first class struct aggregate with the high and low part: {low, high} 2268 if (HighPart) 2269 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2270 2271 return ABIArgInfo::getDirect(ResType); 2272 } 2273 2274 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2275 2276 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2277 2278 // Keep track of the number of assigned registers. 2279 unsigned freeIntRegs = 6, freeSSERegs = 8; 2280 2281 // If the return value is indirect, then the hidden argument is consuming one 2282 // integer register. 2283 if (FI.getReturnInfo().isIndirect()) 2284 --freeIntRegs; 2285 2286 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 2287 // get assigned (in left-to-right order) for passing as follows... 
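  // For example, once six integer arguments have consumed %rdi..%r9, a later
  // struct { long a, b; } (which would need two GPRs) takes the else branch
  // below and is passed on the stack via getIndirectResult instead of being
  // split across registers and memory.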
2288 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2289 it != ie; ++it) { 2290 unsigned neededInt, neededSSE; 2291 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt, 2292 neededSSE); 2293 2294 // AMD64-ABI 3.2.3p3: If there are no registers available for any 2295 // eightbyte of an argument, the whole argument is passed on the 2296 // stack. If registers have already been assigned for some 2297 // eightbytes of such an argument, the assignments get reverted. 2298 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 2299 freeIntRegs -= neededInt; 2300 freeSSERegs -= neededSSE; 2301 } else { 2302 it->info = getIndirectResult(it->type, freeIntRegs); 2303 } 2304 } 2305 } 2306 2307 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 2308 QualType Ty, 2309 CodeGenFunction &CGF) { 2310 llvm::Value *overflow_arg_area_p = 2311 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 2312 llvm::Value *overflow_arg_area = 2313 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 2314 2315 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 2316 // byte boundary if alignment needed by type exceeds 8 byte boundary. 2317 // It isn't stated explicitly in the standard, but in practice we use 2318 // alignment greater than 16 where necessary. 2319 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 2320 if (Align > 8) { 2321 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 2322 llvm::Value *Offset = 2323 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 2324 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 2325 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 2326 CGF.Int64Ty); 2327 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); 2328 overflow_arg_area = 2329 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 2330 overflow_arg_area->getType(), 2331 "overflow_arg_area.align"); 2332 } 2333 2334 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 2335 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2336 llvm::Value *Res = 2337 CGF.Builder.CreateBitCast(overflow_arg_area, 2338 llvm::PointerType::getUnqual(LTy)); 2339 2340 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 2341 // l->overflow_arg_area + sizeof(type). 2342 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 2343 // an 8 byte boundary. 2344 2345 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 2346 llvm::Value *Offset = 2347 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 2348 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 2349 "overflow_arg_area.next"); 2350 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 2351 2352 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 2353 return Res; 2354 } 2355 2356 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2357 CodeGenFunction &CGF) const { 2358 // Assume that va_list type is correct; should be pointer to LLVM type: 2359 // struct { 2360 // i32 gp_offset; 2361 // i32 fp_offset; 2362 // i8* overflow_arg_area; 2363 // i8* reg_save_area; 2364 // }; 2365 unsigned neededInt, neededSSE; 2366 2367 Ty = CGF.getContext().getCanonicalType(Ty); 2368 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE); 2369 2370 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 2371 // in the registers. If not go to step 7. 
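  // For example, an x87 'long double' or a plain struct classified MEMORY
  // reports neededInt == neededSSE == 0 here and is always loaded from the
  // overflow area, while struct { int i; double d; } needs one GPR and one SSE
  // register and takes the in-register path when both are still available.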
2372 if (!neededInt && !neededSSE) 2373 return EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2374 2375 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 2376 // general purpose registers needed to pass type and num_fp to hold 2377 // the number of floating point registers needed. 2378 2379 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 2380 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 2381 // l->fp_offset > 304 - num_fp * 16 go to step 7. 2382 // 2383 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of 2384 // register save space). 2385 2386 llvm::Value *InRegs = 0; 2387 llvm::Value *gp_offset_p = 0, *gp_offset = 0; 2388 llvm::Value *fp_offset_p = 0, *fp_offset = 0; 2389 if (neededInt) { 2390 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); 2391 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); 2392 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); 2393 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); 2394 } 2395 2396 if (neededSSE) { 2397 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); 2398 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); 2399 llvm::Value *FitsInFP = 2400 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); 2401 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); 2402 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; 2403 } 2404 2405 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 2406 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 2407 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 2408 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 2409 2410 // Emit code to load the value if it was passed in registers. 2411 2412 CGF.EmitBlock(InRegBlock); 2413 2414 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with 2415 // an offset of l->gp_offset and/or l->fp_offset. This may require 2416 // copying to a temporary location in case the parameter is passed 2417 // in different register classes or requires an alignment greater 2418 // than 8 for general purpose registers and 16 for XMM registers. 2419 // 2420 // FIXME: This really results in shameful code when we end up needing to 2421 // collect arguments from different places; often what should result in a 2422 // simple assembling of a structure from scattered addresses has many more 2423 // loads than necessary. Can we clean this up? 2424 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2425 llvm::Value *RegAddr = 2426 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3), 2427 "reg_save_area"); 2428 if (neededInt && neededSSE) { 2429 // FIXME: Cleanup. 
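    // For example, for struct { int i; double d; } the coerced type is roughly
    // { i32, double }: the first eightbyte is fetched from the GP save area at
    // gp_offset, the second from the FP save area at fp_offset, and the two are
    // reassembled in a temporary before the address is handed back.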
2430 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 2431 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 2432 llvm::Value *Tmp = CGF.CreateTempAlloca(ST); 2433 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 2434 llvm::Type *TyLo = ST->getElementType(0); 2435 llvm::Type *TyHi = ST->getElementType(1); 2436 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 2437 "Unexpected ABI info for mixed regs"); 2438 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 2439 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 2440 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2441 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2442 llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr; 2443 llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr; 2444 llvm::Value *V = 2445 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 2446 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2447 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 2448 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2449 2450 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2451 llvm::PointerType::getUnqual(LTy)); 2452 } else if (neededInt) { 2453 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2454 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2455 llvm::PointerType::getUnqual(LTy)); 2456 } else if (neededSSE == 1) { 2457 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2458 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2459 llvm::PointerType::getUnqual(LTy)); 2460 } else { 2461 assert(neededSSE == 2 && "Invalid number of needed registers!"); 2462 // SSE registers are spaced 16 bytes apart in the register save 2463 // area, we need to collect the two eightbytes together. 2464 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2465 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); 2466 llvm::Type *DoubleTy = CGF.DoubleTy; 2467 llvm::Type *DblPtrTy = 2468 llvm::PointerType::getUnqual(DoubleTy); 2469 llvm::StructType *ST = llvm::StructType::get(DoubleTy, 2470 DoubleTy, NULL); 2471 llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST); 2472 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 2473 DblPtrTy)); 2474 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2475 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 2476 DblPtrTy)); 2477 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2478 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2479 llvm::PointerType::getUnqual(LTy)); 2480 } 2481 2482 // AMD64-ABI 3.5.7p5: Step 5. Set: 2483 // l->gp_offset = l->gp_offset + num_gp * 8 2484 // l->fp_offset = l->fp_offset + num_fp * 16. 2485 if (neededInt) { 2486 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2487 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2488 gp_offset_p); 2489 } 2490 if (neededSSE) { 2491 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2492 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2493 fp_offset_p); 2494 } 2495 CGF.EmitBranch(ContBlock); 2496 2497 // Emit code to load the value if it was passed in memory. 2498 2499 CGF.EmitBlock(InMemBlock); 2500 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2501 2502 // Return the appropriate result. 
2503 2504 CGF.EmitBlock(ContBlock); 2505 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2, 2506 "vaarg.addr"); 2507 ResAddr->addIncoming(RegAddr, InRegBlock); 2508 ResAddr->addIncoming(MemAddr, InMemBlock); 2509 return ResAddr; 2510 } 2511 2512 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const { 2513 2514 if (Ty->isVoidType()) 2515 return ABIArgInfo::getIgnore(); 2516 2517 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2518 Ty = EnumTy->getDecl()->getIntegerType(); 2519 2520 uint64_t Size = getContext().getTypeSize(Ty); 2521 2522 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2523 if (hasNonTrivialDestructorOrCopyConstructor(RT) || 2524 RT->getDecl()->hasFlexibleArrayMember()) 2525 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2526 2527 // FIXME: mingw-w64-gcc emits 128-bit struct as i128 2528 if (Size == 128 && 2529 getContext().getTargetInfo().getTriple().getOS() 2530 == llvm::Triple::MinGW32) 2531 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2532 Size)); 2533 2534 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 2535 // not 1, 2, 4, or 8 bytes, must be passed by reference." 2536 if (Size <= 64 && 2537 (Size & (Size - 1)) == 0) 2538 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2539 Size)); 2540 2541 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2542 } 2543 2544 if (Ty->isPromotableIntegerType()) 2545 return ABIArgInfo::getExtend(); 2546 2547 return ABIArgInfo::getDirect(); 2548 } 2549 2550 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2551 2552 QualType RetTy = FI.getReturnType(); 2553 FI.getReturnInfo() = classify(RetTy); 2554 2555 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2556 it != ie; ++it) 2557 it->info = classify(it->type); 2558 } 2559 2560 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2561 CodeGenFunction &CGF) const { 2562 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2563 2564 CGBuilderTy &Builder = CGF.Builder; 2565 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 2566 "ap"); 2567 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2568 llvm::Type *PTy = 2569 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2570 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2571 2572 uint64_t Offset = 2573 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8); 2574 llvm::Value *NextAddr = 2575 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2576 "ap.next"); 2577 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2578 2579 return AddrTyped; 2580 } 2581 2582 namespace { 2583 2584 class NaClX86_64ABIInfo : public ABIInfo { 2585 public: 2586 NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) 2587 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {} 2588 virtual void computeInfo(CGFunctionInfo &FI) const; 2589 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2590 CodeGenFunction &CGF) const; 2591 private: 2592 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv. 2593 X86_64ABIInfo NInfo; // Used for everything else. 
2594 };
2595
2596 class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2597 public:
2598 NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
2599 : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {}
2600 };
2601
2602 }
2603
2604 void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2605 if (FI.getASTCallingConvention() == CC_PnaclCall)
2606 PInfo.computeInfo(FI);
2607 else
2608 NInfo.computeInfo(FI);
2609 }
2610
2611 llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2612 CodeGenFunction &CGF) const {
2613 // Always use the native convention; calling pnacl-style varargs functions
2614 // is unsupported.
2615 return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
2616 }
2617
2618
2619 // PowerPC-32
2620
2621 namespace {
2622 class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2623 public:
2624 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2625
2626 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2627 // This is recovered from gcc output.
2628 return 1; // r1 is the dedicated stack pointer
2629 }
2630
2631 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2632 llvm::Value *Address) const;
2633 };
2634
2635 }
2636
2637 bool
2638 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2639 llvm::Value *Address) const {
2640 // This is calculated from the LLVM and GCC tables and verified
2641 // against gcc output. AFAIK all ABIs use the same encoding.
2642
2643 CodeGen::CGBuilderTy &Builder = CGF.Builder;
2644
2645 llvm::IntegerType *i8 = CGF.Int8Ty;
2646 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2647 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
2648 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
2649
2650 // 0-31: r0-31, the 4-byte general-purpose registers
2651 AssignToArrayRange(Builder, Address, Four8, 0, 31);
2652
2653 // 32-63: fp0-31, the 8-byte floating-point registers
2654 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
2655
2656 // 64-76 are various 4-byte special-purpose registers:
2657 // 64: mq
2658 // 65: lr
2659 // 66: ctr
2660 // 67: ap
2661 // 68-75: cr0-7
2662 // 76: xer
2663 AssignToArrayRange(Builder, Address, Four8, 64, 76);
2664
2665 // 77-108: v0-31, the 16-byte vector registers
2666 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
2667
2668 // 109: vrsave
2669 // 110: vscr
2670 // 111: spe_acc
2671 // 112: spefscr
2672 // 113: sfp
2673 AssignToArrayRange(Builder, Address, Four8, 109, 113);
2674
2675 return false;
2676 }
2677
2678 // PowerPC-64
2679
2680 namespace {
2681 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
2682 class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
2683
2684 public:
2685 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
2686
2687 // TODO: We can add more logic to computeInfo to improve performance.
2688 // Example: For aggregate arguments that fit in a register, we could
2689 // use getDirectInReg (as is done below for structs containing a single
2690 // floating-point value) to avoid pushing them to memory on function
2691 // entry. This would require changing the logic in PPCISelLowering
2692 // when lowering the parameters in the caller and args in the callee.
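  // For example, struct { double d; } is passed below as a plain 'double' in a
  // floating-point register via getDirectInReg, while struct { double d; int i; }
  // contains more than one element and falls back to classifyArgumentType.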
2693 virtual void computeInfo(CGFunctionInfo &FI) const { 2694 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2695 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2696 it != ie; ++it) { 2697 // We rely on the default argument classification for the most part. 2698 // One exception: An aggregate containing a single floating-point 2699 // item must be passed in a register if one is available. 2700 const Type *T = isSingleElementStruct(it->type, getContext()); 2701 if (T) { 2702 const BuiltinType *BT = T->getAs<BuiltinType>(); 2703 if (BT && BT->isFloatingPoint()) { 2704 QualType QT(T, 0); 2705 it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); 2706 continue; 2707 } 2708 } 2709 it->info = classifyArgumentType(it->type); 2710 } 2711 } 2712 2713 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, 2714 QualType Ty, 2715 CodeGenFunction &CGF) const; 2716 }; 2717 2718 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { 2719 public: 2720 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT) 2721 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {} 2722 2723 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2724 // This is recovered from gcc output. 2725 return 1; // r1 is the dedicated stack pointer 2726 } 2727 2728 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2729 llvm::Value *Address) const; 2730 }; 2731 2732 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2733 public: 2734 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2735 2736 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2737 // This is recovered from gcc output. 2738 return 1; // r1 is the dedicated stack pointer 2739 } 2740 2741 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2742 llvm::Value *Address) const; 2743 }; 2744 2745 } 2746 2747 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 2748 llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr, 2749 QualType Ty, 2750 CodeGenFunction &CGF) const { 2751 llvm::Type *BP = CGF.Int8PtrTy; 2752 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2753 2754 CGBuilderTy &Builder = CGF.Builder; 2755 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 2756 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2757 2758 // Update the va_list pointer. 2759 unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8; 2760 unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8); 2761 llvm::Value *NextAddr = 2762 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), 2763 "ap.next"); 2764 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2765 2766 // If the argument is smaller than 8 bytes, it is right-adjusted in 2767 // its doubleword slot. Adjust the pointer to pick it up from the 2768 // correct offset. 2769 if (SizeInBytes < 8) { 2770 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 2771 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes)); 2772 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 2773 } 2774 2775 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2776 return Builder.CreateBitCast(Addr, PTy); 2777 } 2778 2779 static bool 2780 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2781 llvm::Value *Address) { 2782 // This is calculated from the LLVM and GCC tables and verified 2783 // against gcc output. AFAIK all ABIs use the same encoding. 
2784 2785 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2786 2787 llvm::IntegerType *i8 = CGF.Int8Ty; 2788 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2789 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2790 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2791 2792 // 0-31: r0-31, the 8-byte general-purpose registers 2793 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 2794 2795 // 32-63: fp0-31, the 8-byte floating-point registers 2796 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2797 2798 // 64-76 are various 4-byte special-purpose registers: 2799 // 64: mq 2800 // 65: lr 2801 // 66: ctr 2802 // 67: ap 2803 // 68-75 cr0-7 2804 // 76: xer 2805 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2806 2807 // 77-108: v0-31, the 16-byte vector registers 2808 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2809 2810 // 109: vrsave 2811 // 110: vscr 2812 // 111: spe_acc 2813 // 112: spefscr 2814 // 113: sfp 2815 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2816 2817 return false; 2818 } 2819 2820 bool 2821 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( 2822 CodeGen::CodeGenFunction &CGF, 2823 llvm::Value *Address) const { 2824 2825 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 2826 } 2827 2828 bool 2829 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2830 llvm::Value *Address) const { 2831 2832 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 2833 } 2834 2835 //===----------------------------------------------------------------------===// 2836 // ARM ABI Implementation 2837 //===----------------------------------------------------------------------===// 2838 2839 namespace { 2840 2841 class ARMABIInfo : public ABIInfo { 2842 public: 2843 enum ABIKind { 2844 APCS = 0, 2845 AAPCS = 1, 2846 AAPCS_VFP 2847 }; 2848 2849 private: 2850 ABIKind Kind; 2851 2852 public: 2853 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {} 2854 2855 bool isEABI() const { 2856 StringRef Env = 2857 getContext().getTargetInfo().getTriple().getEnvironmentName(); 2858 return (Env == "gnueabi" || Env == "eabi" || 2859 Env == "android" || Env == "androideabi"); 2860 } 2861 2862 private: 2863 ABIKind getABIKind() const { return Kind; } 2864 2865 ABIArgInfo classifyReturnType(QualType RetTy) const; 2866 ABIArgInfo classifyArgumentType(QualType RetTy, int *VFPRegs, 2867 unsigned &AllocatedVFP, 2868 bool &IsHA) const; 2869 bool isIllegalVectorType(QualType Ty) const; 2870 2871 virtual void computeInfo(CGFunctionInfo &FI) const; 2872 2873 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2874 CodeGenFunction &CGF) const; 2875 }; 2876 2877 class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 2878 public: 2879 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 2880 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 2881 2882 const ARMABIInfo &getABIInfo() const { 2883 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 2884 } 2885 2886 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2887 return 13; 2888 } 2889 2890 StringRef getARCRetainAutoreleasedReturnValueMarker() const { 2891 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; 2892 } 2893 2894 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2895 llvm::Value *Address) const { 2896 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 2897 2898 // 0-15 are the 16 integer registers. 
2899 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
2900 return false;
2901 }
2902
2903 unsigned getSizeOfUnwindException() const {
2904 if (getABIInfo().isEABI()) return 88;
2905 return TargetCodeGenInfo::getSizeOfUnwindException();
2906 }
2907 };
2908
2909 }
2910
2911 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
2912 // To correctly handle Homogeneous Aggregates, we need to keep track of the
2913 // VFP registers allocated so far.
2914 // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
2915 // VFP registers of the appropriate type unallocated then the argument is
2916 // allocated to the lowest-numbered sequence of such registers.
2917 // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
2918 // unallocated are marked as unavailable.
2919 unsigned AllocatedVFP = 0;
2920 int VFPRegs[16] = { 0 };
2921 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2922 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2923 it != ie; ++it) {
2924 unsigned PreAllocation = AllocatedVFP;
2925 bool IsHA = false;
2926 // 6.1.2.3 There is one VFP co-processor register class using registers
2927 // s0-s15 (d0-d7) for passing arguments.
2928 const unsigned NumVFPs = 16;
2929 it->info = classifyArgumentType(it->type, VFPRegs, AllocatedVFP, IsHA);
2930 // If we do not have enough VFP registers for the HA, any VFP registers
2931 // that are unallocated are marked as unavailable. To achieve this, we add
2932 // padding of (NumVFPs - PreAllocation) floats.
2933 if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
2934 llvm::Type *PaddingTy = llvm::ArrayType::get(
2935 llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
2936 it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
2937 }
2938 }
2939
2940 // Always honor user-specified calling convention.
2941 if (FI.getCallingConvention() != llvm::CallingConv::C)
2942 return;
2943
2944 // The calling convention that the ABI would use by default.
2945 llvm::CallingConv::ID DefaultCC;
2946 if (getContext().getTargetInfo().getTriple().getEnvironmentName()=="gnueabihf")
2947 DefaultCC = llvm::CallingConv::ARM_AAPCS_VFP;
2948 else if (isEABI())
2949 DefaultCC = llvm::CallingConv::ARM_AAPCS;
2950 else
2951 DefaultCC = llvm::CallingConv::ARM_APCS;
2952
2953 // If the user did not explicitly ask for a specific calling convention (e.g.
2954 // via the pcs attribute), set the effective calling convention if it differs
2955 // from the ABI default.
2956 switch (getABIKind()) {
2957 case APCS:
2958 if (DefaultCC != llvm::CallingConv::ARM_APCS)
2959 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
2960 break;
2961 case AAPCS:
2962 if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
2963 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
2964 break;
2965 case AAPCS_VFP:
2966 if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP)
2967 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
2968 break;
2969 }
2970 }
2971
2972 /// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous
2973 /// aggregate. If HAMembers is non-null, the number of base elements
2974 /// contained in the type is returned through it; this is used for the
2975 /// recursive calls that check aggregate component types.
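///
/// For example, struct { float x, y, z; } and struct { float v[4]; } are
/// homogeneous aggregates with a float base type, whereas struct { float f; double d; }
/// mixes base types and struct { float v[5]; } has too many members, so both
/// are rejected.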
2976 static bool isHomogeneousAggregate(QualType Ty, const Type *&Base, 2977 ASTContext &Context, 2978 uint64_t *HAMembers = 0) { 2979 uint64_t Members = 0; 2980 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 2981 if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members)) 2982 return false; 2983 Members *= AT->getSize().getZExtValue(); 2984 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 2985 const RecordDecl *RD = RT->getDecl(); 2986 if (RD->hasFlexibleArrayMember()) 2987 return false; 2988 2989 Members = 0; 2990 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2991 i != e; ++i) { 2992 const FieldDecl *FD = *i; 2993 uint64_t FldMembers; 2994 if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers)) 2995 return false; 2996 2997 Members = (RD->isUnion() ? 2998 std::max(Members, FldMembers) : Members + FldMembers); 2999 } 3000 } else { 3001 Members = 1; 3002 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 3003 Members = 2; 3004 Ty = CT->getElementType(); 3005 } 3006 3007 // Homogeneous aggregates for AAPCS-VFP must have base types of float, 3008 // double, or 64-bit or 128-bit vectors. 3009 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 3010 if (BT->getKind() != BuiltinType::Float && 3011 BT->getKind() != BuiltinType::Double && 3012 BT->getKind() != BuiltinType::LongDouble) 3013 return false; 3014 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 3015 unsigned VecSize = Context.getTypeSize(VT); 3016 if (VecSize != 64 && VecSize != 128) 3017 return false; 3018 } else { 3019 return false; 3020 } 3021 3022 // The base type must be the same for all members. Vector types of the 3023 // same total size are treated as being equivalent here. 3024 const Type *TyPtr = Ty.getTypePtr(); 3025 if (!Base) 3026 Base = TyPtr; 3027 if (Base != TyPtr && 3028 (!Base->isVectorType() || !TyPtr->isVectorType() || 3029 Context.getTypeSize(Base) != Context.getTypeSize(TyPtr))) 3030 return false; 3031 } 3032 3033 // Homogeneous Aggregates can have at most 4 members of the base type. 3034 if (HAMembers) 3035 *HAMembers = Members; 3036 3037 return (Members > 0 && Members <= 4); 3038 } 3039 3040 /// markAllocatedVFPs - update VFPRegs according to the alignment and 3041 /// number of VFP registers (unit is S register) requested. 3042 static void markAllocatedVFPs(int *VFPRegs, unsigned &AllocatedVFP, 3043 unsigned Alignment, 3044 unsigned NumRequired) { 3045 // Early Exit. 3046 if (AllocatedVFP >= 16) 3047 return; 3048 // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive 3049 // VFP registers of the appropriate type unallocated then the argument is 3050 // allocated to the lowest-numbered sequence of such registers. 3051 for (unsigned I = 0; I < 16; I += Alignment) { 3052 bool FoundSlot = true; 3053 for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++) 3054 if (J >= 16 || VFPRegs[J]) { 3055 FoundSlot = false; 3056 break; 3057 } 3058 if (FoundSlot) { 3059 for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++) 3060 VFPRegs[J] = 1; 3061 AllocatedVFP += NumRequired; 3062 return; 3063 } 3064 } 3065 // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are 3066 // unallocated are marked as unavailable. 3067 for (unsigned I = 0; I < 16; I++) 3068 VFPRegs[I] = 1; 3069 AllocatedVFP = 17; // We do not have enough VFP registers. 
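  // Note that holes created by alignment remain available for back-filling:
  // e.g. if s0 is taken and a double is then placed in s2/s3, the scan above
  // can still assign s1 to a later single-precision argument.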
3070 } 3071 3072 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, int *VFPRegs, 3073 unsigned &AllocatedVFP, 3074 bool &IsHA) const { 3075 // We update number of allocated VFPs according to 3076 // 6.1.2.1 The following argument types are VFP CPRCs: 3077 // A single-precision floating-point type (including promoted 3078 // half-precision types); A double-precision floating-point type; 3079 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate 3080 // with a Base Type of a single- or double-precision floating-point type, 3081 // 64-bit containerized vectors or 128-bit containerized vectors with one 3082 // to four Elements. 3083 3084 // Handle illegal vector types here. 3085 if (isIllegalVectorType(Ty)) { 3086 uint64_t Size = getContext().getTypeSize(Ty); 3087 if (Size <= 32) { 3088 llvm::Type *ResType = 3089 llvm::Type::getInt32Ty(getVMContext()); 3090 return ABIArgInfo::getDirect(ResType); 3091 } 3092 if (Size == 64) { 3093 llvm::Type *ResType = llvm::VectorType::get( 3094 llvm::Type::getInt32Ty(getVMContext()), 2); 3095 markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2); 3096 return ABIArgInfo::getDirect(ResType); 3097 } 3098 if (Size == 128) { 3099 llvm::Type *ResType = llvm::VectorType::get( 3100 llvm::Type::getInt32Ty(getVMContext()), 4); 3101 markAllocatedVFPs(VFPRegs, AllocatedVFP, 4, 4); 3102 return ABIArgInfo::getDirect(ResType); 3103 } 3104 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3105 } 3106 // Update VFPRegs for legal vector types. 3107 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3108 uint64_t Size = getContext().getTypeSize(VT); 3109 // Size of a legal vector should be power of 2 and above 64. 3110 markAllocatedVFPs(VFPRegs, AllocatedVFP, Size >= 128 ? 4 : 2, Size / 32); 3111 } 3112 // Update VFPRegs for floating point types. 3113 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 3114 if (BT->getKind() == BuiltinType::Half || 3115 BT->getKind() == BuiltinType::Float) 3116 markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, 1); 3117 if (BT->getKind() == BuiltinType::Double || 3118 BT->getKind() == BuiltinType::LongDouble) 3119 markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2); 3120 } 3121 3122 if (!isAggregateTypeForABI(Ty)) { 3123 // Treat an enum type as its underlying type. 3124 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3125 Ty = EnumTy->getDecl()->getIntegerType(); 3126 3127 return (Ty->isPromotableIntegerType() ? 3128 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3129 } 3130 3131 // Ignore empty records. 3132 if (isEmptyRecord(getContext(), Ty, true)) 3133 return ABIArgInfo::getIgnore(); 3134 3135 // Structures with either a non-trivial destructor or a non-trivial 3136 // copy constructor are always indirect. 3137 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 3138 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3139 3140 if (getABIKind() == ARMABIInfo::AAPCS_VFP) { 3141 // Homogeneous Aggregates need to be expanded when we can fit the aggregate 3142 // into VFP registers. 3143 const Type *Base = 0; 3144 uint64_t Members = 0; 3145 if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) { 3146 assert(Base && "Base class should be set for homogeneous aggregate"); 3147 // Base can be a floating-point or a vector. 3148 if (Base->isVectorType()) { 3149 // ElementSize is in number of floats. 3150 unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 
2 : 4; 3151 markAllocatedVFPs(VFPRegs, AllocatedVFP, ElementSize, Members * ElementSize); 3152 } else if (Base->isSpecificBuiltinType(BuiltinType::Float)) 3153 markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, Members); 3154 else { 3155 assert(Base->isSpecificBuiltinType(BuiltinType::Double) || 3156 Base->isSpecificBuiltinType(BuiltinType::LongDouble)); 3157 markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, Members * 2); 3158 } 3159 IsHA = true; 3160 return ABIArgInfo::getExpand(); 3161 } 3162 } 3163 3164 // Support byval for ARM. 3165 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64) || 3166 getContext().getTypeAlign(Ty) > 64) { 3167 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 3168 } 3169 3170 // Otherwise, pass by coercing to a structure of the appropriate size. 3171 llvm::Type* ElemTy; 3172 unsigned SizeRegs; 3173 // FIXME: Try to match the types of the arguments more accurately where 3174 // we can. 3175 if (getContext().getTypeAlign(Ty) <= 32) { 3176 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 3177 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 3178 } else { 3179 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 3180 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 3181 } 3182 3183 llvm::Type *STy = 3184 llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL); 3185 return ABIArgInfo::getDirect(STy); 3186 } 3187 3188 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 3189 llvm::LLVMContext &VMContext) { 3190 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 3191 // is called integer-like if its size is less than or equal to one word, and 3192 // the offset of each of its addressable sub-fields is zero. 3193 3194 uint64_t Size = Context.getTypeSize(Ty); 3195 3196 // Check that the type fits in a word. 3197 if (Size > 32) 3198 return false; 3199 3200 // FIXME: Handle vector types! 3201 if (Ty->isVectorType()) 3202 return false; 3203 3204 // Float types are never treated as "integer like". 3205 if (Ty->isRealFloatingType()) 3206 return false; 3207 3208 // If this is a builtin or pointer type then it is ok. 3209 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 3210 return true; 3211 3212 // Small complex integer types are "integer like". 3213 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 3214 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 3215 3216 // Single element and zero sized arrays should be allowed, by the definition 3217 // above, but they are not. 3218 3219 // Otherwise, it must be a record type. 3220 const RecordType *RT = Ty->getAs<RecordType>(); 3221 if (!RT) return false; 3222 3223 // Ignore records with flexible arrays. 3224 const RecordDecl *RD = RT->getDecl(); 3225 if (RD->hasFlexibleArrayMember()) 3226 return false; 3227 3228 // Check that all sub-fields are at offset 0, and are themselves "integer 3229 // like". 3230 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 3231 3232 bool HadField = false; 3233 unsigned idx = 0; 3234 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3235 i != e; ++i, ++idx) { 3236 const FieldDecl *FD = *i; 3237 3238 // Bit-fields are not addressable, we only need to verify they are "integer 3239 // like". We still have to disallow a subsequent non-bitfield, for example: 3240 // struct { int : 0; int x } 3241 // is non-integer like according to gcc. 
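    // A struct whose members are all bit-fields, for example
    // struct { int a : 8; int b : 8; }, still counts as integer-like here:
    // the one-field limit below is only enforced against non-bit-field members.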
3242 if (FD->isBitField()) { 3243 if (!RD->isUnion()) 3244 HadField = true; 3245 3246 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 3247 return false; 3248 3249 continue; 3250 } 3251 3252 // Check if this field is at offset 0. 3253 if (Layout.getFieldOffset(idx) != 0) 3254 return false; 3255 3256 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 3257 return false; 3258 3259 // Only allow at most one field in a structure. This doesn't match the 3260 // wording above, but follows gcc in situations with a field following an 3261 // empty structure. 3262 if (!RD->isUnion()) { 3263 if (HadField) 3264 return false; 3265 3266 HadField = true; 3267 } 3268 } 3269 3270 return true; 3271 } 3272 3273 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const { 3274 if (RetTy->isVoidType()) 3275 return ABIArgInfo::getIgnore(); 3276 3277 // Large vector types should be returned via memory. 3278 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 3279 return ABIArgInfo::getIndirect(0); 3280 3281 if (!isAggregateTypeForABI(RetTy)) { 3282 // Treat an enum type as its underlying type. 3283 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3284 RetTy = EnumTy->getDecl()->getIntegerType(); 3285 3286 return (RetTy->isPromotableIntegerType() ? 3287 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3288 } 3289 3290 // Structures with either a non-trivial destructor or a non-trivial 3291 // copy constructor are always indirect. 3292 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 3293 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3294 3295 // Are we following APCS? 3296 if (getABIKind() == APCS) { 3297 if (isEmptyRecord(getContext(), RetTy, false)) 3298 return ABIArgInfo::getIgnore(); 3299 3300 // Complex types are all returned as packed integers. 3301 // 3302 // FIXME: Consider using 2 x vector types if the back end handles them 3303 // correctly. 3304 if (RetTy->isAnyComplexType()) 3305 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 3306 getContext().getTypeSize(RetTy))); 3307 3308 // Integer like structures are returned in r0. 3309 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 3310 // Return in the smallest viable integer type. 3311 uint64_t Size = getContext().getTypeSize(RetTy); 3312 if (Size <= 8) 3313 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3314 if (Size <= 16) 3315 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3316 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3317 } 3318 3319 // Otherwise return in memory. 3320 return ABIArgInfo::getIndirect(0); 3321 } 3322 3323 // Otherwise this is an AAPCS variant. 3324 3325 if (isEmptyRecord(getContext(), RetTy, true)) 3326 return ABIArgInfo::getIgnore(); 3327 3328 // Check for homogeneous aggregates with AAPCS-VFP. 3329 if (getABIKind() == AAPCS_VFP) { 3330 const Type *Base = 0; 3331 if (isHomogeneousAggregate(RetTy, Base, getContext())) { 3332 assert(Base && "Base class should be set for homogeneous aggregate"); 3333 // Homogeneous Aggregates are returned directly. 3334 return ABIArgInfo::getDirect(); 3335 } 3336 } 3337 3338 // Aggregates <= 4 bytes are returned in r0; other aggregates 3339 // are returned indirectly. 3340 uint64_t Size = getContext().getTypeSize(RetTy); 3341 if (Size <= 32) { 3342 // Return in the smallest viable integer type. 
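    // (For example, a struct of three chars is widened and returned directly
    // as an i32 in r0.)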
3343 if (Size <= 8) 3344 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3345 if (Size <= 16) 3346 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3347 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3348 } 3349 3350 return ABIArgInfo::getIndirect(0); 3351 } 3352 3353 /// isIllegalVector - check whether Ty is an illegal vector type. 3354 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const { 3355 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3356 // Check whether VT is legal. 3357 unsigned NumElements = VT->getNumElements(); 3358 uint64_t Size = getContext().getTypeSize(VT); 3359 // NumElements should be power of 2. 3360 if ((NumElements & (NumElements - 1)) != 0) 3361 return true; 3362 // Size should be greater than 32 bits. 3363 return Size <= 32; 3364 } 3365 return false; 3366 } 3367 3368 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3369 CodeGenFunction &CGF) const { 3370 llvm::Type *BP = CGF.Int8PtrTy; 3371 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3372 3373 CGBuilderTy &Builder = CGF.Builder; 3374 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 3375 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3376 3377 uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8; 3378 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; 3379 bool IsIndirect = false; 3380 3381 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for 3382 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte. 3383 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 3384 getABIKind() == ARMABIInfo::AAPCS) 3385 TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 3386 else 3387 TyAlign = 4; 3388 // Use indirect if size of the illegal vector is bigger than 16 bytes. 3389 if (isIllegalVectorType(Ty) && Size > 16) { 3390 IsIndirect = true; 3391 Size = 4; 3392 TyAlign = 4; 3393 } 3394 3395 // Handle address alignment for ABI alignment > 4 bytes. 3396 if (TyAlign > 4) { 3397 assert((TyAlign & (TyAlign - 1)) == 0 && 3398 "Alignment is not power of 2!"); 3399 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty); 3400 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1)); 3401 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1))); 3402 Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align"); 3403 } 3404 3405 uint64_t Offset = 3406 llvm::RoundUpToAlignment(Size, 4); 3407 llvm::Value *NextAddr = 3408 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 3409 "ap.next"); 3410 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3411 3412 if (IsIndirect) 3413 Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP)); 3414 else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) { 3415 // We can't directly cast ap.cur to pointer to a vector type, since ap.cur 3416 // may not be correctly aligned for the vector type. We create an aligned 3417 // temporary space and copy the content over from ap.cur to the temporary 3418 // space. This is necessary if the natural alignment of the type is greater 3419 // than the ABI alignment. 
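    // (For example, a 128-bit NEON vector has a natural alignment of 16 bytes,
    // but TyAlign was clamped to at most 8 above, so its contents are copied
    // into a suitably aligned temporary first.)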
3420     llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
3421     CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
3422     llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
3423                                                     "var.align");
3424     llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
3425     llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
3426     Builder.CreateMemCpy(Dst, Src,
3427         llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
3428         TyAlign, false);
3429     Addr = AlignedTemp; // The content is now in the aligned location.
3430   }
3431   llvm::Type *PTy =
3432     llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3433   llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
3434 
3435   return AddrTyped;
3436 }
3437 
3438 namespace {
3439 
3440 class NaClARMABIInfo : public ABIInfo {
3441  public:
3442   NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
3443       : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
3444   virtual void computeInfo(CGFunctionInfo &FI) const;
3445   virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3446                                  CodeGenFunction &CGF) const;
3447  private:
3448   PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
3449   ARMABIInfo NInfo; // Used for everything else.
3450 };
3451 
3452 class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo {
3453  public:
3454   NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
3455       : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
3456 };
3457 
3458 }
3459 
3460 void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
3461   if (FI.getASTCallingConvention() == CC_PnaclCall)
3462     PInfo.computeInfo(FI);
3463   else
3464     static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
3465 }
3466 
3467 llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3468                                        CodeGenFunction &CGF) const {
3469   // Always use the native convention; calling pnacl-style varargs functions
3470   // is unsupported.
3471 return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF); 3472 } 3473 3474 //===----------------------------------------------------------------------===// 3475 // NVPTX ABI Implementation 3476 //===----------------------------------------------------------------------===// 3477 3478 namespace { 3479 3480 class NVPTXABIInfo : public ABIInfo { 3481 public: 3482 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3483 3484 ABIArgInfo classifyReturnType(QualType RetTy) const; 3485 ABIArgInfo classifyArgumentType(QualType Ty) const; 3486 3487 virtual void computeInfo(CGFunctionInfo &FI) const; 3488 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3489 CodeGenFunction &CFG) const; 3490 }; 3491 3492 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { 3493 public: 3494 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT) 3495 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {} 3496 3497 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3498 CodeGen::CodeGenModule &M) const; 3499 }; 3500 3501 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const { 3502 if (RetTy->isVoidType()) 3503 return ABIArgInfo::getIgnore(); 3504 if (isAggregateTypeForABI(RetTy)) 3505 return ABIArgInfo::getIndirect(0); 3506 return ABIArgInfo::getDirect(); 3507 } 3508 3509 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const { 3510 if (isAggregateTypeForABI(Ty)) 3511 return ABIArgInfo::getIndirect(0); 3512 3513 return ABIArgInfo::getDirect(); 3514 } 3515 3516 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const { 3517 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3518 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3519 it != ie; ++it) 3520 it->info = classifyArgumentType(it->type); 3521 3522 // Always honor user-specified calling convention. 3523 if (FI.getCallingConvention() != llvm::CallingConv::C) 3524 return; 3525 3526 // Calling convention as default by an ABI. 3527 // We're still using the PTX_Kernel/PTX_Device calling conventions here, 3528 // but we should switch to NVVM metadata later on. 
3529 llvm::CallingConv::ID DefaultCC; 3530 const LangOptions &LangOpts = getContext().getLangOpts(); 3531 if (LangOpts.OpenCL || LangOpts.CUDA) { 3532 // If we are in OpenCL or CUDA mode, then default to device functions 3533 DefaultCC = llvm::CallingConv::PTX_Device; 3534 } else { 3535 // If we are in standard C/C++ mode, use the triple to decide on the default 3536 StringRef Env = 3537 getContext().getTargetInfo().getTriple().getEnvironmentName(); 3538 if (Env == "device") 3539 DefaultCC = llvm::CallingConv::PTX_Device; 3540 else 3541 DefaultCC = llvm::CallingConv::PTX_Kernel; 3542 } 3543 FI.setEffectiveCallingConvention(DefaultCC); 3544 3545 } 3546 3547 llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3548 CodeGenFunction &CFG) const { 3549 llvm_unreachable("NVPTX does not support varargs"); 3550 } 3551 3552 void NVPTXTargetCodeGenInfo:: 3553 SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3554 CodeGen::CodeGenModule &M) const{ 3555 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3556 if (!FD) return; 3557 3558 llvm::Function *F = cast<llvm::Function>(GV); 3559 3560 // Perform special handling in OpenCL mode 3561 if (M.getLangOpts().OpenCL) { 3562 // Use OpenCL function attributes to set proper calling conventions 3563 // By default, all functions are device functions 3564 if (FD->hasAttr<OpenCLKernelAttr>()) { 3565 // OpenCL __kernel functions get a kernel calling convention 3566 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 3567 // And kernel functions are not subject to inlining 3568 F->addFnAttr(llvm::Attributes::NoInline); 3569 } 3570 } 3571 3572 // Perform special handling in CUDA mode. 3573 if (M.getLangOpts().CUDA) { 3574 // CUDA __global__ functions get a kernel calling convention. Since 3575 // __global__ functions cannot be called from the device, we do not 3576 // need to set the noinline attribute. 3577 if (FD->getAttr<CUDAGlobalAttr>()) 3578 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 3579 } 3580 } 3581 3582 } 3583 3584 //===----------------------------------------------------------------------===// 3585 // MBlaze ABI Implementation 3586 //===----------------------------------------------------------------------===// 3587 3588 namespace { 3589 3590 class MBlazeABIInfo : public ABIInfo { 3591 public: 3592 MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3593 3594 bool isPromotableIntegerType(QualType Ty) const; 3595 3596 ABIArgInfo classifyReturnType(QualType RetTy) const; 3597 ABIArgInfo classifyArgumentType(QualType RetTy) const; 3598 3599 virtual void computeInfo(CGFunctionInfo &FI) const { 3600 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3601 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3602 it != ie; ++it) 3603 it->info = classifyArgumentType(it->type); 3604 } 3605 3606 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3607 CodeGenFunction &CGF) const; 3608 }; 3609 3610 class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo { 3611 public: 3612 MBlazeTargetCodeGenInfo(CodeGenTypes &CGT) 3613 : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {} 3614 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3615 CodeGen::CodeGenModule &M) const; 3616 }; 3617 3618 } 3619 3620 bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const { 3621 // MBlaze ABI requires all 8 and 16 bit quantities to be extended. 
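  // Concretely: bool, the 8-bit character types, and short (both signed and
  // unsigned).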
3622 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 3623 switch (BT->getKind()) { 3624 case BuiltinType::Bool: 3625 case BuiltinType::Char_S: 3626 case BuiltinType::Char_U: 3627 case BuiltinType::SChar: 3628 case BuiltinType::UChar: 3629 case BuiltinType::Short: 3630 case BuiltinType::UShort: 3631 return true; 3632 default: 3633 return false; 3634 } 3635 return false; 3636 } 3637 3638 llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3639 CodeGenFunction &CGF) const { 3640 // FIXME: Implement 3641 return 0; 3642 } 3643 3644 3645 ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const { 3646 if (RetTy->isVoidType()) 3647 return ABIArgInfo::getIgnore(); 3648 if (isAggregateTypeForABI(RetTy)) 3649 return ABIArgInfo::getIndirect(0); 3650 3651 return (isPromotableIntegerType(RetTy) ? 3652 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3653 } 3654 3655 ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const { 3656 if (isAggregateTypeForABI(Ty)) 3657 return ABIArgInfo::getIndirect(0); 3658 3659 return (isPromotableIntegerType(Ty) ? 3660 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3661 } 3662 3663 void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3664 llvm::GlobalValue *GV, 3665 CodeGen::CodeGenModule &M) 3666 const { 3667 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3668 if (!FD) return; 3669 3670 llvm::CallingConv::ID CC = llvm::CallingConv::C; 3671 if (FD->hasAttr<MBlazeInterruptHandlerAttr>()) 3672 CC = llvm::CallingConv::MBLAZE_INTR; 3673 else if (FD->hasAttr<MBlazeSaveVolatilesAttr>()) 3674 CC = llvm::CallingConv::MBLAZE_SVOL; 3675 3676 if (CC != llvm::CallingConv::C) { 3677 // Handle 'interrupt_handler' attribute: 3678 llvm::Function *F = cast<llvm::Function>(GV); 3679 3680 // Step 1: Set ISR calling convention. 3681 F->setCallingConv(CC); 3682 3683 // Step 2: Add attributes goodness. 3684 F->addFnAttr(llvm::Attributes::NoInline); 3685 } 3686 3687 // Step 3: Emit _interrupt_handler alias. 3688 if (CC == llvm::CallingConv::MBLAZE_INTR) 3689 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 3690 "_interrupt_handler", GV, &M.getModule()); 3691 } 3692 3693 3694 //===----------------------------------------------------------------------===// 3695 // MSP430 ABI Implementation 3696 //===----------------------------------------------------------------------===// 3697 3698 namespace { 3699 3700 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 3701 public: 3702 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 3703 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 3704 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3705 CodeGen::CodeGenModule &M) const; 3706 }; 3707 3708 } 3709 3710 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3711 llvm::GlobalValue *GV, 3712 CodeGen::CodeGenModule &M) const { 3713 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 3714 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 3715 // Handle 'interrupt' attribute: 3716 llvm::Function *F = cast<llvm::Function>(GV); 3717 3718 // Step 1: Set ISR calling convention. 3719 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 3720 3721 // Step 2: Add attributes goodness. 3722 F->addFnAttr(llvm::Attributes::NoInline); 3723 3724 // Step 3: Emit ISR vector alias. 
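      // The MSP430 interrupt vector table sits at the top of the address
      // space, starting at 0xffe0; the alias name below encodes 0xffe0 plus
      // the interrupt number given in the attribute.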
3725 unsigned Num = attr->getNumber() + 0xffe0; 3726 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 3727 "vector_" + Twine::utohexstr(Num), 3728 GV, &M.getModule()); 3729 } 3730 } 3731 } 3732 3733 //===----------------------------------------------------------------------===// 3734 // MIPS ABI Implementation. This works for both little-endian and 3735 // big-endian variants. 3736 //===----------------------------------------------------------------------===// 3737 3738 namespace { 3739 class MipsABIInfo : public ABIInfo { 3740 bool IsO32; 3741 unsigned MinABIStackAlignInBytes, StackAlignInBytes; 3742 void CoerceToIntArgs(uint64_t TySize, 3743 SmallVector<llvm::Type*, 8> &ArgList) const; 3744 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; 3745 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 3746 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 3747 public: 3748 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 3749 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), 3750 StackAlignInBytes(IsO32 ? 8 : 16) {} 3751 3752 ABIArgInfo classifyReturnType(QualType RetTy) const; 3753 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 3754 virtual void computeInfo(CGFunctionInfo &FI) const; 3755 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3756 CodeGenFunction &CGF) const; 3757 }; 3758 3759 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 3760 unsigned SizeOfUnwindException; 3761 public: 3762 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 3763 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 3764 SizeOfUnwindException(IsO32 ? 24 : 32) {} 3765 3766 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 3767 return 29; 3768 } 3769 3770 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3771 llvm::Value *Address) const; 3772 3773 unsigned getSizeOfUnwindException() const { 3774 return SizeOfUnwindException; 3775 } 3776 }; 3777 } 3778 3779 void MipsABIInfo::CoerceToIntArgs(uint64_t TySize, 3780 SmallVector<llvm::Type*, 8> &ArgList) const { 3781 llvm::IntegerType *IntTy = 3782 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 3783 3784 // Add (TySize / MinABIStackAlignInBytes) args of IntTy. 3785 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) 3786 ArgList.push_back(IntTy); 3787 3788 // If necessary, add one more integer type to ArgList. 3789 unsigned R = TySize % (MinABIStackAlignInBytes * 8); 3790 3791 if (R) 3792 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 3793 } 3794 3795 // In N32/64, an aligned double precision floating point field is passed in 3796 // a register. 3797 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { 3798 SmallVector<llvm::Type*, 8> ArgList, IntArgList; 3799 3800 if (IsO32) { 3801 CoerceToIntArgs(TySize, ArgList); 3802 return llvm::StructType::get(getVMContext(), ArgList); 3803 } 3804 3805 if (Ty->isComplexType()) 3806 return CGT.ConvertType(Ty); 3807 3808 const RecordType *RT = Ty->getAs<RecordType>(); 3809 3810 // Unions/vectors are passed in integer registers. 
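  // (For example, under N64 a union { double d; long l; } is coerced to a
  // single i64 rather than keeping the double.)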
3811 if (!RT || !RT->isStructureOrClassType()) { 3812 CoerceToIntArgs(TySize, ArgList); 3813 return llvm::StructType::get(getVMContext(), ArgList); 3814 } 3815 3816 const RecordDecl *RD = RT->getDecl(); 3817 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3818 assert(!(TySize % 8) && "Size of structure must be multiple of 8."); 3819 3820 uint64_t LastOffset = 0; 3821 unsigned idx = 0; 3822 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 3823 3824 // Iterate over fields in the struct/class and check if there are any aligned 3825 // double fields. 3826 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3827 i != e; ++i, ++idx) { 3828 const QualType Ty = i->getType(); 3829 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 3830 3831 if (!BT || BT->getKind() != BuiltinType::Double) 3832 continue; 3833 3834 uint64_t Offset = Layout.getFieldOffset(idx); 3835 if (Offset % 64) // Ignore doubles that are not aligned. 3836 continue; 3837 3838 // Add ((Offset - LastOffset) / 64) args of type i64. 3839 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 3840 ArgList.push_back(I64); 3841 3842 // Add double type. 3843 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 3844 LastOffset = Offset + 64; 3845 } 3846 3847 CoerceToIntArgs(TySize - LastOffset, IntArgList); 3848 ArgList.append(IntArgList.begin(), IntArgList.end()); 3849 3850 return llvm::StructType::get(getVMContext(), ArgList); 3851 } 3852 3853 llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const { 3854 assert((Offset % MinABIStackAlignInBytes) == 0); 3855 3856 if ((Align - 1) & Offset) 3857 return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 3858 3859 return 0; 3860 } 3861 3862 ABIArgInfo 3863 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 3864 uint64_t OrigOffset = Offset; 3865 uint64_t TySize = getContext().getTypeSize(Ty); 3866 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 3867 3868 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), 3869 (uint64_t)StackAlignInBytes); 3870 Offset = llvm::RoundUpToAlignment(Offset, Align); 3871 Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8; 3872 3873 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { 3874 // Ignore empty aggregates. 3875 if (TySize == 0) 3876 return ABIArgInfo::getIgnore(); 3877 3878 // Records with non trivial destructors/constructors should not be passed 3879 // by value. 3880 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) { 3881 Offset = OrigOffset + MinABIStackAlignInBytes; 3882 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3883 } 3884 3885 // If we have reached here, aggregates are passed directly by coercing to 3886 // another structure type. Padding is inserted if the offset of the 3887 // aggregate is unaligned. 3888 return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, 3889 getPaddingType(Align, OrigOffset)); 3890 } 3891 3892 // Treat an enum type as its underlying type. 
3893 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3894 Ty = EnumTy->getDecl()->getIntegerType(); 3895 3896 if (Ty->isPromotableIntegerType()) 3897 return ABIArgInfo::getExtend(); 3898 3899 return ABIArgInfo::getDirect(0, 0, getPaddingType(Align, OrigOffset)); 3900 } 3901 3902 llvm::Type* 3903 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 3904 const RecordType *RT = RetTy->getAs<RecordType>(); 3905 SmallVector<llvm::Type*, 8> RTList; 3906 3907 if (RT && RT->isStructureOrClassType()) { 3908 const RecordDecl *RD = RT->getDecl(); 3909 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3910 unsigned FieldCnt = Layout.getFieldCount(); 3911 3912 // N32/64 returns struct/classes in floating point registers if the 3913 // following conditions are met: 3914 // 1. The size of the struct/class is no larger than 128-bit. 3915 // 2. The struct/class has one or two fields all of which are floating 3916 // point types. 3917 // 3. The offset of the first field is zero (this follows what gcc does). 3918 // 3919 // Any other composite results are returned in integer registers. 3920 // 3921 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 3922 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 3923 for (; b != e; ++b) { 3924 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 3925 3926 if (!BT || !BT->isFloatingPoint()) 3927 break; 3928 3929 RTList.push_back(CGT.ConvertType(b->getType())); 3930 } 3931 3932 if (b == e) 3933 return llvm::StructType::get(getVMContext(), RTList, 3934 RD->hasAttr<PackedAttr>()); 3935 3936 RTList.clear(); 3937 } 3938 } 3939 3940 CoerceToIntArgs(Size, RTList); 3941 return llvm::StructType::get(getVMContext(), RTList); 3942 } 3943 3944 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 3945 uint64_t Size = getContext().getTypeSize(RetTy); 3946 3947 if (RetTy->isVoidType() || Size == 0) 3948 return ABIArgInfo::getIgnore(); 3949 3950 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 3951 if (Size <= 128) { 3952 if (RetTy->isAnyComplexType()) 3953 return ABIArgInfo::getDirect(); 3954 3955 // O32 returns integer vectors in registers. 3956 if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation()) 3957 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 3958 3959 if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 3960 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 3961 } 3962 3963 return ABIArgInfo::getIndirect(0); 3964 } 3965 3966 // Treat an enum type as its underlying type. 3967 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3968 RetTy = EnumTy->getDecl()->getIntegerType(); 3969 3970 return (RetTy->isPromotableIntegerType() ? 3971 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3972 } 3973 3974 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 3975 ABIArgInfo &RetInfo = FI.getReturnInfo(); 3976 RetInfo = classifyReturnType(FI.getReturnType()); 3977 3978 // Check if a pointer to an aggregate is passed as a hidden argument. 3979 uint64_t Offset = RetInfo.isIndirect() ? 
MinABIStackAlignInBytes : 0;
3980 
3981   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3982        it != ie; ++it)
3983     it->info = classifyArgumentType(it->type, Offset);
3984 }
3985 
3986 llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3987                                     CodeGenFunction &CGF) const {
3988   llvm::Type *BP = CGF.Int8PtrTy;
3989   llvm::Type *BPP = CGF.Int8PtrPtrTy;
3990 
3991   CGBuilderTy &Builder = CGF.Builder;
3992   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
3993   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3994   int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
3995   llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3996   llvm::Value *AddrTyped;
3997   unsigned PtrWidth = getContext().getTargetInfo().getPointerWidth(0);
3998   llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
3999 
4000   if (TypeAlign > MinABIStackAlignInBytes) {
4001     llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
4002     llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
4003     llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
4004     llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
4005     llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
4006     AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
4007   }
4008   else
4009     AddrTyped = Builder.CreateBitCast(Addr, PTy);
4010 
4011   llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
4012   TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
4013   uint64_t Offset =
4014     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
4015   llvm::Value *NextAddr =
4016     Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
4017                       "ap.next");
4018   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
4019 
4020   return AddrTyped;
4021 }
4022 
4023 bool
4024 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4025                                                llvm::Value *Address) const {
4026   // This information comes from gcc's implementation, which seems to be
4027   // as canonical as it gets.
4028 
4029   // Everything on MIPS is 4 bytes. Double-precision FP registers
4030   // are aliased to pairs of single-precision FP registers.
4031   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
4032 
4033   // 0-31 are the general purpose registers, $0 - $31.
4034   // 32-63 are the floating-point registers, $f0 - $f31.
4035   // 64 and 65 are the multiply/divide registers, $hi and $lo.
4036   // 66 is the (notional, I think) register for signal-handler return.
4037   AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
4038 
4039   // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
4040   // They are one bit wide and ignored here.
4041 
4042   // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
4043   // (coprocessor 1 is the FP unit)
4044   // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
4045   // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
4046   // 176-181 are the DSP accumulator registers.
4047   AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
4048   return false;
4049 }
4050 
4051 //===----------------------------------------------------------------------===//
4052 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
4053 // Currently subclassed only to implement custom OpenCL C function attribute
4054 // handling.
4055 //===----------------------------------------------------------------------===// 4056 4057 namespace { 4058 4059 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { 4060 public: 4061 TCETargetCodeGenInfo(CodeGenTypes &CGT) 4062 : DefaultTargetCodeGenInfo(CGT) {} 4063 4064 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4065 CodeGen::CodeGenModule &M) const; 4066 }; 4067 4068 void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D, 4069 llvm::GlobalValue *GV, 4070 CodeGen::CodeGenModule &M) const { 4071 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 4072 if (!FD) return; 4073 4074 llvm::Function *F = cast<llvm::Function>(GV); 4075 4076 if (M.getLangOpts().OpenCL) { 4077 if (FD->hasAttr<OpenCLKernelAttr>()) { 4078 // OpenCL C Kernel functions are not subject to inlining 4079 F->addFnAttr(llvm::Attributes::NoInline); 4080 4081 if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) { 4082 4083 // Convert the reqd_work_group_size() attributes to metadata. 4084 llvm::LLVMContext &Context = F->getContext(); 4085 llvm::NamedMDNode *OpenCLMetadata = 4086 M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info"); 4087 4088 SmallVector<llvm::Value*, 5> Operands; 4089 Operands.push_back(F); 4090 4091 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 4092 llvm::APInt(32, 4093 FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim()))); 4094 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 4095 llvm::APInt(32, 4096 FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim()))); 4097 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 4098 llvm::APInt(32, 4099 FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim()))); 4100 4101 // Add a boolean constant operand for "required" (true) or "hint" (false) 4102 // for implementing the work_group_size_hint attr later. Currently 4103 // always true as the hint is not yet implemented. 4104 Operands.push_back(llvm::ConstantInt::getTrue(Context)); 4105 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); 4106 } 4107 } 4108 } 4109 } 4110 4111 } 4112 4113 //===----------------------------------------------------------------------===// 4114 // Hexagon ABI Implementation 4115 //===----------------------------------------------------------------------===// 4116 4117 namespace { 4118 4119 class HexagonABIInfo : public ABIInfo { 4120 4121 4122 public: 4123 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 4124 4125 private: 4126 4127 ABIArgInfo classifyReturnType(QualType RetTy) const; 4128 ABIArgInfo classifyArgumentType(QualType RetTy) const; 4129 4130 virtual void computeInfo(CGFunctionInfo &FI) const; 4131 4132 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4133 CodeGenFunction &CGF) const; 4134 }; 4135 4136 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { 4137 public: 4138 HexagonTargetCodeGenInfo(CodeGenTypes &CGT) 4139 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {} 4140 4141 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 4142 return 29; 4143 } 4144 }; 4145 4146 } 4147 4148 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { 4149 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4150 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 4151 it != ie; ++it) 4152 it->info = classifyArgumentType(it->type); 4153 } 4154 4155 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { 4156 if (!isAggregateTypeForABI(Ty)) { 4157 // Treat an enum type as its underlying type. 
4158 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4159 Ty = EnumTy->getDecl()->getIntegerType(); 4160 4161 return (Ty->isPromotableIntegerType() ? 4162 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4163 } 4164 4165 // Ignore empty records. 4166 if (isEmptyRecord(getContext(), Ty, true)) 4167 return ABIArgInfo::getIgnore(); 4168 4169 // Structures with either a non-trivial destructor or a non-trivial 4170 // copy constructor are always indirect. 4171 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 4172 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 4173 4174 uint64_t Size = getContext().getTypeSize(Ty); 4175 if (Size > 64) 4176 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 4177 // Pass in the smallest viable integer type. 4178 else if (Size > 32) 4179 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 4180 else if (Size > 16) 4181 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 4182 else if (Size > 8) 4183 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 4184 else 4185 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 4186 } 4187 4188 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { 4189 if (RetTy->isVoidType()) 4190 return ABIArgInfo::getIgnore(); 4191 4192 // Large vector types should be returned via memory. 4193 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64) 4194 return ABIArgInfo::getIndirect(0); 4195 4196 if (!isAggregateTypeForABI(RetTy)) { 4197 // Treat an enum type as its underlying type. 4198 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 4199 RetTy = EnumTy->getDecl()->getIntegerType(); 4200 4201 return (RetTy->isPromotableIntegerType() ? 4202 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4203 } 4204 4205 // Structures with either a non-trivial destructor or a non-trivial 4206 // copy constructor are always indirect. 4207 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 4208 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 4209 4210 if (isEmptyRecord(getContext(), RetTy, true)) 4211 return ABIArgInfo::getIgnore(); 4212 4213 // Aggregates <= 8 bytes are returned in r0; other aggregates 4214 // are returned indirectly. 4215 uint64_t Size = getContext().getTypeSize(RetTy); 4216 if (Size <= 64) { 4217 // Return in the smallest viable integer type. 
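    // (For example, a 5-byte struct is returned directly as an i64.)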
4218 if (Size <= 8) 4219 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 4220 if (Size <= 16) 4221 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 4222 if (Size <= 32) 4223 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 4224 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 4225 } 4226 4227 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 4228 } 4229 4230 llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4231 CodeGenFunction &CGF) const { 4232 // FIXME: Need to handle alignment 4233 llvm::Type *BPP = CGF.Int8PtrPtrTy; 4234 4235 CGBuilderTy &Builder = CGF.Builder; 4236 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 4237 "ap"); 4238 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 4239 llvm::Type *PTy = 4240 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 4241 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 4242 4243 uint64_t Offset = 4244 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 4245 llvm::Value *NextAddr = 4246 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 4247 "ap.next"); 4248 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 4249 4250 return AddrTyped; 4251 } 4252 4253 4254 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { 4255 if (TheTargetCodeGenInfo) 4256 return *TheTargetCodeGenInfo; 4257 4258 const llvm::Triple &Triple = getContext().getTargetInfo().getTriple(); 4259 switch (Triple.getArch()) { 4260 default: 4261 return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types)); 4262 4263 case llvm::Triple::le32: 4264 return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types)); 4265 case llvm::Triple::mips: 4266 case llvm::Triple::mipsel: 4267 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true)); 4268 4269 case llvm::Triple::mips64: 4270 case llvm::Triple::mips64el: 4271 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false)); 4272 4273 case llvm::Triple::arm: 4274 case llvm::Triple::thumb: 4275 { 4276 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; 4277 if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0) 4278 Kind = ARMABIInfo::APCS; 4279 else if (CodeGenOpts.FloatABI == "hard" || 4280 (CodeGenOpts.FloatABI != "soft" && Triple.getEnvironment()==llvm::Triple::GNUEABIHF)) 4281 Kind = ARMABIInfo::AAPCS_VFP; 4282 4283 switch (Triple.getOS()) { 4284 case llvm::Triple::NativeClient: 4285 return *(TheTargetCodeGenInfo = 4286 new NaClARMTargetCodeGenInfo(Types, Kind)); 4287 default: 4288 return *(TheTargetCodeGenInfo = 4289 new ARMTargetCodeGenInfo(Types, Kind)); 4290 } 4291 } 4292 4293 case llvm::Triple::ppc: 4294 return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types)); 4295 case llvm::Triple::ppc64: 4296 if (Triple.isOSBinFormatELF()) 4297 return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types)); 4298 else 4299 return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types)); 4300 4301 case llvm::Triple::nvptx: 4302 case llvm::Triple::nvptx64: 4303 return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types)); 4304 4305 case llvm::Triple::mblaze: 4306 return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types)); 4307 4308 case llvm::Triple::msp430: 4309 return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types)); 4310 4311 case llvm::Triple::tce: 4312 return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types)); 4313 4314 case llvm::Triple::x86: { 
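    // Darwin and every OS handled explicitly in the switch below return small
    // structs in registers (the second constructor argument); generic ELF
    // targets take the default case and return them in memory.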
4315 bool DisableMMX = strcmp(getContext().getTargetInfo().getABI(), "no-mmx") == 0; 4316 4317 if (Triple.isOSDarwin()) 4318 return *(TheTargetCodeGenInfo = 4319 new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX, false, 4320 CodeGenOpts.NumRegisterParameters)); 4321 4322 switch (Triple.getOS()) { 4323 case llvm::Triple::Cygwin: 4324 case llvm::Triple::MinGW32: 4325 case llvm::Triple::AuroraUX: 4326 case llvm::Triple::DragonFly: 4327 case llvm::Triple::FreeBSD: 4328 case llvm::Triple::OpenBSD: 4329 case llvm::Triple::Bitrig: 4330 return *(TheTargetCodeGenInfo = 4331 new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, 4332 false, 4333 CodeGenOpts.NumRegisterParameters)); 4334 4335 case llvm::Triple::Win32: 4336 return *(TheTargetCodeGenInfo = 4337 new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, true, 4338 CodeGenOpts.NumRegisterParameters)); 4339 4340 default: 4341 return *(TheTargetCodeGenInfo = 4342 new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX, 4343 false, 4344 CodeGenOpts.NumRegisterParameters)); 4345 } 4346 } 4347 4348 case llvm::Triple::x86_64: { 4349 bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0; 4350 4351 switch (Triple.getOS()) { 4352 case llvm::Triple::Win32: 4353 case llvm::Triple::MinGW32: 4354 case llvm::Triple::Cygwin: 4355 return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types)); 4356 case llvm::Triple::NativeClient: 4357 return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types, HasAVX)); 4358 default: 4359 return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types, 4360 HasAVX)); 4361 } 4362 } 4363 case llvm::Triple::hexagon: 4364 return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types)); 4365 } 4366 } 4367