1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // These classes wrap the information about a call or function 11 // definition used to handle ABI compliancy. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "TargetInfo.h" 16 #include "ABIInfo.h" 17 #include "CodeGenFunction.h" 18 #include "clang/AST/RecordLayout.h" 19 #include "clang/Frontend/CodeGenOptions.h" 20 #include "llvm/Type.h" 21 #include "llvm/DataLayout.h" 22 #include "llvm/ADT/Triple.h" 23 #include "llvm/Support/raw_ostream.h" 24 using namespace clang; 25 using namespace CodeGen; 26 27 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, 28 llvm::Value *Array, 29 llvm::Value *Value, 30 unsigned FirstIndex, 31 unsigned LastIndex) { 32 // Alternatively, we could emit this as a loop in the source. 33 for (unsigned I = FirstIndex; I <= LastIndex; ++I) { 34 llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I); 35 Builder.CreateStore(Value, Cell); 36 } 37 } 38 39 static bool isAggregateTypeForABI(QualType T) { 40 return CodeGenFunction::hasAggregateLLVMType(T) || 41 T->isMemberFunctionPointerType(); 42 } 43 44 ABIInfo::~ABIInfo() {} 45 46 ASTContext &ABIInfo::getContext() const { 47 return CGT.getContext(); 48 } 49 50 llvm::LLVMContext &ABIInfo::getVMContext() const { 51 return CGT.getLLVMContext(); 52 } 53 54 const llvm::DataLayout &ABIInfo::getDataLayout() const { 55 return CGT.getDataLayout(); 56 } 57 58 59 void ABIArgInfo::dump() const { 60 raw_ostream &OS = llvm::errs(); 61 OS << "(ABIArgInfo Kind="; 62 switch (TheKind) { 63 case Direct: 64 OS << "Direct Type="; 65 if (llvm::Type *Ty = getCoerceToType()) 66 Ty->print(OS); 67 else 68 OS << "null"; 69 break; 70 case Extend: 71 OS << "Extend"; 72 break; 73 case Ignore: 74 OS << "Ignore"; 75 break; 76 case Indirect: 77 OS << "Indirect Align=" << getIndirectAlign() 78 << " ByVal=" << getIndirectByVal() 79 << " Realign=" << getIndirectRealign(); 80 break; 81 case Expand: 82 OS << "Expand"; 83 break; 84 } 85 OS << ")\n"; 86 } 87 88 TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; } 89 90 // If someone can figure out a general rule for this, that would be great. 91 // It's probably just doomed to be platform-dependent, though. 92 unsigned TargetCodeGenInfo::getSizeOfUnwindException() const { 93 // Verified for: 94 // x86-64 FreeBSD, Linux, Darwin 95 // x86-32 FreeBSD, Linux, Darwin 96 // PowerPC Linux, Darwin 97 // ARM Darwin (*not* EABI) 98 return 32; 99 } 100 101 bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args, 102 const FunctionNoProtoType *fnType) const { 103 // The following conventions are known to require this to be false: 104 // x86_stdcall 105 // MIPS 106 // For everything else, we just prefer false unless we opt out. 107 return false; 108 } 109 110 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays); 111 112 /// isEmptyField - Return true iff the field is "empty", that is, it 113 /// is an unnamed bit-field or an (array of) empty record(s).
114 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, 115 bool AllowArrays) { 116 if (FD->isUnnamedBitfield()) 117 return true; 118 119 QualType FT = FD->getType(); 120 121 // Constant arrays of empty records count as empty, strip them off. 122 // Constant arrays of zero length always count as empty. 123 if (AllowArrays) 124 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { 125 if (AT->getSize() == 0) 126 return true; 127 FT = AT->getElementType(); 128 } 129 130 const RecordType *RT = FT->getAs<RecordType>(); 131 if (!RT) 132 return false; 133 134 // C++ record fields are never empty, at least in the Itanium ABI. 135 // 136 // FIXME: We should use a predicate for whether this behavior is true in the 137 // current ABI. 138 if (isa<CXXRecordDecl>(RT->getDecl())) 139 return false; 140 141 return isEmptyRecord(Context, FT, AllowArrays); 142 } 143 144 /// isEmptyRecord - Return true iff a structure contains only empty 145 /// fields. Note that a structure with a flexible array member is not 146 /// considered empty. 147 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) { 148 const RecordType *RT = T->getAs<RecordType>(); 149 if (!RT) 150 return 0; 151 const RecordDecl *RD = RT->getDecl(); 152 if (RD->hasFlexibleArrayMember()) 153 return false; 154 155 // If this is a C++ record, check the bases first. 156 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 157 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 158 e = CXXRD->bases_end(); i != e; ++i) 159 if (!isEmptyRecord(Context, i->getType(), true)) 160 return false; 161 162 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 163 i != e; ++i) 164 if (!isEmptyField(Context, *i, AllowArrays)) 165 return false; 166 return true; 167 } 168 169 /// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either 170 /// a non-trivial destructor or a non-trivial copy constructor. 171 static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) { 172 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 173 if (!RD) 174 return false; 175 176 return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor(); 177 } 178 179 /// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is 180 /// a record type with either a non-trivial destructor or a non-trivial copy 181 /// constructor. 182 static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) { 183 const RecordType *RT = T->getAs<RecordType>(); 184 if (!RT) 185 return false; 186 187 return hasNonTrivialDestructorOrCopyConstructor(RT); 188 } 189 190 /// isSingleElementStruct - Determine if a structure is a "single 191 /// element struct", i.e. it has exactly one non-empty field or 192 /// exactly one field which is itself a single element 193 /// struct. Structures with flexible array members are never 194 /// considered single element structs. 195 /// 196 /// \return The field declaration for the single non-empty field, if 197 /// it exists. 198 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) { 199 const RecordType *RT = T->getAsStructureType(); 200 if (!RT) 201 return 0; 202 203 const RecordDecl *RD = RT->getDecl(); 204 if (RD->hasFlexibleArrayMember()) 205 return 0; 206 207 const Type *Found = 0; 208 209 // If this is a C++ record, check the bases first. 
210 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 211 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 212 e = CXXRD->bases_end(); i != e; ++i) { 213 // Ignore empty records. 214 if (isEmptyRecord(Context, i->getType(), true)) 215 continue; 216 217 // If we already found an element then this isn't a single-element struct. 218 if (Found) 219 return 0; 220 221 // If this is non-empty and not a single element struct, the composite 222 // cannot be a single element struct. 223 Found = isSingleElementStruct(i->getType(), Context); 224 if (!Found) 225 return 0; 226 } 227 } 228 229 // Check for single element. 230 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 231 i != e; ++i) { 232 const FieldDecl *FD = *i; 233 QualType FT = FD->getType(); 234 235 // Ignore empty fields. 236 if (isEmptyField(Context, FD, true)) 237 continue; 238 239 // If we already found an element then this isn't a single-element 240 // struct. 241 if (Found) 242 return 0; 243 244 // Treat single element arrays as the element. 245 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { 246 if (AT->getSize().getZExtValue() != 1) 247 break; 248 FT = AT->getElementType(); 249 } 250 251 if (!isAggregateTypeForABI(FT)) { 252 Found = FT.getTypePtr(); 253 } else { 254 Found = isSingleElementStruct(FT, Context); 255 if (!Found) 256 return 0; 257 } 258 } 259 260 // We don't consider a struct a single-element struct if it has 261 // padding beyond the element type. 262 if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T)) 263 return 0; 264 265 return Found; 266 } 267 268 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) { 269 if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() && 270 !Ty->isAnyComplexType() && !Ty->isEnumeralType() && 271 !Ty->isBlockPointerType()) 272 return false; 273 274 uint64_t Size = Context.getTypeSize(Ty); 275 return Size == 32 || Size == 64; 276 } 277 278 /// canExpandIndirectArgument - Test whether an argument type which is to be 279 /// passed indirectly (on the stack) would have the equivalent layout if it was 280 /// expanded into separate arguments. If so, we prefer to do the latter to avoid 281 /// inhibiting optimizations. 282 /// 283 // FIXME: This predicate is missing many cases, currently it just follows 284 // llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We 285 // should probably make this smarter, or better yet make the LLVM backend 286 // capable of handling it. 287 static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) { 288 // We can only expand structure types. 289 const RecordType *RT = Ty->getAs<RecordType>(); 290 if (!RT) 291 return false; 292 293 // We can only expand (C) structures. 294 // 295 // FIXME: This needs to be generalized to handle classes as well. 296 const RecordDecl *RD = RT->getDecl(); 297 if (!RD->isStruct() || isa<CXXRecordDecl>(RD)) 298 return false; 299 300 uint64_t Size = 0; 301 302 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 303 i != e; ++i) { 304 const FieldDecl *FD = *i; 305 306 if (!is32Or64BitBasicType(FD->getType(), Context)) 307 return false; 308 309 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know 310 // how to expand them yet, and the predicate for telling if a bitfield still 311 // counts as "basic" is more complicated than what we were doing previously. 
312 if (FD->isBitField()) 313 return false; 314 315 Size += Context.getTypeSize(FD->getType()); 316 } 317 318 // Make sure there are not any holes in the struct. 319 if (Size != Context.getTypeSize(Ty)) 320 return false; 321 322 return true; 323 } 324 325 namespace { 326 /// DefaultABIInfo - The default implementation for ABI specific 327 /// details. This implementation provides information which results in 328 /// self-consistent and sensible LLVM IR generation, but does not 329 /// conform to any particular ABI. 330 class DefaultABIInfo : public ABIInfo { 331 public: 332 DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 333 334 ABIArgInfo classifyReturnType(QualType RetTy) const; 335 ABIArgInfo classifyArgumentType(QualType RetTy) const; 336 337 virtual void computeInfo(CGFunctionInfo &FI) const { 338 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 339 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 340 it != ie; ++it) 341 it->info = classifyArgumentType(it->type); 342 } 343 344 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 345 CodeGenFunction &CGF) const; 346 }; 347 348 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo { 349 public: 350 DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 351 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 352 }; 353 354 llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 355 CodeGenFunction &CGF) const { 356 return 0; 357 } 358 359 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const { 360 if (isAggregateTypeForABI(Ty)) { 361 // Records with non trivial destructors/constructors should not be passed 362 // by value. 363 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 364 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 365 366 return ABIArgInfo::getIndirect(0); 367 } 368 369 // Treat an enum type as its underlying type. 370 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 371 Ty = EnumTy->getDecl()->getIntegerType(); 372 373 return (Ty->isPromotableIntegerType() ? 374 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 375 } 376 377 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const { 378 if (RetTy->isVoidType()) 379 return ABIArgInfo::getIgnore(); 380 381 if (isAggregateTypeForABI(RetTy)) 382 return ABIArgInfo::getIndirect(0); 383 384 // Treat an enum type as its underlying type. 385 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 386 RetTy = EnumTy->getDecl()->getIntegerType(); 387 388 return (RetTy->isPromotableIntegerType() ? 
389 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 390 } 391 392 //===----------------------------------------------------------------------===// 393 // le32/PNaCl bitcode ABI Implementation 394 //===----------------------------------------------------------------------===// 395 396 class PNaClABIInfo : public ABIInfo { 397 public: 398 PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 399 400 ABIArgInfo classifyReturnType(QualType RetTy) const; 401 ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs) const; 402 403 virtual void computeInfo(CGFunctionInfo &FI) const; 404 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 405 CodeGenFunction &CGF) const; 406 }; 407 408 class PNaClTargetCodeGenInfo : public TargetCodeGenInfo { 409 public: 410 PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 411 : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {} 412 }; 413 414 void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const { 415 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 416 417 unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() : 0; 418 419 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 420 it != ie; ++it) 421 it->info = classifyArgumentType(it->type, FreeRegs); 422 } 423 424 llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 425 CodeGenFunction &CGF) const { 426 return 0; 427 } 428 429 ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty, 430 unsigned &FreeRegs) const { 431 if (isAggregateTypeForABI(Ty)) { 432 // Records with non trivial destructors/constructors should not be passed 433 // by value. 434 FreeRegs = 0; 435 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 436 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 437 438 return ABIArgInfo::getIndirect(0); 439 } 440 441 // Treat an enum type as its underlying type. 442 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 443 Ty = EnumTy->getDecl()->getIntegerType(); 444 445 ABIArgInfo BaseInfo = (Ty->isPromotableIntegerType() ? 446 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 447 448 // Regparm regs hold 32 bits. 449 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; 450 if (SizeInRegs == 0) return BaseInfo; 451 if (SizeInRegs > FreeRegs) { 452 FreeRegs = 0; 453 return BaseInfo; 454 } 455 FreeRegs -= SizeInRegs; 456 return BaseInfo.isDirect() ? 457 ABIArgInfo::getDirectInReg(BaseInfo.getCoerceToType()) : 458 ABIArgInfo::getExtendInReg(BaseInfo.getCoerceToType()); 459 } 460 461 ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const { 462 if (RetTy->isVoidType()) 463 return ABIArgInfo::getIgnore(); 464 465 if (isAggregateTypeForABI(RetTy)) 466 return ABIArgInfo::getIndirect(0); 467 468 // Treat an enum type as its underlying type. 469 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 470 RetTy = EnumTy->getDecl()->getIntegerType(); 471 472 return (RetTy->isPromotableIntegerType() ? 473 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 474 } 475 476 /// UseX86_MMXType - Return true if this is an MMX type that should use the 477 /// special x86_mmx type. 478 bool UseX86_MMXType(llvm::Type *IRType) { 479 // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the 480 // special x86_mmx type. 
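// (Clarifying note, added for illustration; not in the original source: the
// scalar-size check below excludes <1 x i64>, whose single element is itself
// 64 bits wide, so only <2 x i32>, <4 x i16>, and <8 x i8> map to x86_mmx.)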
481 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 && 482 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() && 483 IRType->getScalarSizeInBits() != 64; 484 } 485 486 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 487 StringRef Constraint, 488 llvm::Type* Ty) { 489 if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) 490 return llvm::Type::getX86_MMXTy(CGF.getLLVMContext()); 491 return Ty; 492 } 493 494 //===----------------------------------------------------------------------===// 495 // X86-32 ABI Implementation 496 //===----------------------------------------------------------------------===// 497 498 /// X86_32ABIInfo - The X86-32 ABI information. 499 class X86_32ABIInfo : public ABIInfo { 500 enum Class { 501 Integer, 502 Float 503 }; 504 505 static const unsigned MinABIStackAlignInBytes = 4; 506 507 bool IsDarwinVectorABI; 508 bool IsSmallStructInRegABI; 509 bool IsMMXDisabled; 510 bool IsWin32FloatStructABI; 511 unsigned DefaultNumRegisterParameters; 512 513 static bool isRegisterSize(unsigned Size) { 514 return (Size == 8 || Size == 16 || Size == 32 || Size == 64); 515 } 516 517 static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context, 518 unsigned callingConvention); 519 520 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 521 /// such that the argument will be passed in memory. 522 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, 523 unsigned &FreeRegs) const; 524 525 /// \brief Return the alignment to use for the given type on the stack. 526 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const; 527 528 Class classify(QualType Ty) const; 529 ABIArgInfo classifyReturnType(QualType RetTy, 530 unsigned callingConvention) const; 531 ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs, 532 bool IsFastCall) const; 533 bool shouldUseInReg(QualType Ty, unsigned &FreeRegs, 534 bool IsFastCall, bool &NeedsPadding) const; 535 536 public: 537 538 virtual void computeInfo(CGFunctionInfo &FI) const; 539 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 540 CodeGenFunction &CGF) const; 541 542 X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w, 543 unsigned r) 544 : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p), 545 IsMMXDisabled(m), IsWin32FloatStructABI(w), 546 DefaultNumRegisterParameters(r) {} 547 }; 548 549 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo { 550 public: 551 X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 552 bool d, bool p, bool m, bool w, unsigned r) 553 :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w, r)) {} 554 555 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 556 CodeGen::CodeGenModule &CGM) const; 557 558 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 559 // Darwin uses different dwarf register numbers for EH. 560 if (CGM.isTargetDarwin()) return 5; 561 562 return 4; 563 } 564 565 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 566 llvm::Value *Address) const; 567 568 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 569 StringRef Constraint, 570 llvm::Type* Ty) const { 571 return X86AdjustInlineAsmType(CGF, Constraint, Ty); 572 } 573 574 }; 575 576 } 577 578 /// shouldReturnTypeInRegister - Determine if the given type should be 579 /// passed in a register (for the Darwin ABI). 
580 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, 581 ASTContext &Context, 582 unsigned callingConvention) { 583 uint64_t Size = Context.getTypeSize(Ty); 584 585 // Type must be register sized. 586 if (!isRegisterSize(Size)) 587 return false; 588 589 if (Ty->isVectorType()) { 590 // 64- and 128- bit vectors inside structures are not returned in 591 // registers. 592 if (Size == 64 || Size == 128) 593 return false; 594 595 return true; 596 } 597 598 // If this is a builtin, pointer, enum, complex type, member pointer, or 599 // member function pointer it is ok. 600 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() || 601 Ty->isAnyComplexType() || Ty->isEnumeralType() || 602 Ty->isBlockPointerType() || Ty->isMemberPointerType()) 603 return true; 604 605 // Arrays are treated like records. 606 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) 607 return shouldReturnTypeInRegister(AT->getElementType(), Context, 608 callingConvention); 609 610 // Otherwise, it must be a record type. 611 const RecordType *RT = Ty->getAs<RecordType>(); 612 if (!RT) return false; 613 614 // FIXME: Traverse bases here too. 615 616 // For thiscall conventions, structures will never be returned in 617 // a register. This is for compatibility with the MSVC ABI 618 if (callingConvention == llvm::CallingConv::X86_ThisCall && 619 RT->isStructureType()) { 620 return false; 621 } 622 623 // Structure types are passed in register if all fields would be 624 // passed in a register. 625 for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(), 626 e = RT->getDecl()->field_end(); i != e; ++i) { 627 const FieldDecl *FD = *i; 628 629 // Empty fields are ignored. 630 if (isEmptyField(Context, FD, true)) 631 continue; 632 633 // Check fields recursively. 634 if (!shouldReturnTypeInRegister(FD->getType(), Context, 635 callingConvention)) 636 return false; 637 } 638 return true; 639 } 640 641 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, 642 unsigned callingConvention) const { 643 if (RetTy->isVoidType()) 644 return ABIArgInfo::getIgnore(); 645 646 if (const VectorType *VT = RetTy->getAs<VectorType>()) { 647 // On Darwin, some vectors are returned in registers. 648 if (IsDarwinVectorABI) { 649 uint64_t Size = getContext().getTypeSize(RetTy); 650 651 // 128-bit vectors are a special case; they are returned in 652 // registers and we need to make sure to pick a type the LLVM 653 // backend will like. 654 if (Size == 128) 655 return ABIArgInfo::getDirect(llvm::VectorType::get( 656 llvm::Type::getInt64Ty(getVMContext()), 2)); 657 658 // Always return in register if it fits in a general purpose 659 // register, or if it is 64 bits and has a single element. 660 if ((Size == 8 || Size == 16 || Size == 32) || 661 (Size == 64 && VT->getNumElements() == 1)) 662 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 663 Size)); 664 665 return ABIArgInfo::getIndirect(0); 666 } 667 668 return ABIArgInfo::getDirect(); 669 } 670 671 if (isAggregateTypeForABI(RetTy)) { 672 if (const RecordType *RT = RetTy->getAs<RecordType>()) { 673 // Structures with either a non-trivial destructor or a non-trivial 674 // copy constructor are always indirect. 675 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 676 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 677 678 // Structures with flexible arrays are always indirect. 
679 if (RT->getDecl()->hasFlexibleArrayMember()) 680 return ABIArgInfo::getIndirect(0); 681 } 682 683 // If specified, structs and unions are always indirect. 684 if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType()) 685 return ABIArgInfo::getIndirect(0); 686 687 // Small structures which are register sized are generally returned 688 // in a register. 689 if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(), 690 callingConvention)) { 691 uint64_t Size = getContext().getTypeSize(RetTy); 692 693 // As a special-case, if the struct is a "single-element" struct, and 694 // the field is of type "float" or "double", return it in a 695 // floating-point register. (MSVC does not apply this special case.) 696 // We apply a similar transformation for pointer types to improve the 697 // quality of the generated IR. 698 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) 699 if ((!IsWin32FloatStructABI && SeltTy->isRealFloatingType()) 700 || SeltTy->hasPointerRepresentation()) 701 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 702 703 // FIXME: We should be able to narrow this integer in cases with dead 704 // padding. 705 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size)); 706 } 707 708 return ABIArgInfo::getIndirect(0); 709 } 710 711 // Treat an enum type as its underlying type. 712 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 713 RetTy = EnumTy->getDecl()->getIntegerType(); 714 715 return (RetTy->isPromotableIntegerType() ? 716 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 717 } 718 719 static bool isSSEVectorType(ASTContext &Context, QualType Ty) { 720 return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128; 721 } 722 723 static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) { 724 const RecordType *RT = Ty->getAs<RecordType>(); 725 if (!RT) 726 return 0; 727 const RecordDecl *RD = RT->getDecl(); 728 729 // If this is a C++ record, check the bases first. 730 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 731 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 732 e = CXXRD->bases_end(); i != e; ++i) 733 if (!isRecordWithSSEVectorType(Context, i->getType())) 734 return false; 735 736 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 737 i != e; ++i) { 738 QualType FT = i->getType(); 739 740 if (isSSEVectorType(Context, FT)) 741 return true; 742 743 if (isRecordWithSSEVectorType(Context, FT)) 744 return true; 745 } 746 747 return false; 748 } 749 750 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty, 751 unsigned Align) const { 752 // Otherwise, if the alignment is less than or equal to the minimum ABI 753 // alignment, just use the default; the backend will handle this. 754 if (Align <= MinABIStackAlignInBytes) 755 return 0; // Use default alignment. 756 757 // On non-Darwin, the stack type alignment is always 4. 758 if (!IsDarwinVectorABI) { 759 // Set explicit alignment, since we may need to realign the top. 760 return MinABIStackAlignInBytes; 761 } 762 763 // Otherwise, if the type contains an SSE vector type, the alignment is 16. 764 if (Align >= 16 && (isSSEVectorType(getContext(), Ty) || 765 isRecordWithSSEVectorType(getContext(), Ty))) 766 return 16; 767 768 return MinABIStackAlignInBytes; 769 } 770 771 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal, 772 unsigned &FreeRegs) const { 773 if (!ByVal) { 774 if (FreeRegs) { 775 --FreeRegs; // Non byval indirects just use one pointer. 
776 return ABIArgInfo::getIndirectInReg(0, false); 777 } 778 return ABIArgInfo::getIndirect(0, false); 779 } 780 781 // Compute the byval alignment. 782 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 783 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign); 784 if (StackAlign == 0) 785 return ABIArgInfo::getIndirect(4); 786 787 // If the stack alignment is less than the type alignment, realign the 788 // argument. 789 if (StackAlign < TypeAlign) 790 return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, 791 /*Realign=*/true); 792 793 return ABIArgInfo::getIndirect(StackAlign); 794 } 795 796 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const { 797 const Type *T = isSingleElementStruct(Ty, getContext()); 798 if (!T) 799 T = Ty.getTypePtr(); 800 801 if (const BuiltinType *BT = T->getAs<BuiltinType>()) { 802 BuiltinType::Kind K = BT->getKind(); 803 if (K == BuiltinType::Float || K == BuiltinType::Double) 804 return Float; 805 } 806 return Integer; 807 } 808 809 bool X86_32ABIInfo::shouldUseInReg(QualType Ty, unsigned &FreeRegs, 810 bool IsFastCall, bool &NeedsPadding) const { 811 NeedsPadding = false; 812 Class C = classify(Ty); 813 if (C == Float) 814 return false; 815 816 unsigned Size = getContext().getTypeSize(Ty); 817 unsigned SizeInRegs = (Size + 31) / 32; 818 819 if (SizeInRegs == 0) 820 return false; 821 822 if (SizeInRegs > FreeRegs) { 823 FreeRegs = 0; 824 return false; 825 } 826 827 FreeRegs -= SizeInRegs; 828 829 if (IsFastCall) { 830 if (Size > 32) 831 return false; 832 833 if (Ty->isIntegralOrEnumerationType()) 834 return true; 835 836 if (Ty->isPointerType()) 837 return true; 838 839 if (Ty->isReferenceType()) 840 return true; 841 842 if (FreeRegs) 843 NeedsPadding = true; 844 845 return false; 846 } 847 848 return true; 849 } 850 851 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, 852 unsigned &FreeRegs, 853 bool IsFastCall) const { 854 // FIXME: Set alignment on indirect arguments. 855 if (isAggregateTypeForABI(Ty)) { 856 // Structures with flexible arrays are always indirect. 857 if (const RecordType *RT = Ty->getAs<RecordType>()) { 858 // Structures with either a non-trivial destructor or a non-trivial 859 // copy constructor are always indirect. 860 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 861 return getIndirectResult(Ty, false, FreeRegs); 862 863 if (RT->getDecl()->hasFlexibleArrayMember()) 864 return getIndirectResult(Ty, true, FreeRegs); 865 } 866 867 // Ignore empty structs/unions. 868 if (isEmptyRecord(getContext(), Ty, true)) 869 return ABIArgInfo::getIgnore(); 870 871 llvm::LLVMContext &LLVMContext = getVMContext(); 872 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 873 bool NeedsPadding; 874 if (shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding)) { 875 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; 876 SmallVector<llvm::Type*, 3> Elements; 877 for (unsigned I = 0; I < SizeInRegs; ++I) 878 Elements.push_back(Int32); 879 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 880 return ABIArgInfo::getDirectInReg(Result); 881 } 882 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : 0; 883 884 // Expand small (<= 128-bit) record types when we know that the stack layout 885 // of those arguments will match the struct. This is important because the 886 // LLVM backend isn't smart enough to remove byval, which inhibits many 887 // optimizations. 
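// For example (illustrative, not from the original source): a struct such as
// { int a; int b; } can be expanded into two i32 arguments with the same stack
// layout, whereas a struct containing a bit-field or a member that is not a
// 32- or 64-bit basic type cannot (see canExpandIndirectArgument above).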
888 if (getContext().getTypeSize(Ty) <= 4*32 && 889 canExpandIndirectArgument(Ty, getContext())) 890 return ABIArgInfo::getExpandWithPadding(IsFastCall, PaddingType); 891 892 return getIndirectResult(Ty, true, FreeRegs); 893 } 894 895 if (const VectorType *VT = Ty->getAs<VectorType>()) { 896 // On Darwin, some vectors are passed in memory, we handle this by passing 897 // it as an i8/i16/i32/i64. 898 if (IsDarwinVectorABI) { 899 uint64_t Size = getContext().getTypeSize(Ty); 900 if ((Size == 8 || Size == 16 || Size == 32) || 901 (Size == 64 && VT->getNumElements() == 1)) 902 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 903 Size)); 904 } 905 906 llvm::Type *IRType = CGT.ConvertType(Ty); 907 if (UseX86_MMXType(IRType)) { 908 if (IsMMXDisabled) 909 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 910 64)); 911 ABIArgInfo AAI = ABIArgInfo::getDirect(IRType); 912 AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext())); 913 return AAI; 914 } 915 916 return ABIArgInfo::getDirect(); 917 } 918 919 920 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 921 Ty = EnumTy->getDecl()->getIntegerType(); 922 923 bool NeedsPadding; 924 bool InReg = shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding); 925 926 if (Ty->isPromotableIntegerType()) { 927 if (InReg) 928 return ABIArgInfo::getExtendInReg(); 929 return ABIArgInfo::getExtend(); 930 } 931 if (InReg) 932 return ABIArgInfo::getDirectInReg(); 933 return ABIArgInfo::getDirect(); 934 } 935 936 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const { 937 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), 938 FI.getCallingConvention()); 939 940 unsigned CC = FI.getCallingConvention(); 941 bool IsFastCall = CC == llvm::CallingConv::X86_FastCall; 942 unsigned FreeRegs; 943 if (IsFastCall) 944 FreeRegs = 2; 945 else if (FI.getHasRegParm()) 946 FreeRegs = FI.getRegParm(); 947 else 948 FreeRegs = DefaultNumRegisterParameters; 949 950 // If the return value is indirect, then the hidden argument is consuming one 951 // integer register. 
952 if (FI.getReturnInfo().isIndirect() && FreeRegs) { 953 --FreeRegs; 954 ABIArgInfo &Old = FI.getReturnInfo(); 955 Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(), 956 Old.getIndirectByVal(), 957 Old.getIndirectRealign()); 958 } 959 960 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 961 it != ie; ++it) 962 it->info = classifyArgumentType(it->type, FreeRegs, IsFastCall); 963 } 964 965 llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 966 CodeGenFunction &CGF) const { 967 llvm::Type *BPP = CGF.Int8PtrPtrTy; 968 969 CGBuilderTy &Builder = CGF.Builder; 970 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 971 "ap"); 972 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 973 974 // Compute if the address needs to be aligned 975 unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity(); 976 Align = getTypeStackAlignInBytes(Ty, Align); 977 Align = std::max(Align, 4U); 978 if (Align > 4) { 979 // addr = (addr + align - 1) & -align; 980 llvm::Value *Offset = 981 llvm::ConstantInt::get(CGF.Int32Ty, Align - 1); 982 Addr = CGF.Builder.CreateGEP(Addr, Offset); 983 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr, 984 CGF.Int32Ty); 985 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align); 986 Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 987 Addr->getType(), 988 "ap.cur.aligned"); 989 } 990 991 llvm::Type *PTy = 992 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 993 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 994 995 uint64_t Offset = 996 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align); 997 llvm::Value *NextAddr = 998 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 999 "ap.next"); 1000 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 1001 1002 return AddrTyped; 1003 } 1004 1005 void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 1006 llvm::GlobalValue *GV, 1007 CodeGen::CodeGenModule &CGM) const { 1008 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 1009 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 1010 // Get the LLVM function. 1011 llvm::Function *Fn = cast<llvm::Function>(GV); 1012 1013 // Now add the 'alignstack' attribute with a value of 16. 1014 llvm::AttrBuilder B; 1015 B.addStackAlignmentAttr(16); 1016 Fn->addAttribute(llvm::AttrListPtr::FunctionIndex, 1017 llvm::Attributes::get(CGM.getLLVMContext(), B)); 1018 } 1019 } 1020 } 1021 1022 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( 1023 CodeGen::CodeGenFunction &CGF, 1024 llvm::Value *Address) const { 1025 CodeGen::CGBuilderTy &Builder = CGF.Builder; 1026 1027 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 1028 1029 // 0-7 are the eight integer registers; the order is different 1030 // on Darwin (for EH), but the range is the same. 1031 // 8 is %eip. 1032 AssignToArrayRange(Builder, Address, Four8, 0, 8); 1033 1034 if (CGF.CGM.isTargetDarwin()) { 1035 // 12-16 are st(0..4). Not sure why we stop at 4. 1036 // These have size 16, which is sizeof(long double) on 1037 // platforms with 8-byte alignment for that type. 1038 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16); 1039 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16); 1040 1041 } else { 1042 // 9 is %eflags, which doesn't get a size on Darwin for some 1043 // reason. 1044 Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9)); 1045 1046 // 11-16 are st(0..5). Not sure why we stop at 5. 
1047 // These have size 12, which is sizeof(long double) on 1048 // platforms with 4-byte alignment for that type. 1049 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12); 1050 AssignToArrayRange(Builder, Address, Twelve8, 11, 16); 1051 } 1052 1053 return false; 1054 } 1055 1056 //===----------------------------------------------------------------------===// 1057 // X86-64 ABI Implementation 1058 //===----------------------------------------------------------------------===// 1059 1060 1061 namespace { 1062 /// X86_64ABIInfo - The X86_64 ABI information. 1063 class X86_64ABIInfo : public ABIInfo { 1064 enum Class { 1065 Integer = 0, 1066 SSE, 1067 SSEUp, 1068 X87, 1069 X87Up, 1070 ComplexX87, 1071 NoClass, 1072 Memory 1073 }; 1074 1075 /// merge - Implement the X86_64 ABI merging algorithm. 1076 /// 1077 /// Merge an accumulating classification \arg Accum with a field 1078 /// classification \arg Field. 1079 /// 1080 /// \param Accum - The accumulating classification. This should 1081 /// always be either NoClass or the result of a previous merge 1082 /// call. In addition, this should never be Memory (the caller 1083 /// should just return Memory for the aggregate). 1084 static Class merge(Class Accum, Class Field); 1085 1086 /// postMerge - Implement the X86_64 ABI post merging algorithm. 1087 /// 1088 /// Post merger cleanup, reduces a malformed Hi and Lo pair to 1089 /// final MEMORY or SSE classes when necessary. 1090 /// 1091 /// \param AggregateSize - The size of the current aggregate in 1092 /// the classification process. 1093 /// 1094 /// \param Lo - The classification for the parts of the type 1095 /// residing in the low word of the containing object. 1096 /// 1097 /// \param Hi - The classification for the parts of the type 1098 /// residing in the higher words of the containing object. 1099 /// 1100 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const; 1101 1102 /// classify - Determine the x86_64 register classes in which the 1103 /// given type T should be passed. 1104 /// 1105 /// \param Lo - The classification for the parts of the type 1106 /// residing in the low word of the containing object. 1107 /// 1108 /// \param Hi - The classification for the parts of the type 1109 /// residing in the high word of the containing object. 1110 /// 1111 /// \param OffsetBase - The bit offset of this type in the 1112 /// containing object. Some parameters are classified differently 1113 /// depending on whether they straddle an eightbyte boundary. 1114 /// 1115 /// If a word is unused its result will be NoClass; if a type should 1116 /// be passed in Memory then at least the classification of \arg Lo 1117 /// will be Memory. 1118 /// 1119 /// The \arg Lo class will be NoClass iff the argument is ignored. 1120 /// 1121 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will 1122 /// also be ComplexX87. 1123 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const; 1124 1125 llvm::Type *GetByteVectorType(QualType Ty) const; 1126 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType, 1127 unsigned IROffset, QualType SourceTy, 1128 unsigned SourceOffset) const; 1129 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType, 1130 unsigned IROffset, QualType SourceTy, 1131 unsigned SourceOffset) const; 1132 1133 /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable result 1134 /// such that the argument will be returned in memory.
1135 ABIArgInfo getIndirectReturnResult(QualType Ty) const; 1136 1137 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 1138 /// such that the argument will be passed in memory. 1139 /// 1140 /// \param freeIntRegs - The number of free integer registers remaining 1141 /// available. 1142 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const; 1143 1144 ABIArgInfo classifyReturnType(QualType RetTy) const; 1145 1146 ABIArgInfo classifyArgumentType(QualType Ty, 1147 unsigned freeIntRegs, 1148 unsigned &neededInt, 1149 unsigned &neededSSE) const; 1150 1151 bool IsIllegalVectorType(QualType Ty) const; 1152 1153 /// The 0.98 ABI revision clarified a lot of ambiguities, 1154 /// unfortunately in ways that were not always consistent with 1155 /// certain previous compilers. In particular, platforms which 1156 /// required strict binary compatibility with older versions of GCC 1157 /// may need to exempt themselves. 1158 bool honorsRevision0_98() const { 1159 return !getContext().getTargetInfo().getTriple().isOSDarwin(); 1160 } 1161 1162 bool HasAVX; 1163 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on 1164 // 64-bit hardware. 1165 bool Has64BitPointers; 1166 1167 public: 1168 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) : 1169 ABIInfo(CGT), HasAVX(hasavx), 1170 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) { 1171 } 1172 1173 bool isPassedUsingAVXType(QualType type) const { 1174 unsigned neededInt, neededSSE; 1175 // The freeIntRegs argument doesn't matter here. 1176 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE); 1177 if (info.isDirect()) { 1178 llvm::Type *ty = info.getCoerceToType(); 1179 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty)) 1180 return (vectorTy->getBitWidth() > 128); 1181 } 1182 return false; 1183 } 1184 1185 virtual void computeInfo(CGFunctionInfo &FI) const; 1186 1187 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 1188 CodeGenFunction &CGF) const; 1189 }; 1190 1191 /// WinX86_64ABIInfo - The Windows X86_64 ABI information. 1192 class WinX86_64ABIInfo : public ABIInfo { 1193 1194 ABIArgInfo classify(QualType Ty) const; 1195 1196 public: 1197 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 1198 1199 virtual void computeInfo(CGFunctionInfo &FI) const; 1200 1201 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 1202 CodeGenFunction &CGF) const; 1203 }; 1204 1205 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo { 1206 public: 1207 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) 1208 : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {} 1209 1210 const X86_64ABIInfo &getABIInfo() const { 1211 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo()); 1212 } 1213 1214 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 1215 return 7; 1216 } 1217 1218 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 1219 llvm::Value *Address) const { 1220 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 1221 1222 // 0-15 are the 16 integer registers. 1223 // 16 is %rip. 
1224 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 1225 return false; 1226 } 1227 1228 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 1229 StringRef Constraint, 1230 llvm::Type* Ty) const { 1231 return X86AdjustInlineAsmType(CGF, Constraint, Ty); 1232 } 1233 1234 bool isNoProtoCallVariadic(const CallArgList &args, 1235 const FunctionNoProtoType *fnType) const { 1236 // The default CC on x86-64 sets %al to the number of SSE 1237 // registers used, and GCC sets this when calling an unprototyped 1238 // function, so we override the default behavior. However, don't do 1239 // that when AVX types are involved: the ABI explicitly states it is 1240 // undefined, and it doesn't work in practice because of how the ABI 1241 // defines varargs anyway. 1242 if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) { 1243 bool HasAVXType = false; 1244 for (CallArgList::const_iterator 1245 it = args.begin(), ie = args.end(); it != ie; ++it) { 1246 if (getABIInfo().isPassedUsingAVXType(it->Ty)) { 1247 HasAVXType = true; 1248 break; 1249 } 1250 } 1251 1252 if (!HasAVXType) 1253 return true; 1254 } 1255 1256 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType); 1257 } 1258 1259 }; 1260 1261 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 1262 public: 1263 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 1264 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {} 1265 1266 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 1267 return 7; 1268 } 1269 1270 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 1271 llvm::Value *Address) const { 1272 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 1273 1274 // 0-15 are the 16 integer registers. 1275 // 16 is %rip. 1276 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 1277 return false; 1278 } 1279 }; 1280 1281 } 1282 1283 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, 1284 Class &Hi) const { 1285 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: 1286 // 1287 // (a) If one of the classes is Memory, the whole argument is passed in 1288 // memory. 1289 // 1290 // (b) If X87UP is not preceded by X87, the whole argument is passed in 1291 // memory. 1292 // 1293 // (c) If the size of the aggregate exceeds two eightbytes and the first 1294 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole 1295 // argument is passed in memory. NOTE: This is necessary to keep the 1296 // ABI working for processors that don't support the __m256 type. 1297 // 1298 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. 1299 // 1300 // Some of these are enforced by the merging logic. Others can arise 1301 // only with unions; for example: 1302 // union { _Complex double; unsigned; } 1303 // 1304 // Note that clauses (b) and (c) were added in 0.98. 1305 // 1306 if (Hi == Memory) 1307 Lo = Memory; 1308 if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) 1309 Lo = Memory; 1310 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) 1311 Lo = Memory; 1312 if (Hi == SSEUp && Lo != SSE) 1313 Hi = SSE; 1314 } 1315 1316 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { 1317 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 1318 // classified recursively so that always two fields are 1319 // considered. The resulting class is calculated according to 1320 // the classes of the fields in the eightbyte: 1321 // 1322 // (a) If both classes are equal, this is the resulting class.
1323 // 1324 // (b) If one of the classes is NO_CLASS, the resulting class is 1325 // the other class. 1326 // 1327 // (c) If one of the classes is MEMORY, the result is the MEMORY 1328 // class. 1329 // 1330 // (d) If one of the classes is INTEGER, the result is the 1331 // INTEGER. 1332 // 1333 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 1334 // MEMORY is used as class. 1335 // 1336 // (f) Otherwise class SSE is used. 1337 1338 // Accum should never be memory (we should have returned) or 1339 // ComplexX87 (because this cannot be passed in a structure). 1340 assert((Accum != Memory && Accum != ComplexX87) && 1341 "Invalid accumulated classification during merge."); 1342 if (Accum == Field || Field == NoClass) 1343 return Accum; 1344 if (Field == Memory) 1345 return Memory; 1346 if (Accum == NoClass) 1347 return Field; 1348 if (Accum == Integer || Field == Integer) 1349 return Integer; 1350 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 1351 Accum == X87 || Accum == X87Up) 1352 return Memory; 1353 return SSE; 1354 } 1355 1356 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, 1357 Class &Lo, Class &Hi) const { 1358 // FIXME: This code can be simplified by introducing a simple value class for 1359 // Class pairs with appropriate constructor methods for the various 1360 // situations. 1361 1362 // FIXME: Some of the split computations are wrong; unaligned vectors 1363 // shouldn't be passed in registers for example, so there is no chance they 1364 // can straddle an eightbyte. Verify & simplify. 1365 1366 Lo = Hi = NoClass; 1367 1368 Class &Current = OffsetBase < 64 ? Lo : Hi; 1369 Current = Memory; 1370 1371 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 1372 BuiltinType::Kind k = BT->getKind(); 1373 1374 if (k == BuiltinType::Void) { 1375 Current = NoClass; 1376 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 1377 Lo = Integer; 1378 Hi = Integer; 1379 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 1380 Current = Integer; 1381 } else if ((k == BuiltinType::Float || k == BuiltinType::Double) || 1382 (k == BuiltinType::LongDouble && 1383 getContext().getTargetInfo().getTriple().getOS() == 1384 llvm::Triple::NativeClient)) { 1385 Current = SSE; 1386 } else if (k == BuiltinType::LongDouble) { 1387 Lo = X87; 1388 Hi = X87Up; 1389 } 1390 // FIXME: _Decimal32 and _Decimal64 are SSE. 1391 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 1392 return; 1393 } 1394 1395 if (const EnumType *ET = Ty->getAs<EnumType>()) { 1396 // Classify the underlying integer type. 1397 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi); 1398 return; 1399 } 1400 1401 if (Ty->hasPointerRepresentation()) { 1402 Current = Integer; 1403 return; 1404 } 1405 1406 if (Ty->isMemberPointerType()) { 1407 if (Ty->isMemberFunctionPointerType() && Has64BitPointers) 1408 Lo = Hi = Integer; 1409 else 1410 Current = Integer; 1411 return; 1412 } 1413 1414 if (const VectorType *VT = Ty->getAs<VectorType>()) { 1415 uint64_t Size = getContext().getTypeSize(VT); 1416 if (Size == 32) { 1417 // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x 1418 // float> as integer. 1419 Current = Integer; 1420 1421 // If this type crosses an eightbyte boundary, it should be 1422 // split. 1423 uint64_t EB_Real = (OffsetBase) / 64; 1424 uint64_t EB_Imag = (OffsetBase + Size - 1) / 64; 1425 if (EB_Real != EB_Imag) 1426 Hi = Lo; 1427 } else if (Size == 64) { 1428 // gcc passes <1 x double> in memory. 
1429 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) 1430 return; 1431 1432 // gcc passes <1 x long long> as INTEGER. 1433 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) || 1434 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) || 1435 VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) || 1436 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong)) 1437 Current = Integer; 1438 else 1439 Current = SSE; 1440 1441 // If this type crosses an eightbyte boundary, it should be 1442 // split. 1443 if (OffsetBase && OffsetBase != 64) 1444 Hi = Lo; 1445 } else if (Size == 128 || (HasAVX && Size == 256)) { 1446 // Arguments of 256-bits are split into four eightbyte chunks. The 1447 // least significant one belongs to class SSE and all the others to class 1448 // SSEUP. The original Lo and Hi design considers that types can't be 1449 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. 1450 // This design isn't correct for 256-bits, but since there are no cases 1451 // where the upper parts would need to be inspected, avoid adding 1452 // complexity and just consider Hi to match the 64-256 part. 1453 Lo = SSE; 1454 Hi = SSEUp; 1455 } 1456 return; 1457 } 1458 1459 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 1460 QualType ET = getContext().getCanonicalType(CT->getElementType()); 1461 1462 uint64_t Size = getContext().getTypeSize(Ty); 1463 if (ET->isIntegralOrEnumerationType()) { 1464 if (Size <= 64) 1465 Current = Integer; 1466 else if (Size <= 128) 1467 Lo = Hi = Integer; 1468 } else if (ET == getContext().FloatTy) 1469 Current = SSE; 1470 else if (ET == getContext().DoubleTy || 1471 (ET == getContext().LongDoubleTy && 1472 getContext().getTargetInfo().getTriple().getOS() == 1473 llvm::Triple::NativeClient)) 1474 Lo = Hi = SSE; 1475 else if (ET == getContext().LongDoubleTy) 1476 Current = ComplexX87; 1477 1478 // If this complex type crosses an eightbyte boundary then it 1479 // should be split. 1480 uint64_t EB_Real = (OffsetBase) / 64; 1481 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; 1482 if (Hi == NoClass && EB_Real != EB_Imag) 1483 Hi = Lo; 1484 1485 return; 1486 } 1487 1488 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 1489 // Arrays are treated like structures. 1490 1491 uint64_t Size = getContext().getTypeSize(Ty); 1492 1493 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 1494 // than four eightbytes, ..., it has class MEMORY. 1495 if (Size > 256) 1496 return; 1497 1498 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 1499 // fields, it has class MEMORY. 1500 // 1501 // Only need to check alignment of array base. 1502 if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) 1503 return; 1504 1505 // Otherwise implement simplified merge. We could be smarter about 1506 // this, but it isn't worth it and would be harder to verify. 1507 Current = NoClass; 1508 uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); 1509 uint64_t ArraySize = AT->getSize().getZExtValue(); 1510 1511 // The only case a 256-bit wide vector could be used is when the array 1512 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 1513 // to work for sizes wider than 128, early check and fallback to memory.
1514 if (Size > 128 && EltSize != 256) 1515 return; 1516 1517 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 1518 Class FieldLo, FieldHi; 1519 classify(AT->getElementType(), Offset, FieldLo, FieldHi); 1520 Lo = merge(Lo, FieldLo); 1521 Hi = merge(Hi, FieldHi); 1522 if (Lo == Memory || Hi == Memory) 1523 break; 1524 } 1525 1526 postMerge(Size, Lo, Hi); 1527 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 1528 return; 1529 } 1530 1531 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1532 uint64_t Size = getContext().getTypeSize(Ty); 1533 1534 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 1535 // than four eightbytes, ..., it has class MEMORY. 1536 if (Size > 256) 1537 return; 1538 1539 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial 1540 // copy constructor or a non-trivial destructor, it is passed by invisible 1541 // reference. 1542 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 1543 return; 1544 1545 const RecordDecl *RD = RT->getDecl(); 1546 1547 // Assume variable sized types are passed in memory. 1548 if (RD->hasFlexibleArrayMember()) 1549 return; 1550 1551 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 1552 1553 // Reset Lo class, this will be recomputed. 1554 Current = NoClass; 1555 1556 // If this is a C++ record, classify the bases first. 1557 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1558 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 1559 e = CXXRD->bases_end(); i != e; ++i) { 1560 assert(!i->isVirtual() && !i->getType()->isDependentType() && 1561 "Unexpected base class!"); 1562 const CXXRecordDecl *Base = 1563 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 1564 1565 // Classify this field. 1566 // 1567 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 1568 // single eightbyte, each is classified separately. Each eightbyte gets 1569 // initialized to class NO_CLASS. 1570 Class FieldLo, FieldHi; 1571 uint64_t Offset = 1572 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); 1573 classify(i->getType(), Offset, FieldLo, FieldHi); 1574 Lo = merge(Lo, FieldLo); 1575 Hi = merge(Hi, FieldHi); 1576 if (Lo == Memory || Hi == Memory) 1577 break; 1578 } 1579 } 1580 1581 // Classify the fields one at a time, merging the results. 1582 unsigned idx = 0; 1583 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1584 i != e; ++i, ++idx) { 1585 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 1586 bool BitField = i->isBitField(); 1587 1588 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than 1589 // four eightbytes, or it contains unaligned fields, it has class MEMORY. 1590 // 1591 // The only case a 256-bit wide vector could be used is when the struct 1592 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 1593 // to work for sizes wider than 128, early check and fallback to memory. 1594 // 1595 if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) { 1596 Lo = Memory; 1597 return; 1598 } 1599 // Note, skip this test for bit-fields, see below. 1600 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { 1601 Lo = Memory; 1602 return; 1603 } 1604 1605 // Classify this field. 1606 // 1607 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate 1608 // exceeds a single eightbyte, each is classified 1609 // separately. 
Each eightbyte gets initialized to class 1610 // NO_CLASS. 1611 Class FieldLo, FieldHi; 1612 1613 // Bit-fields require special handling, they do not force the 1614 // structure to be passed in memory even if unaligned, and 1615 // therefore they can straddle an eightbyte. 1616 if (BitField) { 1617 // Ignore padding bit-fields. 1618 if (i->isUnnamedBitfield()) 1619 continue; 1620 1621 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 1622 uint64_t Size = i->getBitWidthValue(getContext()); 1623 1624 uint64_t EB_Lo = Offset / 64; 1625 uint64_t EB_Hi = (Offset + Size - 1) / 64; 1626 FieldLo = FieldHi = NoClass; 1627 if (EB_Lo) { 1628 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 1629 FieldLo = NoClass; 1630 FieldHi = Integer; 1631 } else { 1632 FieldLo = Integer; 1633 FieldHi = EB_Hi ? Integer : NoClass; 1634 } 1635 } else 1636 classify(i->getType(), Offset, FieldLo, FieldHi); 1637 Lo = merge(Lo, FieldLo); 1638 Hi = merge(Hi, FieldHi); 1639 if (Lo == Memory || Hi == Memory) 1640 break; 1641 } 1642 1643 postMerge(Size, Lo, Hi); 1644 } 1645 } 1646 1647 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { 1648 // If this is a scalar LLVM value then assume LLVM will pass it in the right 1649 // place naturally. 1650 if (!isAggregateTypeForABI(Ty)) { 1651 // Treat an enum type as its underlying type. 1652 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1653 Ty = EnumTy->getDecl()->getIntegerType(); 1654 1655 return (Ty->isPromotableIntegerType() ? 1656 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1657 } 1658 1659 return ABIArgInfo::getIndirect(0); 1660 } 1661 1662 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { 1663 if (const VectorType *VecTy = Ty->getAs<VectorType>()) { 1664 uint64_t Size = getContext().getTypeSize(VecTy); 1665 unsigned LargestVector = HasAVX ? 256 : 128; 1666 if (Size <= 64 || Size > LargestVector) 1667 return true; 1668 } 1669 1670 return false; 1671 } 1672 1673 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, 1674 unsigned freeIntRegs) const { 1675 // If this is a scalar LLVM value then assume LLVM will pass it in the right 1676 // place naturally. 1677 // 1678 // This assumption is optimistic, as there could be free registers available 1679 // when we need to pass this argument in memory, and LLVM could try to pass 1680 // the argument in the free register. This does not seem to happen currently, 1681 // but this code would be much safer if we could mark the argument with 1682 // 'onstack'. See PR12193. 1683 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) { 1684 // Treat an enum type as its underlying type. 1685 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1686 Ty = EnumTy->getDecl()->getIntegerType(); 1687 1688 return (Ty->isPromotableIntegerType() ? 1689 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1690 } 1691 1692 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 1693 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 1694 1695 // Compute the byval alignment. We specify the alignment of the byval in all 1696 // cases so that the mid-level optimizer knows the alignment of the byval. 1697 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); 1698 1699 // Attempt to avoid passing indirect results using byval when possible. This 1700 // is important for good codegen. 1701 // 1702 // We do this by coercing the value into a scalar type which the backend can 1703 // handle naturally (i.e., without using byval). 
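// For example (illustrative, not from the original source): once no free
// integer registers remain, a struct such as { void *p; } can be coerced to an
// i64 and passed directly on the stack rather than with the byval attribute.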
1704 //
1705 // For simplicity, we currently only do this when we have exhausted all of the
1706 // free integer registers. Doing this when there are free integer registers
1707 // would require more care, as we would have to ensure that the coerced value
1708 // did not claim the unused register. That would require either reordering the
1709 // arguments to the function (so that any subsequent inreg values came first),
1710 // or only doing this optimization when there were no following arguments that
1711 // might be inreg.
1712 //
1713 // We currently expect it to be rare (particularly in well written code) for
1714 // arguments to be passed on the stack when there are still free integer
1715 // registers available (this would typically imply large structs being passed
1716 // by value), so this seems like a fair tradeoff for now.
1717 //
1718 // We can revisit this if the backend grows support for 'onstack' parameter
1719 // attributes. See PR12193.
1720 if (freeIntRegs == 0) {
1721 uint64_t Size = getContext().getTypeSize(Ty);
1722 
1723 // If this type fits in an eightbyte, coerce it into the matching integral
1724 // type, which will end up on the stack (with alignment 8).
1725 if (Align == 8 && Size <= 64)
1726 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1727 Size));
1728 }
1729 
1730 return ABIArgInfo::getIndirect(Align);
1731 }
1732 
1733 /// GetByteVectorType - The ABI specifies that a value should be passed in a
1734 /// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a
1735 /// vector register.
1736 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
1737 llvm::Type *IRType = CGT.ConvertType(Ty);
1738 
1739 // Wrapper structs that just contain vectors are passed just like vectors,
1740 // strip them off if present.
1741 llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
1742 while (STy && STy->getNumElements() == 1) {
1743 IRType = STy->getElementType(0);
1744 STy = dyn_cast<llvm::StructType>(IRType);
1745 }
1746 
1747 // If the preferred type is a 16-byte vector, prefer to pass it.
1748 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
1749 llvm::Type *EltTy = VT->getElementType();
1750 unsigned BitWidth = VT->getBitWidth();
1751 if ((BitWidth >= 128 && BitWidth <= 256) &&
1752 (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
1753 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
1754 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
1755 EltTy->isIntegerTy(128)))
1756 return VT;
1757 }
1758 
1759 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
1760 }
1761 
1762 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
1763 /// is known to either be off the end of the specified type or be in
1764 /// alignment padding. The user type specified is known to be at most 128 bits
1765 /// in size, and have passed through X86_64ABIInfo::classify with a successful
1766 /// classification that put one of the two halves in the INTEGER class.
1767 ///
1768 /// It is conservatively correct to return false.
1769 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
1770 unsigned EndBit, ASTContext &Context) {
1771 // If the bytes being queried are off the end of the type, there is no user
1772 // data hiding here. This handles analysis of builtins, vectors and other
1773 // types that don't contain interesting padding.
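// A couple of concrete cases may help; the example structs here are purely
// illustrative and are not taken from any caller in this file:
//   - querying bits [32,64) of a plain 32-bit 'int' lies entirely off the
//     end of the type, so the size check below returns true immediately;
//   - for 'struct S { double d; int i; };' the range [96,128) is tail
//     padding, so the recursive field walk reports true, whereas for
//     'struct T { double d; int i; int j; };' that range holds 'j' and the
//     walk reports false.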
1774 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 1775 if (TySize <= StartBit) 1776 return true; 1777 1778 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 1779 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 1780 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 1781 1782 // Check each element to see if the element overlaps with the queried range. 1783 for (unsigned i = 0; i != NumElts; ++i) { 1784 // If the element is after the span we care about, then we're done.. 1785 unsigned EltOffset = i*EltSize; 1786 if (EltOffset >= EndBit) break; 1787 1788 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0; 1789 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 1790 EndBit-EltOffset, Context)) 1791 return false; 1792 } 1793 // If it overlaps no elements, then it is safe to process as padding. 1794 return true; 1795 } 1796 1797 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1798 const RecordDecl *RD = RT->getDecl(); 1799 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 1800 1801 // If this is a C++ record, check the bases first. 1802 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1803 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 1804 e = CXXRD->bases_end(); i != e; ++i) { 1805 assert(!i->isVirtual() && !i->getType()->isDependentType() && 1806 "Unexpected base class!"); 1807 const CXXRecordDecl *Base = 1808 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 1809 1810 // If the base is after the span we care about, ignore it. 1811 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 1812 if (BaseOffset >= EndBit) continue; 1813 1814 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 1815 if (!BitsContainNoUserData(i->getType(), BaseStart, 1816 EndBit-BaseOffset, Context)) 1817 return false; 1818 } 1819 } 1820 1821 // Verify that no field has data that overlaps the region of interest. Yes 1822 // this could be sped up a lot by being smarter about queried fields, 1823 // however we're only looking at structs up to 16 bytes, so we don't care 1824 // much. 1825 unsigned idx = 0; 1826 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1827 i != e; ++i, ++idx) { 1828 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 1829 1830 // If we found a field after the region we care about, then we're done. 1831 if (FieldOffset >= EndBit) break; 1832 1833 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 1834 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 1835 Context)) 1836 return false; 1837 } 1838 1839 // If nothing in this record overlapped the area of interest, then we're 1840 // clean. 1841 return true; 1842 } 1843 1844 return false; 1845 } 1846 1847 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 1848 /// float member at the specified offset. For example, {int,{float}} has a 1849 /// float at offset 4. It is conservatively correct for this routine to return 1850 /// false. 1851 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 1852 const llvm::DataLayout &TD) { 1853 // Base case if we find a float. 1854 if (IROffset == 0 && IRType->isFloatTy()) 1855 return true; 1856 1857 // If this is a struct, recurse into the field at the specified offset. 
1858 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1859 const llvm::StructLayout *SL = TD.getStructLayout(STy); 1860 unsigned Elt = SL->getElementContainingOffset(IROffset); 1861 IROffset -= SL->getElementOffset(Elt); 1862 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 1863 } 1864 1865 // If this is an array, recurse into the field at the specified offset. 1866 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1867 llvm::Type *EltTy = ATy->getElementType(); 1868 unsigned EltSize = TD.getTypeAllocSize(EltTy); 1869 IROffset -= IROffset/EltSize*EltSize; 1870 return ContainsFloatAtOffset(EltTy, IROffset, TD); 1871 } 1872 1873 return false; 1874 } 1875 1876 1877 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 1878 /// low 8 bytes of an XMM register, corresponding to the SSE class. 1879 llvm::Type *X86_64ABIInfo:: 1880 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1881 QualType SourceTy, unsigned SourceOffset) const { 1882 // The only three choices we have are either double, <2 x float>, or float. We 1883 // pass as float if the last 4 bytes is just padding. This happens for 1884 // structs that contain 3 floats. 1885 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 1886 SourceOffset*8+64, getContext())) 1887 return llvm::Type::getFloatTy(getVMContext()); 1888 1889 // We want to pass as <2 x float> if the LLVM IR type contains a float at 1890 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 1891 // case. 1892 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && 1893 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) 1894 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 1895 1896 return llvm::Type::getDoubleTy(getVMContext()); 1897 } 1898 1899 1900 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 1901 /// an 8-byte GPR. This means that we either have a scalar or we are talking 1902 /// about the high or low part of an up-to-16-byte struct. This routine picks 1903 /// the best LLVM IR type to represent this, which may be i64 or may be anything 1904 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 1905 /// etc). 1906 /// 1907 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 1908 /// the source type. IROffset is an offset in bytes into the LLVM IR type that 1909 /// the 8-byte value references. PrefType may be null. 1910 /// 1911 /// SourceTy is the source level type for the entire argument. SourceOffset is 1912 /// an offset into this that we're processing (which is always either 0 or 8). 1913 /// 1914 llvm::Type *X86_64ABIInfo:: 1915 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1916 QualType SourceTy, unsigned SourceOffset) const { 1917 // If we're dealing with an un-offset LLVM IR type, then it means that we're 1918 // returning an 8-byte unit starting with it. See if we can safely use it. 1919 if (IROffset == 0) { 1920 // Pointers and int64's always fill the 8-byte unit. 1921 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || 1922 IRType->isIntegerTy(64)) 1923 return IRType; 1924 1925 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 1926 // goodness in the source type is just tail padding. This is allowed to 1927 // kick in for struct {double,int} on the int, but not on 1928 // struct{double,int,int} because we wouldn't return the second int. 
We
1929 // have to do this analysis on the source type because we can't depend on
1930 // unions being lowered a specific way etc.
1931 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
1932 IRType->isIntegerTy(32) ||
1933 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
1934 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
1935 cast<llvm::IntegerType>(IRType)->getBitWidth();
1936 
1937 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
1938 SourceOffset*8+64, getContext()))
1939 return IRType;
1940 }
1941 }
1942 
1943 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
1944 // If this is a struct, recurse into the field at the specified offset.
1945 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
1946 if (IROffset < SL->getSizeInBytes()) {
1947 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
1948 IROffset -= SL->getElementOffset(FieldIdx);
1949 
1950 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
1951 SourceTy, SourceOffset);
1952 }
1953 }
1954 
1955 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
1956 llvm::Type *EltTy = ATy->getElementType();
1957 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
1958 unsigned EltOffset = IROffset/EltSize*EltSize;
1959 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
1960 SourceOffset);
1961 }
1962 
1963 // Okay, we don't have any better idea of what to pass, so we pass this in an
1964 // integer register that isn't too big to fit the rest of the struct.
1965 unsigned TySizeInBytes =
1966 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
1967 
1968 assert(TySizeInBytes != SourceOffset && "Empty field?");
1969 
1970 // It is always safe to classify this as an integer type up to i64 that
1971 // isn't larger than the structure.
1972 return llvm::IntegerType::get(getVMContext(),
1973 std::min(TySizeInBytes-SourceOffset, 8U)*8);
1974 }
1975 
1976 
1977 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
1978 /// be used as elements of a two register pair to pass or return, return a
1979 /// first class aggregate to represent them. For example, if the low part of
1980 /// a by-value argument should be passed as i32* and the high part as float,
1981 /// return {i32*, float}.
1982 static llvm::Type *
1983 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
1984 const llvm::DataLayout &TD) {
1985 // In order to correctly satisfy the ABI, we need the high part to start
1986 // at offset 8. If the high and low parts we inferred are both 4-byte types
1987 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
1988 // the second element at offset 8. Check for this:
1989 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
1990 unsigned HiAlign = TD.getABITypeAlignment(Hi);
1991 unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
1992 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
1993 
1994 // To handle this, we have to increase the size of the low part so that the
1995 // second element will start at an 8 byte offset. We can't increase the size
1996 // of the second element because it might make us access off the end of the
1997 // struct.
1998 if (HiStart != 8) {
1999 // There are only two sorts of types the ABI generation code can produce for
2000 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
2001 // Promote these to a larger type.
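// For instance (an illustrative pairing, not necessarily one produced by the
// callers in this file): if the low part were i32 and the high part a 4-byte
// aligned float, {i32, float} would place the float at offset 4; widening the
// i32 to i64 below pushes the second element out to offset 8.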
2002 if (Lo->isFloatTy()) 2003 Lo = llvm::Type::getDoubleTy(Lo->getContext()); 2004 else { 2005 assert(Lo->isIntegerTy() && "Invalid/unknown lo type"); 2006 Lo = llvm::Type::getInt64Ty(Lo->getContext()); 2007 } 2008 } 2009 2010 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL); 2011 2012 2013 // Verify that the second element is at an 8-byte offset. 2014 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 && 2015 "Invalid x86-64 argument pair!"); 2016 return Result; 2017 } 2018 2019 ABIArgInfo X86_64ABIInfo:: 2020 classifyReturnType(QualType RetTy) const { 2021 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the 2022 // classification algorithm. 2023 X86_64ABIInfo::Class Lo, Hi; 2024 classify(RetTy, 0, Lo, Hi); 2025 2026 // Check some invariants. 2027 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2028 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2029 2030 llvm::Type *ResType = 0; 2031 switch (Lo) { 2032 case NoClass: 2033 if (Hi == NoClass) 2034 return ABIArgInfo::getIgnore(); 2035 // If the low part is just padding, it takes no register, leave ResType 2036 // null. 2037 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2038 "Unknown missing lo part"); 2039 break; 2040 2041 case SSEUp: 2042 case X87Up: 2043 llvm_unreachable("Invalid classification for lo word."); 2044 2045 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via 2046 // hidden argument. 2047 case Memory: 2048 return getIndirectReturnResult(RetTy); 2049 2050 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next 2051 // available register of the sequence %rax, %rdx is used. 2052 case Integer: 2053 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 2054 2055 // If we have a sign or zero extended integer, make sure to return Extend 2056 // so that the parameter gets the right LLVM IR attributes. 2057 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2058 // Treat an enum type as its underlying type. 2059 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 2060 RetTy = EnumTy->getDecl()->getIntegerType(); 2061 2062 if (RetTy->isIntegralOrEnumerationType() && 2063 RetTy->isPromotableIntegerType()) 2064 return ABIArgInfo::getExtend(); 2065 } 2066 break; 2067 2068 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 2069 // available SSE register of the sequence %xmm0, %xmm1 is used. 2070 case SSE: 2071 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 2072 break; 2073 2074 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 2075 // returned on the X87 stack in %st0 as 80-bit x87 number. 2076 case X87: 2077 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 2078 break; 2079 2080 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 2081 // part of the value is returned in %st0 and the imaginary part in 2082 // %st1. 2083 case ComplexX87: 2084 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 2085 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), 2086 llvm::Type::getX86_FP80Ty(getVMContext()), 2087 NULL); 2088 break; 2089 } 2090 2091 llvm::Type *HighPart = 0; 2092 switch (Hi) { 2093 // Memory was handled previously and X87 should 2094 // never occur as a hi class. 2095 case Memory: 2096 case X87: 2097 llvm_unreachable("Invalid classification for hi word."); 2098 2099 case ComplexX87: // Previously handled. 
2100 case NoClass: 2101 break; 2102 2103 case Integer: 2104 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2105 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2106 return ABIArgInfo::getDirect(HighPart, 8); 2107 break; 2108 case SSE: 2109 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2110 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2111 return ABIArgInfo::getDirect(HighPart, 8); 2112 break; 2113 2114 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 2115 // is passed in the next available eightbyte chunk if the last used 2116 // vector register. 2117 // 2118 // SSEUP should always be preceded by SSE, just widen. 2119 case SSEUp: 2120 assert(Lo == SSE && "Unexpected SSEUp classification."); 2121 ResType = GetByteVectorType(RetTy); 2122 break; 2123 2124 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 2125 // returned together with the previous X87 value in %st0. 2126 case X87Up: 2127 // If X87Up is preceded by X87, we don't need to do 2128 // anything. However, in some cases with unions it may not be 2129 // preceded by X87. In such situations we follow gcc and pass the 2130 // extra bits in an SSE reg. 2131 if (Lo != X87) { 2132 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2133 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2134 return ABIArgInfo::getDirect(HighPart, 8); 2135 } 2136 break; 2137 } 2138 2139 // If a high part was specified, merge it together with the low part. It is 2140 // known to pass in the high eightbyte of the result. We do this by forming a 2141 // first class struct aggregate with the high and low part: {low, high} 2142 if (HighPart) 2143 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2144 2145 return ABIArgInfo::getDirect(ResType); 2146 } 2147 2148 ABIArgInfo X86_64ABIInfo::classifyArgumentType( 2149 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE) 2150 const 2151 { 2152 X86_64ABIInfo::Class Lo, Hi; 2153 classify(Ty, 0, Lo, Hi); 2154 2155 // Check some invariants. 2156 // FIXME: Enforce these by construction. 2157 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2158 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2159 2160 neededInt = 0; 2161 neededSSE = 0; 2162 llvm::Type *ResType = 0; 2163 switch (Lo) { 2164 case NoClass: 2165 if (Hi == NoClass) 2166 return ABIArgInfo::getIgnore(); 2167 // If the low part is just padding, it takes no register, leave ResType 2168 // null. 2169 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2170 "Unknown missing lo part"); 2171 break; 2172 2173 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 2174 // on the stack. 2175 case Memory: 2176 2177 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 2178 // COMPLEX_X87, it is passed in memory. 2179 case X87: 2180 case ComplexX87: 2181 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 2182 ++neededInt; 2183 return getIndirectResult(Ty, freeIntRegs); 2184 2185 case SSEUp: 2186 case X87Up: 2187 llvm_unreachable("Invalid classification for lo word."); 2188 2189 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 2190 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 2191 // and %r9 is used. 2192 case Integer: 2193 ++neededInt; 2194 2195 // Pick an 8-byte type based on the preferred type. 
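// For example (illustrative types, assuming the usual 64-bit pointer target):
// 'struct { char c; short s; int i; }' gets coerced to i64 here, while
// 'struct { int *p; }' keeps the pointer type itself, since the pointer
// already fills the whole eightbyte.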
2196 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 2197 2198 // If we have a sign or zero extended integer, make sure to return Extend 2199 // so that the parameter gets the right LLVM IR attributes. 2200 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2201 // Treat an enum type as its underlying type. 2202 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2203 Ty = EnumTy->getDecl()->getIntegerType(); 2204 2205 if (Ty->isIntegralOrEnumerationType() && 2206 Ty->isPromotableIntegerType()) 2207 return ABIArgInfo::getExtend(); 2208 } 2209 2210 break; 2211 2212 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 2213 // available SSE register is used, the registers are taken in the 2214 // order from %xmm0 to %xmm7. 2215 case SSE: { 2216 llvm::Type *IRType = CGT.ConvertType(Ty); 2217 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 2218 ++neededSSE; 2219 break; 2220 } 2221 } 2222 2223 llvm::Type *HighPart = 0; 2224 switch (Hi) { 2225 // Memory was handled previously, ComplexX87 and X87 should 2226 // never occur as hi classes, and X87Up must be preceded by X87, 2227 // which is passed in memory. 2228 case Memory: 2229 case X87: 2230 case ComplexX87: 2231 llvm_unreachable("Invalid classification for hi word."); 2232 2233 case NoClass: break; 2234 2235 case Integer: 2236 ++neededInt; 2237 // Pick an 8-byte type based on the preferred type. 2238 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2239 2240 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2241 return ABIArgInfo::getDirect(HighPart, 8); 2242 break; 2243 2244 // X87Up generally doesn't occur here (long double is passed in 2245 // memory), except in situations involving unions. 2246 case X87Up: 2247 case SSE: 2248 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2249 2250 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2251 return ABIArgInfo::getDirect(HighPart, 8); 2252 2253 ++neededSSE; 2254 break; 2255 2256 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 2257 // eightbyte is passed in the upper half of the last used SSE 2258 // register. This only happens when 128-bit vectors are passed. 2259 case SSEUp: 2260 assert(Lo == SSE && "Unexpected SSEUp classification"); 2261 ResType = GetByteVectorType(Ty); 2262 break; 2263 } 2264 2265 // If a high part was specified, merge it together with the low part. It is 2266 // known to pass in the high eightbyte of the result. We do this by forming a 2267 // first class struct aggregate with the high and low part: {low, high} 2268 if (HighPart) 2269 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2270 2271 return ABIArgInfo::getDirect(ResType); 2272 } 2273 2274 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2275 2276 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2277 2278 // Keep track of the number of assigned registers. 2279 unsigned freeIntRegs = 6, freeSSERegs = 8; 2280 2281 // If the return value is indirect, then the hidden argument is consuming one 2282 // integer register. 2283 if (FI.getReturnInfo().isIndirect()) 2284 --freeIntRegs; 2285 2286 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 2287 // get assigned (in left-to-right order) for passing as follows... 
2288 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2289 it != ie; ++it) { 2290 unsigned neededInt, neededSSE; 2291 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt, 2292 neededSSE); 2293 2294 // AMD64-ABI 3.2.3p3: If there are no registers available for any 2295 // eightbyte of an argument, the whole argument is passed on the 2296 // stack. If registers have already been assigned for some 2297 // eightbytes of such an argument, the assignments get reverted. 2298 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 2299 freeIntRegs -= neededInt; 2300 freeSSERegs -= neededSSE; 2301 } else { 2302 it->info = getIndirectResult(it->type, freeIntRegs); 2303 } 2304 } 2305 } 2306 2307 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 2308 QualType Ty, 2309 CodeGenFunction &CGF) { 2310 llvm::Value *overflow_arg_area_p = 2311 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 2312 llvm::Value *overflow_arg_area = 2313 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 2314 2315 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 2316 // byte boundary if alignment needed by type exceeds 8 byte boundary. 2317 // It isn't stated explicitly in the standard, but in practice we use 2318 // alignment greater than 16 where necessary. 2319 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 2320 if (Align > 8) { 2321 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 2322 llvm::Value *Offset = 2323 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 2324 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 2325 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 2326 CGF.Int64Ty); 2327 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); 2328 overflow_arg_area = 2329 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 2330 overflow_arg_area->getType(), 2331 "overflow_arg_area.align"); 2332 } 2333 2334 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 2335 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2336 llvm::Value *Res = 2337 CGF.Builder.CreateBitCast(overflow_arg_area, 2338 llvm::PointerType::getUnqual(LTy)); 2339 2340 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 2341 // l->overflow_arg_area + sizeof(type). 2342 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 2343 // an 8 byte boundary. 2344 2345 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 2346 llvm::Value *Offset = 2347 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 2348 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 2349 "overflow_arg_area.next"); 2350 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 2351 2352 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 2353 return Res; 2354 } 2355 2356 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2357 CodeGenFunction &CGF) const { 2358 // Assume that va_list type is correct; should be pointer to LLVM type: 2359 // struct { 2360 // i32 gp_offset; 2361 // i32 fp_offset; 2362 // i8* overflow_arg_area; 2363 // i8* reg_save_area; 2364 // }; 2365 unsigned neededInt, neededSSE; 2366 2367 Ty = CGF.getContext().getCanonicalType(Ty); 2368 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE); 2369 2370 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 2371 // in the registers. If not go to step 7. 
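// Informally, the IR emitted below computes the following (a sketch only,
// using the va_list field names from the comment above; num_gp/num_fp stand
// for the neededInt/neededSSE counts computed earlier):
//
//   in_regs = (num_gp ? gp_offset <= 48 - num_gp*8  : true) &&
//             (num_fp ? fp_offset <= 176 - num_fp*16 : true);
//   if (in_regs) {
//     addr = reg_save_area + gp_offset and/or fp_offset (copied through a
//            temporary when the value is split across register classes);
//     gp_offset += num_gp*8;  fp_offset += num_fp*16;
//   } else {
//     addr = overflow path, as in EmitVAArgFromMemory above;
//   }
//   result = addr;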
2372 if (!neededInt && !neededSSE) 2373 return EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2374 2375 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 2376 // general purpose registers needed to pass type and num_fp to hold 2377 // the number of floating point registers needed. 2378 2379 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 2380 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 2381 // l->fp_offset > 304 - num_fp * 16 go to step 7. 2382 // 2383 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of 2384 // register save space). 2385 2386 llvm::Value *InRegs = 0; 2387 llvm::Value *gp_offset_p = 0, *gp_offset = 0; 2388 llvm::Value *fp_offset_p = 0, *fp_offset = 0; 2389 if (neededInt) { 2390 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); 2391 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); 2392 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); 2393 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); 2394 } 2395 2396 if (neededSSE) { 2397 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); 2398 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); 2399 llvm::Value *FitsInFP = 2400 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); 2401 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); 2402 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; 2403 } 2404 2405 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 2406 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 2407 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 2408 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 2409 2410 // Emit code to load the value if it was passed in registers. 2411 2412 CGF.EmitBlock(InRegBlock); 2413 2414 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with 2415 // an offset of l->gp_offset and/or l->fp_offset. This may require 2416 // copying to a temporary location in case the parameter is passed 2417 // in different register classes or requires an alignment greater 2418 // than 8 for general purpose registers and 16 for XMM registers. 2419 // 2420 // FIXME: This really results in shameful code when we end up needing to 2421 // collect arguments from different places; often what should result in a 2422 // simple assembling of a structure from scattered addresses has many more 2423 // loads than necessary. Can we clean this up? 2424 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2425 llvm::Value *RegAddr = 2426 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3), 2427 "reg_save_area"); 2428 if (neededInt && neededSSE) { 2429 // FIXME: Cleanup. 
2430 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 2431 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 2432 llvm::Value *Tmp = CGF.CreateTempAlloca(ST); 2433 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 2434 llvm::Type *TyLo = ST->getElementType(0); 2435 llvm::Type *TyHi = ST->getElementType(1); 2436 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 2437 "Unexpected ABI info for mixed regs"); 2438 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 2439 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 2440 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2441 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2442 llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr; 2443 llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr; 2444 llvm::Value *V = 2445 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 2446 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2447 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 2448 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2449 2450 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2451 llvm::PointerType::getUnqual(LTy)); 2452 } else if (neededInt) { 2453 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2454 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2455 llvm::PointerType::getUnqual(LTy)); 2456 } else if (neededSSE == 1) { 2457 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2458 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2459 llvm::PointerType::getUnqual(LTy)); 2460 } else { 2461 assert(neededSSE == 2 && "Invalid number of needed registers!"); 2462 // SSE registers are spaced 16 bytes apart in the register save 2463 // area, we need to collect the two eightbytes together. 2464 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2465 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); 2466 llvm::Type *DoubleTy = CGF.DoubleTy; 2467 llvm::Type *DblPtrTy = 2468 llvm::PointerType::getUnqual(DoubleTy); 2469 llvm::StructType *ST = llvm::StructType::get(DoubleTy, 2470 DoubleTy, NULL); 2471 llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST); 2472 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 2473 DblPtrTy)); 2474 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2475 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 2476 DblPtrTy)); 2477 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2478 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2479 llvm::PointerType::getUnqual(LTy)); 2480 } 2481 2482 // AMD64-ABI 3.5.7p5: Step 5. Set: 2483 // l->gp_offset = l->gp_offset + num_gp * 8 2484 // l->fp_offset = l->fp_offset + num_fp * 16. 2485 if (neededInt) { 2486 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2487 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2488 gp_offset_p); 2489 } 2490 if (neededSSE) { 2491 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2492 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2493 fp_offset_p); 2494 } 2495 CGF.EmitBranch(ContBlock); 2496 2497 // Emit code to load the value if it was passed in memory. 2498 2499 CGF.EmitBlock(InMemBlock); 2500 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2501 2502 // Return the appropriate result. 
2503 2504 CGF.EmitBlock(ContBlock); 2505 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2, 2506 "vaarg.addr"); 2507 ResAddr->addIncoming(RegAddr, InRegBlock); 2508 ResAddr->addIncoming(MemAddr, InMemBlock); 2509 return ResAddr; 2510 } 2511 2512 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const { 2513 2514 if (Ty->isVoidType()) 2515 return ABIArgInfo::getIgnore(); 2516 2517 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2518 Ty = EnumTy->getDecl()->getIntegerType(); 2519 2520 uint64_t Size = getContext().getTypeSize(Ty); 2521 2522 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2523 if (hasNonTrivialDestructorOrCopyConstructor(RT) || 2524 RT->getDecl()->hasFlexibleArrayMember()) 2525 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2526 2527 // FIXME: mingw-w64-gcc emits 128-bit struct as i128 2528 if (Size == 128 && 2529 getContext().getTargetInfo().getTriple().getOS() 2530 == llvm::Triple::MinGW32) 2531 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2532 Size)); 2533 2534 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 2535 // not 1, 2, 4, or 8 bytes, must be passed by reference." 2536 if (Size <= 64 && 2537 (Size & (Size - 1)) == 0) 2538 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2539 Size)); 2540 2541 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2542 } 2543 2544 if (Ty->isPromotableIntegerType()) 2545 return ABIArgInfo::getExtend(); 2546 2547 return ABIArgInfo::getDirect(); 2548 } 2549 2550 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2551 2552 QualType RetTy = FI.getReturnType(); 2553 FI.getReturnInfo() = classify(RetTy); 2554 2555 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2556 it != ie; ++it) 2557 it->info = classify(it->type); 2558 } 2559 2560 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2561 CodeGenFunction &CGF) const { 2562 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2563 2564 CGBuilderTy &Builder = CGF.Builder; 2565 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 2566 "ap"); 2567 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2568 llvm::Type *PTy = 2569 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2570 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2571 2572 uint64_t Offset = 2573 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8); 2574 llvm::Value *NextAddr = 2575 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2576 "ap.next"); 2577 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2578 2579 return AddrTyped; 2580 } 2581 2582 namespace { 2583 2584 class NaClX86_64ABIInfo : public ABIInfo { 2585 public: 2586 NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) 2587 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {} 2588 virtual void computeInfo(CGFunctionInfo &FI) const; 2589 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2590 CodeGenFunction &CGF) const; 2591 private: 2592 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv. 2593 X86_64ABIInfo NInfo; // Used for everything else. 
2594 };
2595 
2596 class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2597 public:
2598 NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
2599 : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {}
2600 };
2601 
2602 }
2603 
2604 void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2605 if (FI.getASTCallingConvention() == CC_PnaclCall)
2606 PInfo.computeInfo(FI);
2607 else
2608 NInfo.computeInfo(FI);
2609 }
2610 
2611 llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2612 CodeGenFunction &CGF) const {
2613 // Always use the native convention; calling pnacl-style varargs functions
2614 // is unsupported.
2615 return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
2616 }
2617 
2618 
2619 // PowerPC-32
2620 
2621 namespace {
2622 class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2623 public:
2624 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2625 
2626 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2627 // This is recovered from gcc output.
2628 return 1; // r1 is the dedicated stack pointer
2629 }
2630 
2631 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2632 llvm::Value *Address) const;
2633 };
2634 
2635 }
2636 
2637 bool
2638 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2639 llvm::Value *Address) const {
2640 // This is calculated from the LLVM and GCC tables and verified
2641 // against gcc output. AFAIK all ABIs use the same encoding.
2642 
2643 CodeGen::CGBuilderTy &Builder = CGF.Builder;
2644 
2645 llvm::IntegerType *i8 = CGF.Int8Ty;
2646 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2647 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
2648 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
2649 
2650 // 0-31: r0-31, the 4-byte general-purpose registers
2651 AssignToArrayRange(Builder, Address, Four8, 0, 31);
2652 
2653 // 32-63: fp0-31, the 8-byte floating-point registers
2654 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
2655 
2656 // 64-76 are various 4-byte special-purpose registers:
2657 // 64: mq
2658 // 65: lr
2659 // 66: ctr
2660 // 67: ap
2661 // 68-75 cr0-7
2662 // 76: xer
2663 AssignToArrayRange(Builder, Address, Four8, 64, 76);
2664 
2665 // 77-108: v0-31, the 16-byte vector registers
2666 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
2667 
2668 // 109: vrsave
2669 // 110: vscr
2670 // 111: spe_acc
2671 // 112: spefscr
2672 // 113: sfp
2673 AssignToArrayRange(Builder, Address, Four8, 109, 113);
2674 
2675 return false;
2676 }
2677 
2678 // PowerPC-64
2679 
2680 namespace {
2681 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
2682 class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
2683 
2684 public:
2685 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
2686 
2687 // TODO: We can add more logic to computeInfo to improve performance.
2688 // Example: For aggregate arguments that fit in a register, we could
2689 // use getDirectInReg (as is done below for structs containing a single
2690 // floating-point value) to avoid pushing them to memory on function
2691 // entry. This would require changing the logic in PPCISelLowering
2692 // when lowering the parameters in the caller and args in the callee.
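// As an illustration of the special case referred to above (the struct is
// hypothetical, not taken from this file): for 'struct F { float f; };' the
// computeInfo override below finds the lone floating-point element via
// isSingleElementStruct and passes it DirectInReg as a float instead of
// falling back to the default aggregate handling.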
2693 virtual void computeInfo(CGFunctionInfo &FI) const { 2694 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2695 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2696 it != ie; ++it) { 2697 // We rely on the default argument classification for the most part. 2698 // One exception: An aggregate containing a single floating-point 2699 // item must be passed in a register if one is available. 2700 const Type *T = isSingleElementStruct(it->type, getContext()); 2701 if (T) { 2702 const BuiltinType *BT = T->getAs<BuiltinType>(); 2703 if (BT && BT->isFloatingPoint()) { 2704 QualType QT(T, 0); 2705 it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); 2706 continue; 2707 } 2708 } 2709 it->info = classifyArgumentType(it->type); 2710 } 2711 } 2712 2713 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, 2714 QualType Ty, 2715 CodeGenFunction &CGF) const; 2716 }; 2717 2718 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { 2719 public: 2720 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT) 2721 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {} 2722 2723 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2724 // This is recovered from gcc output. 2725 return 1; // r1 is the dedicated stack pointer 2726 } 2727 2728 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2729 llvm::Value *Address) const; 2730 }; 2731 2732 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2733 public: 2734 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2735 2736 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2737 // This is recovered from gcc output. 2738 return 1; // r1 is the dedicated stack pointer 2739 } 2740 2741 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2742 llvm::Value *Address) const; 2743 }; 2744 2745 } 2746 2747 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 2748 llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr, 2749 QualType Ty, 2750 CodeGenFunction &CGF) const { 2751 llvm::Type *BP = CGF.Int8PtrTy; 2752 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2753 2754 CGBuilderTy &Builder = CGF.Builder; 2755 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 2756 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2757 2758 // Update the va_list pointer. 2759 unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8; 2760 unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8); 2761 llvm::Value *NextAddr = 2762 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), 2763 "ap.next"); 2764 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2765 2766 // If the argument is smaller than 8 bytes, it is right-adjusted in 2767 // its doubleword slot. Adjust the pointer to pick it up from the 2768 // correct offset. 2769 if (SizeInBytes < 8) { 2770 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 2771 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes)); 2772 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 2773 } 2774 2775 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2776 return Builder.CreateBitCast(Addr, PTy); 2777 } 2778 2779 static bool 2780 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2781 llvm::Value *Address) { 2782 // This is calculated from the LLVM and GCC tables and verified 2783 // against gcc output. AFAIK all ABIs use the same encoding. 
2784 2785 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2786 2787 llvm::IntegerType *i8 = CGF.Int8Ty; 2788 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2789 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2790 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2791 2792 // 0-31: r0-31, the 8-byte general-purpose registers 2793 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 2794 2795 // 32-63: fp0-31, the 8-byte floating-point registers 2796 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2797 2798 // 64-76 are various 4-byte special-purpose registers: 2799 // 64: mq 2800 // 65: lr 2801 // 66: ctr 2802 // 67: ap 2803 // 68-75 cr0-7 2804 // 76: xer 2805 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2806 2807 // 77-108: v0-31, the 16-byte vector registers 2808 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2809 2810 // 109: vrsave 2811 // 110: vscr 2812 // 111: spe_acc 2813 // 112: spefscr 2814 // 113: sfp 2815 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2816 2817 return false; 2818 } 2819 2820 bool 2821 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( 2822 CodeGen::CodeGenFunction &CGF, 2823 llvm::Value *Address) const { 2824 2825 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 2826 } 2827 2828 bool 2829 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2830 llvm::Value *Address) const { 2831 2832 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 2833 } 2834 2835 //===----------------------------------------------------------------------===// 2836 // ARM ABI Implementation 2837 //===----------------------------------------------------------------------===// 2838 2839 namespace { 2840 2841 class ARMABIInfo : public ABIInfo { 2842 public: 2843 enum ABIKind { 2844 APCS = 0, 2845 AAPCS = 1, 2846 AAPCS_VFP 2847 }; 2848 2849 private: 2850 ABIKind Kind; 2851 2852 public: 2853 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {} 2854 2855 bool isEABI() const { 2856 StringRef Env = 2857 getContext().getTargetInfo().getTriple().getEnvironmentName(); 2858 return (Env == "gnueabi" || Env == "eabi" || 2859 Env == "android" || Env == "androideabi"); 2860 } 2861 2862 private: 2863 ABIKind getABIKind() const { return Kind; } 2864 2865 ABIArgInfo classifyReturnType(QualType RetTy) const; 2866 ABIArgInfo classifyArgumentType(QualType RetTy) const; 2867 bool isIllegalVectorType(QualType Ty) const; 2868 2869 virtual void computeInfo(CGFunctionInfo &FI) const; 2870 2871 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2872 CodeGenFunction &CGF) const; 2873 }; 2874 2875 class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 2876 public: 2877 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 2878 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 2879 2880 const ARMABIInfo &getABIInfo() const { 2881 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 2882 } 2883 2884 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2885 return 13; 2886 } 2887 2888 StringRef getARCRetainAutoreleasedReturnValueMarker() const { 2889 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; 2890 } 2891 2892 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2893 llvm::Value *Address) const { 2894 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 2895 2896 // 0-15 are the 16 integer registers. 
2897 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); 2898 return false; 2899 } 2900 2901 unsigned getSizeOfUnwindException() const { 2902 if (getABIInfo().isEABI()) return 88; 2903 return TargetCodeGenInfo::getSizeOfUnwindException(); 2904 } 2905 }; 2906 2907 } 2908 2909 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 2910 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2911 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2912 it != ie; ++it) 2913 it->info = classifyArgumentType(it->type); 2914 2915 // Always honor user-specified calling convention. 2916 if (FI.getCallingConvention() != llvm::CallingConv::C) 2917 return; 2918 2919 // Calling convention as default by an ABI. 2920 llvm::CallingConv::ID DefaultCC; 2921 if (getContext().getTargetInfo().getTriple().getEnvironmentName()=="gnueabihf") 2922 DefaultCC = llvm::CallingConv::ARM_AAPCS_VFP; 2923 else if (isEABI()) 2924 DefaultCC = llvm::CallingConv::ARM_AAPCS; 2925 else 2926 DefaultCC = llvm::CallingConv::ARM_APCS; 2927 2928 // If user did not ask for specific calling convention explicitly (e.g. via 2929 // pcs attribute), set effective calling convention if it's different than ABI 2930 // default. 2931 switch (getABIKind()) { 2932 case APCS: 2933 if (DefaultCC != llvm::CallingConv::ARM_APCS) 2934 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS); 2935 break; 2936 case AAPCS: 2937 if (DefaultCC != llvm::CallingConv::ARM_AAPCS) 2938 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS); 2939 break; 2940 case AAPCS_VFP: 2941 if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP) 2942 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP); 2943 break; 2944 } 2945 } 2946 2947 /// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous 2948 /// aggregate. If HAMembers is non-null, the number of base elements 2949 /// contained in the type is returned through it; this is used for the 2950 /// recursive calls that check aggregate component types. 2951 static bool isHomogeneousAggregate(QualType Ty, const Type *&Base, 2952 ASTContext &Context, 2953 uint64_t *HAMembers = 0) { 2954 uint64_t Members = 0; 2955 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 2956 if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members)) 2957 return false; 2958 Members *= AT->getSize().getZExtValue(); 2959 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 2960 const RecordDecl *RD = RT->getDecl(); 2961 if (RD->hasFlexibleArrayMember()) 2962 return false; 2963 2964 Members = 0; 2965 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2966 i != e; ++i) { 2967 const FieldDecl *FD = *i; 2968 uint64_t FldMembers; 2969 if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers)) 2970 return false; 2971 2972 Members = (RD->isUnion() ? 2973 std::max(Members, FldMembers) : Members + FldMembers); 2974 } 2975 } else { 2976 Members = 1; 2977 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 2978 Members = 2; 2979 Ty = CT->getElementType(); 2980 } 2981 2982 // Homogeneous aggregates for AAPCS-VFP must have base types of float, 2983 // double, or 64-bit or 128-bit vectors. 
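// For example (hypothetical types for illustration): 'struct { float x, y, z; }'
// is a homogeneous aggregate with Base = float and Members = 3, while
// 'struct { float f; double d; }' is rejected because its members do not share
// a single base type.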
2984 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 2985 if (BT->getKind() != BuiltinType::Float && 2986 BT->getKind() != BuiltinType::Double && 2987 BT->getKind() != BuiltinType::LongDouble) 2988 return false; 2989 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 2990 unsigned VecSize = Context.getTypeSize(VT); 2991 if (VecSize != 64 && VecSize != 128) 2992 return false; 2993 } else { 2994 return false; 2995 } 2996 2997 // The base type must be the same for all members. Vector types of the 2998 // same total size are treated as being equivalent here. 2999 const Type *TyPtr = Ty.getTypePtr(); 3000 if (!Base) 3001 Base = TyPtr; 3002 if (Base != TyPtr && 3003 (!Base->isVectorType() || !TyPtr->isVectorType() || 3004 Context.getTypeSize(Base) != Context.getTypeSize(TyPtr))) 3005 return false; 3006 } 3007 3008 // Homogeneous Aggregates can have at most 4 members of the base type. 3009 if (HAMembers) 3010 *HAMembers = Members; 3011 3012 return (Members > 0 && Members <= 4); 3013 } 3014 3015 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const { 3016 // Handle illegal vector types here. 3017 if (isIllegalVectorType(Ty)) { 3018 uint64_t Size = getContext().getTypeSize(Ty); 3019 if (Size <= 32) { 3020 llvm::Type *ResType = 3021 llvm::Type::getInt32Ty(getVMContext()); 3022 return ABIArgInfo::getDirect(ResType); 3023 } 3024 if (Size == 64) { 3025 llvm::Type *ResType = llvm::VectorType::get( 3026 llvm::Type::getInt32Ty(getVMContext()), 2); 3027 return ABIArgInfo::getDirect(ResType); 3028 } 3029 if (Size == 128) { 3030 llvm::Type *ResType = llvm::VectorType::get( 3031 llvm::Type::getInt32Ty(getVMContext()), 4); 3032 return ABIArgInfo::getDirect(ResType); 3033 } 3034 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3035 } 3036 3037 if (!isAggregateTypeForABI(Ty)) { 3038 // Treat an enum type as its underlying type. 3039 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3040 Ty = EnumTy->getDecl()->getIntegerType(); 3041 3042 return (Ty->isPromotableIntegerType() ? 3043 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3044 } 3045 3046 // Ignore empty records. 3047 if (isEmptyRecord(getContext(), Ty, true)) 3048 return ABIArgInfo::getIgnore(); 3049 3050 // Structures with either a non-trivial destructor or a non-trivial 3051 // copy constructor are always indirect. 3052 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 3053 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3054 3055 if (getABIKind() == ARMABIInfo::AAPCS_VFP) { 3056 // Homogeneous Aggregates need to be expanded. 3057 const Type *Base = 0; 3058 if (isHomogeneousAggregate(Ty, Base, getContext())) { 3059 assert(Base && "Base class should be set for homogeneous aggregate"); 3060 return ABIArgInfo::getExpand(); 3061 } 3062 } 3063 3064 // Support byval for ARM. 3065 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64) || 3066 getContext().getTypeAlign(Ty) > 64) { 3067 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 3068 } 3069 3070 // Otherwise, pass by coercing to a structure of the appropriate size. 3071 llvm::Type* ElemTy; 3072 unsigned SizeRegs; 3073 // FIXME: Try to match the types of the arguments more accurately where 3074 // we can. 
3075 if (getContext().getTypeAlign(Ty) <= 32) { 3076 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 3077 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 3078 } else { 3079 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 3080 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 3081 } 3082 3083 llvm::Type *STy = 3084 llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL); 3085 return ABIArgInfo::getDirect(STy); 3086 } 3087 3088 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 3089 llvm::LLVMContext &VMContext) { 3090 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 3091 // is called integer-like if its size is less than or equal to one word, and 3092 // the offset of each of its addressable sub-fields is zero. 3093 3094 uint64_t Size = Context.getTypeSize(Ty); 3095 3096 // Check that the type fits in a word. 3097 if (Size > 32) 3098 return false; 3099 3100 // FIXME: Handle vector types! 3101 if (Ty->isVectorType()) 3102 return false; 3103 3104 // Float types are never treated as "integer like". 3105 if (Ty->isRealFloatingType()) 3106 return false; 3107 3108 // If this is a builtin or pointer type then it is ok. 3109 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 3110 return true; 3111 3112 // Small complex integer types are "integer like". 3113 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 3114 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 3115 3116 // Single element and zero sized arrays should be allowed, by the definition 3117 // above, but they are not. 3118 3119 // Otherwise, it must be a record type. 3120 const RecordType *RT = Ty->getAs<RecordType>(); 3121 if (!RT) return false; 3122 3123 // Ignore records with flexible arrays. 3124 const RecordDecl *RD = RT->getDecl(); 3125 if (RD->hasFlexibleArrayMember()) 3126 return false; 3127 3128 // Check that all sub-fields are at offset 0, and are themselves "integer 3129 // like". 3130 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 3131 3132 bool HadField = false; 3133 unsigned idx = 0; 3134 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3135 i != e; ++i, ++idx) { 3136 const FieldDecl *FD = *i; 3137 3138 // Bit-fields are not addressable, we only need to verify they are "integer 3139 // like". We still have to disallow a subsequent non-bitfield, for example: 3140 // struct { int : 0; int x } 3141 // is non-integer like according to gcc. 3142 if (FD->isBitField()) { 3143 if (!RD->isUnion()) 3144 HadField = true; 3145 3146 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 3147 return false; 3148 3149 continue; 3150 } 3151 3152 // Check if this field is at offset 0. 3153 if (Layout.getFieldOffset(idx) != 0) 3154 return false; 3155 3156 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 3157 return false; 3158 3159 // Only allow at most one field in a structure. This doesn't match the 3160 // wording above, but follows gcc in situations with a field following an 3161 // empty structure. 3162 if (!RD->isUnion()) { 3163 if (HadField) 3164 return false; 3165 3166 HadField = true; 3167 } 3168 } 3169 3170 return true; 3171 } 3172 3173 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const { 3174 if (RetTy->isVoidType()) 3175 return ABIArgInfo::getIgnore(); 3176 3177 // Large vector types should be returned via memory. 
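// (For instance, a 256-bit vector declared with __attribute__((vector_size(32)))
// exceeds 128 bits and is returned indirectly by the check below; the attribute
// spelling is just an illustration.)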
3178 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 3179 return ABIArgInfo::getIndirect(0); 3180 3181 if (!isAggregateTypeForABI(RetTy)) { 3182 // Treat an enum type as its underlying type. 3183 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3184 RetTy = EnumTy->getDecl()->getIntegerType(); 3185 3186 return (RetTy->isPromotableIntegerType() ? 3187 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3188 } 3189 3190 // Structures with either a non-trivial destructor or a non-trivial 3191 // copy constructor are always indirect. 3192 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 3193 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3194 3195 // Are we following APCS? 3196 if (getABIKind() == APCS) { 3197 if (isEmptyRecord(getContext(), RetTy, false)) 3198 return ABIArgInfo::getIgnore(); 3199 3200 // Complex types are all returned as packed integers. 3201 // 3202 // FIXME: Consider using 2 x vector types if the back end handles them 3203 // correctly. 3204 if (RetTy->isAnyComplexType()) 3205 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 3206 getContext().getTypeSize(RetTy))); 3207 3208 // Integer like structures are returned in r0. 3209 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 3210 // Return in the smallest viable integer type. 3211 uint64_t Size = getContext().getTypeSize(RetTy); 3212 if (Size <= 8) 3213 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3214 if (Size <= 16) 3215 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3216 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3217 } 3218 3219 // Otherwise return in memory. 3220 return ABIArgInfo::getIndirect(0); 3221 } 3222 3223 // Otherwise this is an AAPCS variant. 3224 3225 if (isEmptyRecord(getContext(), RetTy, true)) 3226 return ABIArgInfo::getIgnore(); 3227 3228 // Check for homogeneous aggregates with AAPCS-VFP. 3229 if (getABIKind() == AAPCS_VFP) { 3230 const Type *Base = 0; 3231 if (isHomogeneousAggregate(RetTy, Base, getContext())) { 3232 assert(Base && "Base class should be set for homogeneous aggregate"); 3233 // Homogeneous Aggregates are returned directly. 3234 return ABIArgInfo::getDirect(); 3235 } 3236 } 3237 3238 // Aggregates <= 4 bytes are returned in r0; other aggregates 3239 // are returned indirectly. 3240 uint64_t Size = getContext().getTypeSize(RetTy); 3241 if (Size <= 32) { 3242 // Return in the smallest viable integer type. 3243 if (Size <= 8) 3244 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3245 if (Size <= 16) 3246 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3247 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3248 } 3249 3250 return ABIArgInfo::getIndirect(0); 3251 } 3252 3253 /// isIllegalVector - check whether Ty is an illegal vector type. 3254 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const { 3255 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3256 // Check whether VT is legal. 3257 unsigned NumElements = VT->getNumElements(); 3258 uint64_t Size = getContext().getTypeSize(VT); 3259 // NumElements should be power of 2. 3260 if ((NumElements & (NumElements - 1)) != 0) 3261 return true; 3262 // Size should be greater than 32 bits. 
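    // For illustration (hypothetical examples, not from the source tree): a
    // 2 x i8 vector is only 16 bits and is reported as illegal here, while a
    // 4 x i32 vector is 128 bits with a power-of-two element count and is
    // treated as legal.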
3263 return Size <= 32; 3264 } 3265 return false; 3266 } 3267 3268 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3269 CodeGenFunction &CGF) const { 3270 llvm::Type *BP = CGF.Int8PtrTy; 3271 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3272 3273 CGBuilderTy &Builder = CGF.Builder; 3274 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 3275 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3276 3277 uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8; 3278 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; 3279 bool IsIndirect = false; 3280 3281 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for 3282 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte. 3283 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 3284 getABIKind() == ARMABIInfo::AAPCS) 3285 TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 3286 else 3287 TyAlign = 4; 3288 // Use indirect if size of the illegal vector is bigger than 16 bytes. 3289 if (isIllegalVectorType(Ty) && Size > 16) { 3290 IsIndirect = true; 3291 Size = 4; 3292 TyAlign = 4; 3293 } 3294 3295 // Handle address alignment for ABI alignment > 4 bytes. 3296 if (TyAlign > 4) { 3297 assert((TyAlign & (TyAlign - 1)) == 0 && 3298 "Alignment is not power of 2!"); 3299 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty); 3300 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1)); 3301 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1))); 3302 Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align"); 3303 } 3304 3305 uint64_t Offset = 3306 llvm::RoundUpToAlignment(Size, 4); 3307 llvm::Value *NextAddr = 3308 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 3309 "ap.next"); 3310 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3311 3312 if (IsIndirect) 3313 Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP)); 3314 else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) { 3315 // We can't directly cast ap.cur to pointer to a vector type, since ap.cur 3316 // may not be correctly aligned for the vector type. We create an aligned 3317 // temporary space and copy the content over from ap.cur to the temporary 3318 // space. This is necessary if the natural alignment of the type is greater 3319 // than the ABI alignment. 3320 llvm::Type *I8PtrTy = Builder.getInt8PtrTy(); 3321 CharUnits CharSize = getContext().getTypeSizeInChars(Ty); 3322 llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty), 3323 "var.align"); 3324 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy); 3325 llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy); 3326 Builder.CreateMemCpy(Dst, Src, 3327 llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()), 3328 TyAlign, false); 3329 Addr = AlignedTemp; //The content is in aligned location. 
3330 } 3331 llvm::Type *PTy = 3332 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3333 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 3334 3335 return AddrTyped; 3336 } 3337 3338 namespace { 3339 3340 class NaClARMABIInfo : public ABIInfo { 3341 public: 3342 NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind) 3343 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {} 3344 virtual void computeInfo(CGFunctionInfo &FI) const; 3345 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3346 CodeGenFunction &CGF) const; 3347 private: 3348 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv. 3349 ARMABIInfo NInfo; // Used for everything else. 3350 }; 3351 3352 class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo { 3353 public: 3354 NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind) 3355 : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {} 3356 }; 3357 3358 } 3359 3360 void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 3361 if (FI.getASTCallingConvention() == CC_PnaclCall) 3362 PInfo.computeInfo(FI); 3363 else 3364 static_cast<const ABIInfo&>(NInfo).computeInfo(FI); 3365 } 3366 3367 llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3368 CodeGenFunction &CGF) const { 3369 // Always use the native convention; calling pnacl-style varargs functions 3370 // is unsupported. 3371 return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF); 3372 } 3373 3374 //===----------------------------------------------------------------------===// 3375 // NVPTX ABI Implementation 3376 //===----------------------------------------------------------------------===// 3377 3378 namespace { 3379 3380 class NVPTXABIInfo : public ABIInfo { 3381 public: 3382 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3383 3384 ABIArgInfo classifyReturnType(QualType RetTy) const; 3385 ABIArgInfo classifyArgumentType(QualType Ty) const; 3386 3387 virtual void computeInfo(CGFunctionInfo &FI) const; 3388 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3389 CodeGenFunction &CFG) const; 3390 }; 3391 3392 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { 3393 public: 3394 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT) 3395 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {} 3396 3397 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3398 CodeGen::CodeGenModule &M) const; 3399 }; 3400 3401 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const { 3402 if (RetTy->isVoidType()) 3403 return ABIArgInfo::getIgnore(); 3404 if (isAggregateTypeForABI(RetTy)) 3405 return ABIArgInfo::getIndirect(0); 3406 return ABIArgInfo::getDirect(); 3407 } 3408 3409 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const { 3410 if (isAggregateTypeForABI(Ty)) 3411 return ABIArgInfo::getIndirect(0); 3412 3413 return ABIArgInfo::getDirect(); 3414 } 3415 3416 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const { 3417 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3418 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3419 it != ie; ++it) 3420 it->info = classifyArgumentType(it->type); 3421 3422 // Always honor user-specified calling convention. 3423 if (FI.getCallingConvention() != llvm::CallingConv::C) 3424 return; 3425 3426 // Calling convention as default by an ABI. 3427 // We're still using the PTX_Kernel/PTX_Device calling conventions here, 3428 // but we should switch to NVVM metadata later on. 
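  // (Note on the flow below, inferred from this file: in OpenCL or CUDA mode
  // every function first defaults to PTX_Device here; functions marked as
  // kernels are later switched to PTX_Kernel by
  // NVPTXTargetCodeGenInfo::SetTargetAttributes.)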
3429 llvm::CallingConv::ID DefaultCC; 3430 const LangOptions &LangOpts = getContext().getLangOpts(); 3431 if (LangOpts.OpenCL || LangOpts.CUDA) { 3432 // If we are in OpenCL or CUDA mode, then default to device functions 3433 DefaultCC = llvm::CallingConv::PTX_Device; 3434 } else { 3435 // If we are in standard C/C++ mode, use the triple to decide on the default 3436 StringRef Env = 3437 getContext().getTargetInfo().getTriple().getEnvironmentName(); 3438 if (Env == "device") 3439 DefaultCC = llvm::CallingConv::PTX_Device; 3440 else 3441 DefaultCC = llvm::CallingConv::PTX_Kernel; 3442 } 3443 FI.setEffectiveCallingConvention(DefaultCC); 3444 3445 } 3446 3447 llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3448 CodeGenFunction &CFG) const { 3449 llvm_unreachable("NVPTX does not support varargs"); 3450 } 3451 3452 void NVPTXTargetCodeGenInfo:: 3453 SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3454 CodeGen::CodeGenModule &M) const{ 3455 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3456 if (!FD) return; 3457 3458 llvm::Function *F = cast<llvm::Function>(GV); 3459 3460 // Perform special handling in OpenCL mode 3461 if (M.getLangOpts().OpenCL) { 3462 // Use OpenCL function attributes to set proper calling conventions 3463 // By default, all functions are device functions 3464 if (FD->hasAttr<OpenCLKernelAttr>()) { 3465 // OpenCL __kernel functions get a kernel calling convention 3466 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 3467 // And kernel functions are not subject to inlining 3468 F->addFnAttr(llvm::Attributes::NoInline); 3469 } 3470 } 3471 3472 // Perform special handling in CUDA mode. 3473 if (M.getLangOpts().CUDA) { 3474 // CUDA __global__ functions get a kernel calling convention. Since 3475 // __global__ functions cannot be called from the device, we do not 3476 // need to set the noinline attribute. 3477 if (FD->getAttr<CUDAGlobalAttr>()) 3478 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 3479 } 3480 } 3481 3482 } 3483 3484 //===----------------------------------------------------------------------===// 3485 // MBlaze ABI Implementation 3486 //===----------------------------------------------------------------------===// 3487 3488 namespace { 3489 3490 class MBlazeABIInfo : public ABIInfo { 3491 public: 3492 MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3493 3494 bool isPromotableIntegerType(QualType Ty) const; 3495 3496 ABIArgInfo classifyReturnType(QualType RetTy) const; 3497 ABIArgInfo classifyArgumentType(QualType RetTy) const; 3498 3499 virtual void computeInfo(CGFunctionInfo &FI) const { 3500 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3501 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3502 it != ie; ++it) 3503 it->info = classifyArgumentType(it->type); 3504 } 3505 3506 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3507 CodeGenFunction &CGF) const; 3508 }; 3509 3510 class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo { 3511 public: 3512 MBlazeTargetCodeGenInfo(CodeGenTypes &CGT) 3513 : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {} 3514 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3515 CodeGen::CodeGenModule &M) const; 3516 }; 3517 3518 } 3519 3520 bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const { 3521 // MBlaze ABI requires all 8 and 16 bit quantities to be extended. 
3522 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 3523 switch (BT->getKind()) { 3524 case BuiltinType::Bool: 3525 case BuiltinType::Char_S: 3526 case BuiltinType::Char_U: 3527 case BuiltinType::SChar: 3528 case BuiltinType::UChar: 3529 case BuiltinType::Short: 3530 case BuiltinType::UShort: 3531 return true; 3532 default: 3533 return false; 3534 } 3535 return false; 3536 } 3537 3538 llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3539 CodeGenFunction &CGF) const { 3540 // FIXME: Implement 3541 return 0; 3542 } 3543 3544 3545 ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const { 3546 if (RetTy->isVoidType()) 3547 return ABIArgInfo::getIgnore(); 3548 if (isAggregateTypeForABI(RetTy)) 3549 return ABIArgInfo::getIndirect(0); 3550 3551 return (isPromotableIntegerType(RetTy) ? 3552 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3553 } 3554 3555 ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const { 3556 if (isAggregateTypeForABI(Ty)) 3557 return ABIArgInfo::getIndirect(0); 3558 3559 return (isPromotableIntegerType(Ty) ? 3560 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3561 } 3562 3563 void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3564 llvm::GlobalValue *GV, 3565 CodeGen::CodeGenModule &M) 3566 const { 3567 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3568 if (!FD) return; 3569 3570 llvm::CallingConv::ID CC = llvm::CallingConv::C; 3571 if (FD->hasAttr<MBlazeInterruptHandlerAttr>()) 3572 CC = llvm::CallingConv::MBLAZE_INTR; 3573 else if (FD->hasAttr<MBlazeSaveVolatilesAttr>()) 3574 CC = llvm::CallingConv::MBLAZE_SVOL; 3575 3576 if (CC != llvm::CallingConv::C) { 3577 // Handle 'interrupt_handler' attribute: 3578 llvm::Function *F = cast<llvm::Function>(GV); 3579 3580 // Step 1: Set ISR calling convention. 3581 F->setCallingConv(CC); 3582 3583 // Step 2: Add attributes goodness. 3584 F->addFnAttr(llvm::Attributes::NoInline); 3585 } 3586 3587 // Step 3: Emit _interrupt_handler alias. 3588 if (CC == llvm::CallingConv::MBLAZE_INTR) 3589 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 3590 "_interrupt_handler", GV, &M.getModule()); 3591 } 3592 3593 3594 //===----------------------------------------------------------------------===// 3595 // MSP430 ABI Implementation 3596 //===----------------------------------------------------------------------===// 3597 3598 namespace { 3599 3600 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 3601 public: 3602 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 3603 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 3604 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3605 CodeGen::CodeGenModule &M) const; 3606 }; 3607 3608 } 3609 3610 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3611 llvm::GlobalValue *GV, 3612 CodeGen::CodeGenModule &M) const { 3613 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 3614 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 3615 // Handle 'interrupt' attribute: 3616 llvm::Function *F = cast<llvm::Function>(GV); 3617 3618 // Step 1: Set ISR calling convention. 3619 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 3620 3621 // Step 2: Add attributes goodness. 3622 F->addFnAttr(llvm::Attributes::NoInline); 3623 3624 // Step 3: Emit ISR vector alias. 
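      // For illustration (hypothetical value, not from the source tree): an
      // interrupt attribute number of 12 gives Num = 0xffe0 + 12 = 0xffec
      // below, so the alias is named "vector_" followed by that value in hex.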
3625 unsigned Num = attr->getNumber() + 0xffe0; 3626 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 3627 "vector_" + Twine::utohexstr(Num), 3628 GV, &M.getModule()); 3629 } 3630 } 3631 } 3632 3633 //===----------------------------------------------------------------------===// 3634 // MIPS ABI Implementation. This works for both little-endian and 3635 // big-endian variants. 3636 //===----------------------------------------------------------------------===// 3637 3638 namespace { 3639 class MipsABIInfo : public ABIInfo { 3640 bool IsO32; 3641 unsigned MinABIStackAlignInBytes, StackAlignInBytes; 3642 void CoerceToIntArgs(uint64_t TySize, 3643 SmallVector<llvm::Type*, 8> &ArgList) const; 3644 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; 3645 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 3646 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 3647 public: 3648 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 3649 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), 3650 StackAlignInBytes(IsO32 ? 8 : 16) {} 3651 3652 ABIArgInfo classifyReturnType(QualType RetTy) const; 3653 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 3654 virtual void computeInfo(CGFunctionInfo &FI) const; 3655 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3656 CodeGenFunction &CGF) const; 3657 }; 3658 3659 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 3660 unsigned SizeOfUnwindException; 3661 public: 3662 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 3663 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 3664 SizeOfUnwindException(IsO32 ? 24 : 32) {} 3665 3666 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 3667 return 29; 3668 } 3669 3670 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3671 llvm::Value *Address) const; 3672 3673 unsigned getSizeOfUnwindException() const { 3674 return SizeOfUnwindException; 3675 } 3676 }; 3677 } 3678 3679 void MipsABIInfo::CoerceToIntArgs(uint64_t TySize, 3680 SmallVector<llvm::Type*, 8> &ArgList) const { 3681 llvm::IntegerType *IntTy = 3682 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 3683 3684 // Add (TySize / MinABIStackAlignInBytes) args of IntTy. 3685 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) 3686 ArgList.push_back(IntTy); 3687 3688 // If necessary, add one more integer type to ArgList. 3689 unsigned R = TySize % (MinABIStackAlignInBytes * 8); 3690 3691 if (R) 3692 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 3693 } 3694 3695 // In N32/64, an aligned double precision floating point field is passed in 3696 // a register. 3697 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { 3698 SmallVector<llvm::Type*, 8> ArgList, IntArgList; 3699 3700 if (IsO32) { 3701 CoerceToIntArgs(TySize, ArgList); 3702 return llvm::StructType::get(getVMContext(), ArgList); 3703 } 3704 3705 if (Ty->isComplexType()) 3706 return CGT.ConvertType(Ty); 3707 3708 const RecordType *RT = Ty->getAs<RecordType>(); 3709 3710 // Unions/vectors are passed in integer registers. 
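  // For illustration (hypothetical example, not from the source tree): on
  // N32/N64, where MinABIStackAlignInBytes is 8, a 16-byte union is coerced
  // by CoerceToIntArgs below to the LLVM type { i64, i64 }; only structs and
  // classes get the aligned-double handling that follows.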
3711 if (!RT || !RT->isStructureOrClassType()) { 3712 CoerceToIntArgs(TySize, ArgList); 3713 return llvm::StructType::get(getVMContext(), ArgList); 3714 } 3715 3716 const RecordDecl *RD = RT->getDecl(); 3717 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3718 assert(!(TySize % 8) && "Size of structure must be multiple of 8."); 3719 3720 uint64_t LastOffset = 0; 3721 unsigned idx = 0; 3722 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 3723 3724 // Iterate over fields in the struct/class and check if there are any aligned 3725 // double fields. 3726 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3727 i != e; ++i, ++idx) { 3728 const QualType Ty = i->getType(); 3729 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 3730 3731 if (!BT || BT->getKind() != BuiltinType::Double) 3732 continue; 3733 3734 uint64_t Offset = Layout.getFieldOffset(idx); 3735 if (Offset % 64) // Ignore doubles that are not aligned. 3736 continue; 3737 3738 // Add ((Offset - LastOffset) / 64) args of type i64. 3739 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 3740 ArgList.push_back(I64); 3741 3742 // Add double type. 3743 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 3744 LastOffset = Offset + 64; 3745 } 3746 3747 CoerceToIntArgs(TySize - LastOffset, IntArgList); 3748 ArgList.append(IntArgList.begin(), IntArgList.end()); 3749 3750 return llvm::StructType::get(getVMContext(), ArgList); 3751 } 3752 3753 llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const { 3754 assert((Offset % MinABIStackAlignInBytes) == 0); 3755 3756 if ((Align - 1) & Offset) 3757 return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 3758 3759 return 0; 3760 } 3761 3762 ABIArgInfo 3763 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 3764 uint64_t OrigOffset = Offset; 3765 uint64_t TySize = getContext().getTypeSize(Ty); 3766 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 3767 3768 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), 3769 (uint64_t)StackAlignInBytes); 3770 Offset = llvm::RoundUpToAlignment(Offset, Align); 3771 Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8; 3772 3773 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { 3774 // Ignore empty aggregates. 3775 if (TySize == 0) 3776 return ABIArgInfo::getIgnore(); 3777 3778 // Records with non trivial destructors/constructors should not be passed 3779 // by value. 3780 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) { 3781 Offset = OrigOffset + MinABIStackAlignInBytes; 3782 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3783 } 3784 3785 // If we have reached here, aggregates are passed directly by coercing to 3786 // another structure type. Padding is inserted if the offset of the 3787 // aggregate is unaligned. 3788 return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, 3789 getPaddingType(Align, OrigOffset)); 3790 } 3791 3792 // Treat an enum type as its underlying type. 
3793 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3794 Ty = EnumTy->getDecl()->getIntegerType(); 3795 3796 if (Ty->isPromotableIntegerType()) 3797 return ABIArgInfo::getExtend(); 3798 3799 return ABIArgInfo::getDirect(0, 0, getPaddingType(Align, OrigOffset)); 3800 } 3801 3802 llvm::Type* 3803 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 3804 const RecordType *RT = RetTy->getAs<RecordType>(); 3805 SmallVector<llvm::Type*, 8> RTList; 3806 3807 if (RT && RT->isStructureOrClassType()) { 3808 const RecordDecl *RD = RT->getDecl(); 3809 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3810 unsigned FieldCnt = Layout.getFieldCount(); 3811 3812 // N32/64 returns struct/classes in floating point registers if the 3813 // following conditions are met: 3814 // 1. The size of the struct/class is no larger than 128-bit. 3815 // 2. The struct/class has one or two fields all of which are floating 3816 // point types. 3817 // 3. The offset of the first field is zero (this follows what gcc does). 3818 // 3819 // Any other composite results are returned in integer registers. 3820 // 3821 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 3822 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 3823 for (; b != e; ++b) { 3824 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 3825 3826 if (!BT || !BT->isFloatingPoint()) 3827 break; 3828 3829 RTList.push_back(CGT.ConvertType(b->getType())); 3830 } 3831 3832 if (b == e) 3833 return llvm::StructType::get(getVMContext(), RTList, 3834 RD->hasAttr<PackedAttr>()); 3835 3836 RTList.clear(); 3837 } 3838 } 3839 3840 CoerceToIntArgs(Size, RTList); 3841 return llvm::StructType::get(getVMContext(), RTList); 3842 } 3843 3844 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 3845 uint64_t Size = getContext().getTypeSize(RetTy); 3846 3847 if (RetTy->isVoidType() || Size == 0) 3848 return ABIArgInfo::getIgnore(); 3849 3850 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 3851 if (Size <= 128) { 3852 if (RetTy->isAnyComplexType()) 3853 return ABIArgInfo::getDirect(); 3854 3855 // O32 returns integer vectors in registers. 3856 if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation()) 3857 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 3858 3859 if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 3860 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 3861 } 3862 3863 return ABIArgInfo::getIndirect(0); 3864 } 3865 3866 // Treat an enum type as its underlying type. 3867 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3868 RetTy = EnumTy->getDecl()->getIntegerType(); 3869 3870 return (RetTy->isPromotableIntegerType() ? 3871 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3872 } 3873 3874 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 3875 ABIArgInfo &RetInfo = FI.getReturnInfo(); 3876 RetInfo = classifyReturnType(FI.getReturnType()); 3877 3878 // Check if a pointer to an aggregate is passed as a hidden argument. 3879 uint64_t Offset = RetInfo.isIndirect() ? 
    MinABIStackAlignInBytes : 0;
3880 
3881   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3882        it != ie; ++it)
3883     it->info = classifyArgumentType(it->type, Offset);
3884 }
3885 
3886 llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3887                                     CodeGenFunction &CGF) const {
3888   llvm::Type *BP = CGF.Int8PtrTy;
3889   llvm::Type *BPP = CGF.Int8PtrPtrTy;
3890 
3891   CGBuilderTy &Builder = CGF.Builder;
3892   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
3893   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3894   int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
3895   llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3896   llvm::Value *AddrTyped;
3897   unsigned PtrWidth = getContext().getTargetInfo().getPointerWidth(0);
3898   llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
3899 
3900   if (TypeAlign > MinABIStackAlignInBytes) {
3901     llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
3902     llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
3903     llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
3904     llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
3905     llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
3906     AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
3907   }
3908   else
3909     AddrTyped = Builder.CreateBitCast(Addr, PTy);
3910 
3911   llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
3912   TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
3913   uint64_t Offset =
3914     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
3915   llvm::Value *NextAddr =
3916     Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
3917                       "ap.next");
3918   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3919 
3920   return AddrTyped;
3921 }
3922 
3923 bool
3924 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3925                                                llvm::Value *Address) const {
3926   // This information comes from gcc's implementation, which seems to be
3927   // as canonical as it gets.
3928 
3929   // Everything on MIPS is 4 bytes. Double-precision FP registers
3930   // are aliased to pairs of single-precision FP registers.
3931   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
3932 
3933   // 0-31 are the general purpose registers, $0 - $31.
3934   // 32-63 are the floating-point registers, $f0 - $f31.
3935   // 64 and 65 are the multiply/divide registers, $hi and $lo.
3936   // 66 is the (notional, I think) register for signal-handler return.
3937   AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
3938 
3939   // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
3940   // They are one bit wide and ignored here.
3941 
3942   // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
3943   // (coprocessor 1 is the FP unit)
3944   // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
3945   // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
3946   // 176-181 are the DSP accumulator registers.
3947   AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
3948   return false;
3949 }
3950 
3951 //===----------------------------------------------------------------------===//
3952 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
3953 // Currently subclassed only to implement custom OpenCL C function attribute
3954 // handling.
3955 //===----------------------------------------------------------------------===// 3956 3957 namespace { 3958 3959 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { 3960 public: 3961 TCETargetCodeGenInfo(CodeGenTypes &CGT) 3962 : DefaultTargetCodeGenInfo(CGT) {} 3963 3964 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3965 CodeGen::CodeGenModule &M) const; 3966 }; 3967 3968 void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3969 llvm::GlobalValue *GV, 3970 CodeGen::CodeGenModule &M) const { 3971 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3972 if (!FD) return; 3973 3974 llvm::Function *F = cast<llvm::Function>(GV); 3975 3976 if (M.getLangOpts().OpenCL) { 3977 if (FD->hasAttr<OpenCLKernelAttr>()) { 3978 // OpenCL C Kernel functions are not subject to inlining 3979 F->addFnAttr(llvm::Attributes::NoInline); 3980 3981 if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) { 3982 3983 // Convert the reqd_work_group_size() attributes to metadata. 3984 llvm::LLVMContext &Context = F->getContext(); 3985 llvm::NamedMDNode *OpenCLMetadata = 3986 M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info"); 3987 3988 SmallVector<llvm::Value*, 5> Operands; 3989 Operands.push_back(F); 3990 3991 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3992 llvm::APInt(32, 3993 FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim()))); 3994 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3995 llvm::APInt(32, 3996 FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim()))); 3997 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3998 llvm::APInt(32, 3999 FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim()))); 4000 4001 // Add a boolean constant operand for "required" (true) or "hint" (false) 4002 // for implementing the work_group_size_hint attr later. Currently 4003 // always true as the hint is not yet implemented. 4004 Operands.push_back(llvm::ConstantInt::getTrue(Context)); 4005 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); 4006 } 4007 } 4008 } 4009 } 4010 4011 } 4012 4013 //===----------------------------------------------------------------------===// 4014 // Hexagon ABI Implementation 4015 //===----------------------------------------------------------------------===// 4016 4017 namespace { 4018 4019 class HexagonABIInfo : public ABIInfo { 4020 4021 4022 public: 4023 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 4024 4025 private: 4026 4027 ABIArgInfo classifyReturnType(QualType RetTy) const; 4028 ABIArgInfo classifyArgumentType(QualType RetTy) const; 4029 4030 virtual void computeInfo(CGFunctionInfo &FI) const; 4031 4032 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4033 CodeGenFunction &CGF) const; 4034 }; 4035 4036 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { 4037 public: 4038 HexagonTargetCodeGenInfo(CodeGenTypes &CGT) 4039 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {} 4040 4041 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 4042 return 29; 4043 } 4044 }; 4045 4046 } 4047 4048 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { 4049 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4050 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 4051 it != ie; ++it) 4052 it->info = classifyArgumentType(it->type); 4053 } 4054 4055 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { 4056 if (!isAggregateTypeForABI(Ty)) { 4057 // Treat an enum type as its underlying type. 
4058 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4059 Ty = EnumTy->getDecl()->getIntegerType(); 4060 4061 return (Ty->isPromotableIntegerType() ? 4062 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4063 } 4064 4065 // Ignore empty records. 4066 if (isEmptyRecord(getContext(), Ty, true)) 4067 return ABIArgInfo::getIgnore(); 4068 4069 // Structures with either a non-trivial destructor or a non-trivial 4070 // copy constructor are always indirect. 4071 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 4072 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 4073 4074 uint64_t Size = getContext().getTypeSize(Ty); 4075 if (Size > 64) 4076 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 4077 // Pass in the smallest viable integer type. 4078 else if (Size > 32) 4079 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 4080 else if (Size > 16) 4081 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 4082 else if (Size > 8) 4083 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 4084 else 4085 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 4086 } 4087 4088 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { 4089 if (RetTy->isVoidType()) 4090 return ABIArgInfo::getIgnore(); 4091 4092 // Large vector types should be returned via memory. 4093 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64) 4094 return ABIArgInfo::getIndirect(0); 4095 4096 if (!isAggregateTypeForABI(RetTy)) { 4097 // Treat an enum type as its underlying type. 4098 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 4099 RetTy = EnumTy->getDecl()->getIntegerType(); 4100 4101 return (RetTy->isPromotableIntegerType() ? 4102 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4103 } 4104 4105 // Structures with either a non-trivial destructor or a non-trivial 4106 // copy constructor are always indirect. 4107 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 4108 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 4109 4110 if (isEmptyRecord(getContext(), RetTy, true)) 4111 return ABIArgInfo::getIgnore(); 4112 4113 // Aggregates <= 8 bytes are returned in r0; other aggregates 4114 // are returned indirectly. 4115 uint64_t Size = getContext().getTypeSize(RetTy); 4116 if (Size <= 64) { 4117 // Return in the smallest viable integer type. 
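    // For illustration (hypothetical example, not from the source tree): a
    // struct of three chars is 24 bits, so it skips the 8- and 16-bit cases
    // below and is returned directly as an i32.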
4118 if (Size <= 8) 4119 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 4120 if (Size <= 16) 4121 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 4122 if (Size <= 32) 4123 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 4124 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 4125 } 4126 4127 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 4128 } 4129 4130 llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4131 CodeGenFunction &CGF) const { 4132 // FIXME: Need to handle alignment 4133 llvm::Type *BPP = CGF.Int8PtrPtrTy; 4134 4135 CGBuilderTy &Builder = CGF.Builder; 4136 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 4137 "ap"); 4138 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 4139 llvm::Type *PTy = 4140 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 4141 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 4142 4143 uint64_t Offset = 4144 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 4145 llvm::Value *NextAddr = 4146 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 4147 "ap.next"); 4148 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 4149 4150 return AddrTyped; 4151 } 4152 4153 4154 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { 4155 if (TheTargetCodeGenInfo) 4156 return *TheTargetCodeGenInfo; 4157 4158 const llvm::Triple &Triple = getContext().getTargetInfo().getTriple(); 4159 switch (Triple.getArch()) { 4160 default: 4161 return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types)); 4162 4163 case llvm::Triple::le32: 4164 return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types)); 4165 case llvm::Triple::mips: 4166 case llvm::Triple::mipsel: 4167 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true)); 4168 4169 case llvm::Triple::mips64: 4170 case llvm::Triple::mips64el: 4171 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false)); 4172 4173 case llvm::Triple::arm: 4174 case llvm::Triple::thumb: 4175 { 4176 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; 4177 if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0) 4178 Kind = ARMABIInfo::APCS; 4179 else if (CodeGenOpts.FloatABI == "hard" || 4180 (CodeGenOpts.FloatABI != "soft" && Triple.getEnvironment()==llvm::Triple::GNUEABIHF)) 4181 Kind = ARMABIInfo::AAPCS_VFP; 4182 4183 switch (Triple.getOS()) { 4184 case llvm::Triple::NativeClient: 4185 return *(TheTargetCodeGenInfo = 4186 new NaClARMTargetCodeGenInfo(Types, Kind)); 4187 default: 4188 return *(TheTargetCodeGenInfo = 4189 new ARMTargetCodeGenInfo(Types, Kind)); 4190 } 4191 } 4192 4193 case llvm::Triple::ppc: 4194 return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types)); 4195 case llvm::Triple::ppc64: 4196 if (Triple.isOSBinFormatELF()) 4197 return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types)); 4198 else 4199 return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types)); 4200 4201 case llvm::Triple::nvptx: 4202 case llvm::Triple::nvptx64: 4203 return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types)); 4204 4205 case llvm::Triple::mblaze: 4206 return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types)); 4207 4208 case llvm::Triple::msp430: 4209 return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types)); 4210 4211 case llvm::Triple::tce: 4212 return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types)); 4213 4214 case llvm::Triple::x86: { 
4215 bool DisableMMX = strcmp(getContext().getTargetInfo().getABI(), "no-mmx") == 0; 4216 4217 if (Triple.isOSDarwin()) 4218 return *(TheTargetCodeGenInfo = 4219 new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX, false, 4220 CodeGenOpts.NumRegisterParameters)); 4221 4222 switch (Triple.getOS()) { 4223 case llvm::Triple::Cygwin: 4224 case llvm::Triple::MinGW32: 4225 case llvm::Triple::AuroraUX: 4226 case llvm::Triple::DragonFly: 4227 case llvm::Triple::FreeBSD: 4228 case llvm::Triple::OpenBSD: 4229 case llvm::Triple::Bitrig: 4230 return *(TheTargetCodeGenInfo = 4231 new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, 4232 false, 4233 CodeGenOpts.NumRegisterParameters)); 4234 4235 case llvm::Triple::Win32: 4236 return *(TheTargetCodeGenInfo = 4237 new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, true, 4238 CodeGenOpts.NumRegisterParameters)); 4239 4240 default: 4241 return *(TheTargetCodeGenInfo = 4242 new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX, 4243 false, 4244 CodeGenOpts.NumRegisterParameters)); 4245 } 4246 } 4247 4248 case llvm::Triple::x86_64: { 4249 bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0; 4250 4251 switch (Triple.getOS()) { 4252 case llvm::Triple::Win32: 4253 case llvm::Triple::MinGW32: 4254 case llvm::Triple::Cygwin: 4255 return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types)); 4256 case llvm::Triple::NativeClient: 4257 return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types, HasAVX)); 4258 default: 4259 return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types, 4260 HasAVX)); 4261 } 4262 } 4263 case llvm::Triple::hexagon: 4264 return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types)); 4265 } 4266 } 4267