//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Type.h"
#include "llvm/DataLayout.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return CodeGenFunction::hasAggregateLLVMType(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}


void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isEmptyRecord(Context, i->getType(), true))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i)
    if (!isEmptyField(Context, *i, AllowArrays))
      return false;
  return true;
}

/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
/// a non-trivial destructor or a non-trivial copy constructor.
static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return false;

  return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
}

/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
/// a record type with either a non-trivial destructor or a non-trivial copy
/// constructor.
static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;

  return hasNonTrivialDestructorOrCopyConstructor(RT);
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i) {
      // Ignore empty records.
      if (isEmptyRecord(Context, i->getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return 0;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(i->getType(), Context);
      if (!Found)
        return 0;
    }
  }

  // Check for single element.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return 0;

  return Found;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
      !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it
/// was expanded into separate arguments. If so, we prefer to do the latter to
/// avoid inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/constructors should not be passed
    // by value.
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs) const;

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() : 0;

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type, FreeRegs);
}

llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty,
                                              unsigned &FreeRegs) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/constructors should not be passed
    // by value.
    FreeRegs = 0;
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  ABIArgInfo BaseInfo = (Ty->isPromotableIntegerType() ?
                         ABIArgInfo::getExtend() : ABIArgInfo::getDirect());

  // Regparm regs hold 32 bits.
  unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  if (SizeInRegs == 0) return BaseInfo;
  if (SizeInRegs > FreeRegs) {
    FreeRegs = 0;
    return BaseInfo;
  }
  FreeRegs -= SizeInRegs;
  return BaseInfo.isDirect() ?
      ABIArgInfo::getDirectInReg(BaseInfo.getCoerceToType()) :
      ABIArgInfo::getExtendInReg(BaseInfo.getCoerceToType());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// UseX86_MMXType - Return true if this is an MMX type that should use the
/// special x86_mmx type.
bool UseX86_MMXType(llvm::Type *IRType) {
  // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
  // special x86_mmx type.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

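// X86AdjustInlineAsmType - For x86 inline-asm operands matched by the "y"
// (MMX register) constraint, hand the backend the x86_mmx type instead of a
// generic vector type; otherwise the operand type is returned unchanged.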
static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  return Ty;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsMMXDisabled;
  bool IsWin32FloatStructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
                                         unsigned callingConvention);

  /// getIndirectResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal,
                               unsigned &FreeRegs) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy,
                                unsigned callingConvention) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs) const;
  bool shouldUseInReg(QualType Ty, unsigned &FreeRegs) const;

public:

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsMMXDisabled(m), IsWin32FloatStructABI(w),
      DefaultNumRegisterParameters(r) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                          bool d, bool p, bool m, bool w, unsigned r)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w, r)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.isTargetDarwin()) return 5;

    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

};

}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context,
                                               unsigned callingConvention) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context,
                                      callingConvention);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // For thiscall conventions, structures will never be returned in
  // a register. This is for compatibility with the MSVC ABI.
  if (callingConvention == llvm::CallingConv::X86_ThisCall &&
      RT->isStructureType()) {
    return false;
  }

  // Structure types are passed in a register if all fields would be
  // passed in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context,
                                    callingConvention))
      return false;
  }
  return true;
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             unsigned callingConvention) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(),
                                                  callingConvention)) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32FloatStructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isRecordWithSSEVectorType(Context, i->getType()))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            unsigned &FreeRegs) const {
  if (!ByVal) {
    if (FreeRegs) {
      --FreeRegs; // Non-byval indirects just use one pointer.
      return ABIArgInfo::getIndirectInReg(0, false);
    }
    return ABIArgInfo::getIndirect(0, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  if (StackAlign < TypeAlign)
    return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
                                   /*Realign=*/true);

  return ABIArgInfo::getIndirect(StackAlign);
}

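/// classify - Classify a type for x86-32 register-parameter purposes: a bare
/// float or double, or a single-element struct whose element is float or
/// double, is Float; everything else is Integer.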
X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::shouldUseInReg(QualType Ty, unsigned &FreeRegs) const {
  Class C = classify(Ty);
  if (C == Float)
    return false;

  unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  if (SizeInRegs > FreeRegs) {
    FreeRegs = 0;
    return false;
  }

  FreeRegs -= SizeInRegs;
  return true;
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               unsigned &FreeRegs) const {
  // FIXME: Set alignment on indirect arguments.
  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return getIndirectResult(Ty, false, FreeRegs);

      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, FreeRegs);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    if (shouldUseInReg(Ty, FreeRegs)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      llvm::LLVMContext &LLVMContext = getVMContext();
      llvm::Type *Int32 = llvm::Type::getInt32Ty(LLVMContext);
      SmallVector<llvm::Type*, 3> Elements;
      for (unsigned I = 0; I < SizeInRegs; ++I)
        Elements.push_back(Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }

    // Expand small (<= 128-bit) record types when we know that the stack
    // layout of those arguments will match the struct. This is important
    // because the LLVM backend isn't smart enough to remove byval, which
    // inhibits many optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpand();

    return getIndirectResult(Ty, true, FreeRegs);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory; we handle this by passing
    // them as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    llvm::Type *IRType = CGT.ConvertType(Ty);
    if (UseX86_MMXType(IRType)) {
      if (IsMMXDisabled)
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            64));
      ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
      AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
      return AAI;
    }

    return ABIArgInfo::getDirect();
  }


  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldUseInReg(Ty, FreeRegs);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
                                          FI.getCallingConvention());

  unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() :
    DefaultNumRegisterParameters;

  // If the return value is indirect, then the hidden argument is consuming
  // one integer register.
  if (FI.getReturnInfo().isIndirect() && FreeRegs) {
    --FreeRegs;
    ABIArgInfo &Old = FI.getReturnInfo();
    Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(),
                                       Old.getIndirectByVal(),
                                       Old.getIndirectRealign());
  }

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type, FreeRegs);
}

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute whether the address needs to be aligned.
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttribute(llvm::AttrListPtr::FunctionIndex,
                       llvm::Attributes::get(CGM.getLLVMContext(), B));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
                                               CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.isTargetDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//


namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the value will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getContext().getTargetInfo().getTriple().isOSDarwin();
  }

  bool HasAVX;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32-bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  ABIArgInfo classify(QualType Ty) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
    : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

};

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }
};

}

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  //   (a) If one of the classes is Memory, the whole argument is passed in
  //   memory.
  //
  //   (b) If X87UP is not preceded by X87, the whole argument is passed in
  //   memory.
  //
  //   (c) If the size of the aggregate exceeds two eightbytes and the first
  //   eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //   argument is passed in memory. NOTE: This is necessary to keep the
  //   ABI working for processors that don't support the __m256 type.
  //
  //   (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic. Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}

void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class
  // for Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
               (k == BuiltinType::LongDouble &&
                getContext().getTargetInfo().getTriple().getOS() ==
                llvm::Triple::NativeClient)) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
      Lo = Hi = Integer;
    else
      Current = Integer;
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong)||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 || (HasAVX && Size == 256)) {
      // Arguments of 256 bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to
      // class SSEUP. The original Lo and Hi design considers that types
      // can't be greater than 128 bits, so a 64-bit split in Hi and Lo makes
      // sense. This design isn't correct for 256 bits, but since there are
      // no cases where the upper parts would need to be inspected, avoid
      // adding complexity and just consider Hi to match the 64-256 part.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy)
      Current = SSE;
    else if (ET == getContext().DoubleTy ||
             (ET == getContext().LongDoubleTy &&
              getContext().getTargetInfo().getTriple().getOS() ==
              llvm::Triple::NativeClient))
      Lo = Hi = SSE;
    else if (ET == getContext().LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since Lo and Hi logic isn't extended
    // to work for sizes wider than 128, early check and fallback to memory.
    if (Size > 128 && EltSize != 256)
      return;

    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (hasNonTrivialDestructorOrCopyConstructor(RT))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset =
          OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(i->getType(), Offset, FieldLo, FieldHi);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory)
          break;
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // four eightbytes, or it contains unaligned fields, it has class MEMORY.
      //
      // The only case a 256-bit wide vector could be used is when the struct
      // contains a single 256-bit element. Since Lo and Hi logic isn't
      // extended to work for sizes wider than 128, early check and fallback
      // to memory.
      //
      if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
        Lo = Memory;
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling; they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  return ABIArgInfo::getIndirect(0);
}

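/// IsIllegalVectorType - Return true if the given vector type cannot be
/// passed directly in vector registers: it is at most 64 bits wide, or wider
/// than the largest vector register available (128 bits, or 256 with AVX).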
This does not seem to happen currently, 1641 // but this code would be much safer if we could mark the argument with 1642 // 'onstack'. See PR12193. 1643 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) { 1644 // Treat an enum type as its underlying type. 1645 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1646 Ty = EnumTy->getDecl()->getIntegerType(); 1647 1648 return (Ty->isPromotableIntegerType() ? 1649 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1650 } 1651 1652 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 1653 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 1654 1655 // Compute the byval alignment. We specify the alignment of the byval in all 1656 // cases so that the mid-level optimizer knows the alignment of the byval. 1657 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); 1658 1659 // Attempt to avoid passing indirect results using byval when possible. This 1660 // is important for good codegen. 1661 // 1662 // We do this by coercing the value into a scalar type which the backend can 1663 // handle naturally (i.e., without using byval). 1664 // 1665 // For simplicity, we currently only do this when we have exhausted all of the 1666 // free integer registers. Doing this when there are free integer registers 1667 // would require more care, as we would have to ensure that the coerced value 1668 // did not claim the unused register. That would require either reording the 1669 // arguments to the function (so that any subsequent inreg values came first), 1670 // or only doing this optimization when there were no following arguments that 1671 // might be inreg. 1672 // 1673 // We currently expect it to be rare (particularly in well written code) for 1674 // arguments to be passed on the stack when there are still free integer 1675 // registers available (this would typically imply large structs being passed 1676 // by value), so this seems like a fair tradeoff for now. 1677 // 1678 // We can revisit this if the backend grows support for 'onstack' parameter 1679 // attributes. See PR12193. 1680 if (freeIntRegs == 0) { 1681 uint64_t Size = getContext().getTypeSize(Ty); 1682 1683 // If this type fits in an eightbyte, coerce it into the matching integral 1684 // type, which will end up on the stack (with alignment 8). 1685 if (Align == 8 && Size <= 64) 1686 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 1687 Size)); 1688 } 1689 1690 return ABIArgInfo::getIndirect(Align); 1691 } 1692 1693 /// GetByteVectorType - The ABI specifies that a value should be passed in an 1694 /// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a 1695 /// vector register. 1696 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const { 1697 llvm::Type *IRType = CGT.ConvertType(Ty); 1698 1699 // Wrapper structs that just contain vectors are passed just like vectors, 1700 // strip them off if present. 1701 llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType); 1702 while (STy && STy->getNumElements() == 1) { 1703 IRType = STy->getElementType(0); 1704 STy = dyn_cast<llvm::StructType>(IRType); 1705 } 1706 1707 // If the preferred type is a 16-byte vector, prefer to pass it. 
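  // For instance (illustrative): for __m128, i.e. <4 x float>, or a struct
  // that merely wraps one, the check below lets us pass the vector type
  // itself; anything else falls back to the <2 x double> default at the end
  // of this function.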
1708 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){ 1709 llvm::Type *EltTy = VT->getElementType(); 1710 unsigned BitWidth = VT->getBitWidth(); 1711 if ((BitWidth >= 128 && BitWidth <= 256) && 1712 (EltTy->isFloatTy() || EltTy->isDoubleTy() || 1713 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) || 1714 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) || 1715 EltTy->isIntegerTy(128))) 1716 return VT; 1717 } 1718 1719 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2); 1720 } 1721 1722 /// BitsContainNoUserData - Return true if the specified [start,end) bit range 1723 /// is known to either be off the end of the specified type or being in 1724 /// alignment padding. The user type specified is known to be at most 128 bits 1725 /// in size, and have passed through X86_64ABIInfo::classify with a successful 1726 /// classification that put one of the two halves in the INTEGER class. 1727 /// 1728 /// It is conservatively correct to return false. 1729 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, 1730 unsigned EndBit, ASTContext &Context) { 1731 // If the bytes being queried are off the end of the type, there is no user 1732 // data hiding here. This handles analysis of builtins, vectors and other 1733 // types that don't contain interesting padding. 1734 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 1735 if (TySize <= StartBit) 1736 return true; 1737 1738 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 1739 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 1740 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 1741 1742 // Check each element to see if the element overlaps with the queried range. 1743 for (unsigned i = 0; i != NumElts; ++i) { 1744 // If the element is after the span we care about, then we're done.. 1745 unsigned EltOffset = i*EltSize; 1746 if (EltOffset >= EndBit) break; 1747 1748 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0; 1749 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 1750 EndBit-EltOffset, Context)) 1751 return false; 1752 } 1753 // If it overlaps no elements, then it is safe to process as padding. 1754 return true; 1755 } 1756 1757 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1758 const RecordDecl *RD = RT->getDecl(); 1759 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 1760 1761 // If this is a C++ record, check the bases first. 1762 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1763 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 1764 e = CXXRD->bases_end(); i != e; ++i) { 1765 assert(!i->isVirtual() && !i->getType()->isDependentType() && 1766 "Unexpected base class!"); 1767 const CXXRecordDecl *Base = 1768 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 1769 1770 // If the base is after the span we care about, ignore it. 1771 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 1772 if (BaseOffset >= EndBit) continue; 1773 1774 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 1775 if (!BitsContainNoUserData(i->getType(), BaseStart, 1776 EndBit-BaseOffset, Context)) 1777 return false; 1778 } 1779 } 1780 1781 // Verify that no field has data that overlaps the region of interest. Yes 1782 // this could be sped up a lot by being smarter about queried fields, 1783 // however we're only looking at structs up to 16 bytes, so we don't care 1784 // much. 
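    // Illustrative example: for struct { int a; char b; } (size 8, with three
    // bytes of tail padding), a query over bits [40, 64) overlaps no field
    // data and returns true, so the caller may treat that range as padding.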
1785 unsigned idx = 0; 1786 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1787 i != e; ++i, ++idx) { 1788 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 1789 1790 // If we found a field after the region we care about, then we're done. 1791 if (FieldOffset >= EndBit) break; 1792 1793 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 1794 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 1795 Context)) 1796 return false; 1797 } 1798 1799 // If nothing in this record overlapped the area of interest, then we're 1800 // clean. 1801 return true; 1802 } 1803 1804 return false; 1805 } 1806 1807 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 1808 /// float member at the specified offset. For example, {int,{float}} has a 1809 /// float at offset 4. It is conservatively correct for this routine to return 1810 /// false. 1811 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 1812 const llvm::DataLayout &TD) { 1813 // Base case if we find a float. 1814 if (IROffset == 0 && IRType->isFloatTy()) 1815 return true; 1816 1817 // If this is a struct, recurse into the field at the specified offset. 1818 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1819 const llvm::StructLayout *SL = TD.getStructLayout(STy); 1820 unsigned Elt = SL->getElementContainingOffset(IROffset); 1821 IROffset -= SL->getElementOffset(Elt); 1822 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 1823 } 1824 1825 // If this is an array, recurse into the field at the specified offset. 1826 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1827 llvm::Type *EltTy = ATy->getElementType(); 1828 unsigned EltSize = TD.getTypeAllocSize(EltTy); 1829 IROffset -= IROffset/EltSize*EltSize; 1830 return ContainsFloatAtOffset(EltTy, IROffset, TD); 1831 } 1832 1833 return false; 1834 } 1835 1836 1837 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 1838 /// low 8 bytes of an XMM register, corresponding to the SSE class. 1839 llvm::Type *X86_64ABIInfo:: 1840 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1841 QualType SourceTy, unsigned SourceOffset) const { 1842 // The only three choices we have are either double, <2 x float>, or float. We 1843 // pass as float if the last 4 bytes is just padding. This happens for 1844 // structs that contain 3 floats. 1845 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 1846 SourceOffset*8+64, getContext())) 1847 return llvm::Type::getFloatTy(getVMContext()); 1848 1849 // We want to pass as <2 x float> if the LLVM IR type contains a float at 1850 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 1851 // case. 1852 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && 1853 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) 1854 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 1855 1856 return llvm::Type::getDoubleTy(getVMContext()); 1857 } 1858 1859 1860 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 1861 /// an 8-byte GPR. This means that we either have a scalar or we are talking 1862 /// about the high or low part of an up-to-16-byte struct. This routine picks 1863 /// the best LLVM IR type to represent this, which may be i64 or may be anything 1864 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 1865 /// etc). 
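/// For example (illustrative): for struct { char c; } the best unit is i8,
/// since everything above the low byte is tail padding, while a pointer or a
/// plain 'long' already fills the whole eightbyte and is returned unchanged.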
1866 /// 1867 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 1868 /// the source type. IROffset is an offset in bytes into the LLVM IR type that 1869 /// the 8-byte value references. PrefType may be null. 1870 /// 1871 /// SourceTy is the source level type for the entire argument. SourceOffset is 1872 /// an offset into this that we're processing (which is always either 0 or 8). 1873 /// 1874 llvm::Type *X86_64ABIInfo:: 1875 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1876 QualType SourceTy, unsigned SourceOffset) const { 1877 // If we're dealing with an un-offset LLVM IR type, then it means that we're 1878 // returning an 8-byte unit starting with it. See if we can safely use it. 1879 if (IROffset == 0) { 1880 // Pointers and int64's always fill the 8-byte unit. 1881 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || 1882 IRType->isIntegerTy(64)) 1883 return IRType; 1884 1885 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 1886 // goodness in the source type is just tail padding. This is allowed to 1887 // kick in for struct {double,int} on the int, but not on 1888 // struct{double,int,int} because we wouldn't return the second int. We 1889 // have to do this analysis on the source type because we can't depend on 1890 // unions being lowered a specific way etc. 1891 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 1892 IRType->isIntegerTy(32) || 1893 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) { 1894 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 : 1895 cast<llvm::IntegerType>(IRType)->getBitWidth(); 1896 1897 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 1898 SourceOffset*8+64, getContext())) 1899 return IRType; 1900 } 1901 } 1902 1903 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1904 // If this is a struct, recurse into the field at the specified offset. 1905 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy); 1906 if (IROffset < SL->getSizeInBytes()) { 1907 unsigned FieldIdx = SL->getElementContainingOffset(IROffset); 1908 IROffset -= SL->getElementOffset(FieldIdx); 1909 1910 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, 1911 SourceTy, SourceOffset); 1912 } 1913 } 1914 1915 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1916 llvm::Type *EltTy = ATy->getElementType(); 1917 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy); 1918 unsigned EltOffset = IROffset/EltSize*EltSize; 1919 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, 1920 SourceOffset); 1921 } 1922 1923 // Okay, we don't have any better idea of what to pass, so we pass this in an 1924 // integer register that isn't too big to fit the rest of the struct. 1925 unsigned TySizeInBytes = 1926 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); 1927 1928 assert(TySizeInBytes != SourceOffset && "Empty field?"); 1929 1930 // It is always safe to classify this as an integer type up to i64 that 1931 // isn't larger than the structure. 1932 return llvm::IntegerType::get(getVMContext(), 1933 std::min(TySizeInBytes-SourceOffset, 8U)*8); 1934 } 1935 1936 1937 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally 1938 /// be used as elements of a two register pair to pass or return, return a 1939 /// first class aggregate to represent them. 
For example, if the low part of 1940 /// a by-value argument should be passed as i32* and the high part as float, 1941 /// return {i32*, float}. 1942 static llvm::Type * 1943 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, 1944 const llvm::DataLayout &TD) { 1945 // In order to correctly satisfy the ABI, we need to the high part to start 1946 // at offset 8. If the high and low parts we inferred are both 4-byte types 1947 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have 1948 // the second element at offset 8. Check for this: 1949 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo); 1950 unsigned HiAlign = TD.getABITypeAlignment(Hi); 1951 unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign); 1952 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!"); 1953 1954 // To handle this, we have to increase the size of the low part so that the 1955 // second element will start at an 8 byte offset. We can't increase the size 1956 // of the second element because it might make us access off the end of the 1957 // struct. 1958 if (HiStart != 8) { 1959 // There are only two sorts of types the ABI generation code can produce for 1960 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32. 1961 // Promote these to a larger type. 1962 if (Lo->isFloatTy()) 1963 Lo = llvm::Type::getDoubleTy(Lo->getContext()); 1964 else { 1965 assert(Lo->isIntegerTy() && "Invalid/unknown lo type"); 1966 Lo = llvm::Type::getInt64Ty(Lo->getContext()); 1967 } 1968 } 1969 1970 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL); 1971 1972 1973 // Verify that the second element is at an 8-byte offset. 1974 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 && 1975 "Invalid x86-64 argument pair!"); 1976 return Result; 1977 } 1978 1979 ABIArgInfo X86_64ABIInfo:: 1980 classifyReturnType(QualType RetTy) const { 1981 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the 1982 // classification algorithm. 1983 X86_64ABIInfo::Class Lo, Hi; 1984 classify(RetTy, 0, Lo, Hi); 1985 1986 // Check some invariants. 1987 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 1988 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 1989 1990 llvm::Type *ResType = 0; 1991 switch (Lo) { 1992 case NoClass: 1993 if (Hi == NoClass) 1994 return ABIArgInfo::getIgnore(); 1995 // If the low part is just padding, it takes no register, leave ResType 1996 // null. 1997 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 1998 "Unknown missing lo part"); 1999 break; 2000 2001 case SSEUp: 2002 case X87Up: 2003 llvm_unreachable("Invalid classification for lo word."); 2004 2005 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via 2006 // hidden argument. 2007 case Memory: 2008 return getIndirectReturnResult(RetTy); 2009 2010 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next 2011 // available register of the sequence %rax, %rdx is used. 2012 case Integer: 2013 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 2014 2015 // If we have a sign or zero extended integer, make sure to return Extend 2016 // so that the parameter gets the right LLVM IR attributes. 2017 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2018 // Treat an enum type as its underlying type. 
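      // (Illustrative: an enum whose underlying type is 'short' is promotable
      // and therefore gets the Extend treatment; one based on 'int' is not
      // promotable and stays Direct.)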
2019 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 2020 RetTy = EnumTy->getDecl()->getIntegerType(); 2021 2022 if (RetTy->isIntegralOrEnumerationType() && 2023 RetTy->isPromotableIntegerType()) 2024 return ABIArgInfo::getExtend(); 2025 } 2026 break; 2027 2028 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 2029 // available SSE register of the sequence %xmm0, %xmm1 is used. 2030 case SSE: 2031 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 2032 break; 2033 2034 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 2035 // returned on the X87 stack in %st0 as 80-bit x87 number. 2036 case X87: 2037 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 2038 break; 2039 2040 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 2041 // part of the value is returned in %st0 and the imaginary part in 2042 // %st1. 2043 case ComplexX87: 2044 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 2045 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), 2046 llvm::Type::getX86_FP80Ty(getVMContext()), 2047 NULL); 2048 break; 2049 } 2050 2051 llvm::Type *HighPart = 0; 2052 switch (Hi) { 2053 // Memory was handled previously and X87 should 2054 // never occur as a hi class. 2055 case Memory: 2056 case X87: 2057 llvm_unreachable("Invalid classification for hi word."); 2058 2059 case ComplexX87: // Previously handled. 2060 case NoClass: 2061 break; 2062 2063 case Integer: 2064 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2065 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2066 return ABIArgInfo::getDirect(HighPart, 8); 2067 break; 2068 case SSE: 2069 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2070 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2071 return ABIArgInfo::getDirect(HighPart, 8); 2072 break; 2073 2074 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 2075 // is passed in the next available eightbyte chunk if the last used 2076 // vector register. 2077 // 2078 // SSEUP should always be preceded by SSE, just widen. 2079 case SSEUp: 2080 assert(Lo == SSE && "Unexpected SSEUp classification."); 2081 ResType = GetByteVectorType(RetTy); 2082 break; 2083 2084 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 2085 // returned together with the previous X87 value in %st0. 2086 case X87Up: 2087 // If X87Up is preceded by X87, we don't need to do 2088 // anything. However, in some cases with unions it may not be 2089 // preceded by X87. In such situations we follow gcc and pass the 2090 // extra bits in an SSE reg. 2091 if (Lo != X87) { 2092 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2093 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2094 return ABIArgInfo::getDirect(HighPart, 8); 2095 } 2096 break; 2097 } 2098 2099 // If a high part was specified, merge it together with the low part. It is 2100 // known to pass in the high eightbyte of the result. We do this by forming a 2101 // first class struct aggregate with the high and low part: {low, high} 2102 if (HighPart) 2103 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2104 2105 return ABIArgInfo::getDirect(ResType); 2106 } 2107 2108 ABIArgInfo X86_64ABIInfo::classifyArgumentType( 2109 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE) 2110 const 2111 { 2112 X86_64ABIInfo::Class Lo, Hi; 2113 classify(Ty, 0, Lo, Hi); 2114 2115 // Check some invariants. 
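  // (Lo, Hi) describe the argument's two eightbytes; e.g. an __m128 argument
  // classifies as (SSE, SSEUp) and struct { double d; long l; } as
  // (SSE, Integer). (Illustrative.)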
2116 // FIXME: Enforce these by construction. 2117 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2118 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2119 2120 neededInt = 0; 2121 neededSSE = 0; 2122 llvm::Type *ResType = 0; 2123 switch (Lo) { 2124 case NoClass: 2125 if (Hi == NoClass) 2126 return ABIArgInfo::getIgnore(); 2127 // If the low part is just padding, it takes no register, leave ResType 2128 // null. 2129 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2130 "Unknown missing lo part"); 2131 break; 2132 2133 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 2134 // on the stack. 2135 case Memory: 2136 2137 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 2138 // COMPLEX_X87, it is passed in memory. 2139 case X87: 2140 case ComplexX87: 2141 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 2142 ++neededInt; 2143 return getIndirectResult(Ty, freeIntRegs); 2144 2145 case SSEUp: 2146 case X87Up: 2147 llvm_unreachable("Invalid classification for lo word."); 2148 2149 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 2150 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 2151 // and %r9 is used. 2152 case Integer: 2153 ++neededInt; 2154 2155 // Pick an 8-byte type based on the preferred type. 2156 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 2157 2158 // If we have a sign or zero extended integer, make sure to return Extend 2159 // so that the parameter gets the right LLVM IR attributes. 2160 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2161 // Treat an enum type as its underlying type. 2162 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2163 Ty = EnumTy->getDecl()->getIntegerType(); 2164 2165 if (Ty->isIntegralOrEnumerationType() && 2166 Ty->isPromotableIntegerType()) 2167 return ABIArgInfo::getExtend(); 2168 } 2169 2170 break; 2171 2172 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 2173 // available SSE register is used, the registers are taken in the 2174 // order from %xmm0 to %xmm7. 2175 case SSE: { 2176 llvm::Type *IRType = CGT.ConvertType(Ty); 2177 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 2178 ++neededSSE; 2179 break; 2180 } 2181 } 2182 2183 llvm::Type *HighPart = 0; 2184 switch (Hi) { 2185 // Memory was handled previously, ComplexX87 and X87 should 2186 // never occur as hi classes, and X87Up must be preceded by X87, 2187 // which is passed in memory. 2188 case Memory: 2189 case X87: 2190 case ComplexX87: 2191 llvm_unreachable("Invalid classification for hi word."); 2192 2193 case NoClass: break; 2194 2195 case Integer: 2196 ++neededInt; 2197 // Pick an 8-byte type based on the preferred type. 2198 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2199 2200 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2201 return ABIArgInfo::getDirect(HighPart, 8); 2202 break; 2203 2204 // X87Up generally doesn't occur here (long double is passed in 2205 // memory), except in situations involving unions. 2206 case X87Up: 2207 case SSE: 2208 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2209 2210 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2211 return ABIArgInfo::getDirect(HighPart, 8); 2212 2213 ++neededSSE; 2214 break; 2215 2216 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 2217 // eightbyte is passed in the upper half of the last used SSE 2218 // register. This only happens when 128-bit vectors are passed. 
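  // (With AVX, 256-bit vectors such as __m256 reach this path as well, since
  // their upper eightbytes also carry the SSEUP class; illustrative note.)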
2219 case SSEUp: 2220 assert(Lo == SSE && "Unexpected SSEUp classification"); 2221 ResType = GetByteVectorType(Ty); 2222 break; 2223 } 2224 2225 // If a high part was specified, merge it together with the low part. It is 2226 // known to pass in the high eightbyte of the result. We do this by forming a 2227 // first class struct aggregate with the high and low part: {low, high} 2228 if (HighPart) 2229 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2230 2231 return ABIArgInfo::getDirect(ResType); 2232 } 2233 2234 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2235 2236 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2237 2238 // Keep track of the number of assigned registers. 2239 unsigned freeIntRegs = 6, freeSSERegs = 8; 2240 2241 // If the return value is indirect, then the hidden argument is consuming one 2242 // integer register. 2243 if (FI.getReturnInfo().isIndirect()) 2244 --freeIntRegs; 2245 2246 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 2247 // get assigned (in left-to-right order) for passing as follows... 2248 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2249 it != ie; ++it) { 2250 unsigned neededInt, neededSSE; 2251 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt, 2252 neededSSE); 2253 2254 // AMD64-ABI 3.2.3p3: If there are no registers available for any 2255 // eightbyte of an argument, the whole argument is passed on the 2256 // stack. If registers have already been assigned for some 2257 // eightbytes of such an argument, the assignments get reverted. 2258 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 2259 freeIntRegs -= neededInt; 2260 freeSSERegs -= neededSSE; 2261 } else { 2262 it->info = getIndirectResult(it->type, freeIntRegs); 2263 } 2264 } 2265 } 2266 2267 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 2268 QualType Ty, 2269 CodeGenFunction &CGF) { 2270 llvm::Value *overflow_arg_area_p = 2271 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 2272 llvm::Value *overflow_arg_area = 2273 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 2274 2275 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 2276 // byte boundary if alignment needed by type exceeds 8 byte boundary. 2277 // It isn't stated explicitly in the standard, but in practice we use 2278 // alignment greater than 16 where necessary. 2279 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 2280 if (Align > 8) { 2281 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 2282 llvm::Value *Offset = 2283 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 2284 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 2285 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 2286 CGF.Int64Ty); 2287 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); 2288 overflow_arg_area = 2289 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 2290 overflow_arg_area->getType(), 2291 "overflow_arg_area.align"); 2292 } 2293 2294 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 2295 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2296 llvm::Value *Res = 2297 CGF.Builder.CreateBitCast(overflow_arg_area, 2298 llvm::PointerType::getUnqual(LTy)); 2299 2300 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 2301 // l->overflow_arg_area + sizeof(type). 2302 // AMD64-ABI 3.5.7p5: Step 10. 
Align l->overflow_arg_area upwards to 2303 // an 8 byte boundary. 2304 2305 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 2306 llvm::Value *Offset = 2307 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 2308 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 2309 "overflow_arg_area.next"); 2310 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 2311 2312 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 2313 return Res; 2314 } 2315 2316 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2317 CodeGenFunction &CGF) const { 2318 // Assume that va_list type is correct; should be pointer to LLVM type: 2319 // struct { 2320 // i32 gp_offset; 2321 // i32 fp_offset; 2322 // i8* overflow_arg_area; 2323 // i8* reg_save_area; 2324 // }; 2325 unsigned neededInt, neededSSE; 2326 2327 Ty = CGF.getContext().getCanonicalType(Ty); 2328 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE); 2329 2330 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 2331 // in the registers. If not go to step 7. 2332 if (!neededInt && !neededSSE) 2333 return EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2334 2335 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 2336 // general purpose registers needed to pass type and num_fp to hold 2337 // the number of floating point registers needed. 2338 2339 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 2340 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 2341 // l->fp_offset > 304 - num_fp * 16 go to step 7. 2342 // 2343 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of 2344 // register save space). 2345 2346 llvm::Value *InRegs = 0; 2347 llvm::Value *gp_offset_p = 0, *gp_offset = 0; 2348 llvm::Value *fp_offset_p = 0, *fp_offset = 0; 2349 if (neededInt) { 2350 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); 2351 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); 2352 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); 2353 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); 2354 } 2355 2356 if (neededSSE) { 2357 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); 2358 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); 2359 llvm::Value *FitsInFP = 2360 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); 2361 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); 2362 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; 2363 } 2364 2365 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 2366 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 2367 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 2368 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 2369 2370 // Emit code to load the value if it was passed in registers. 2371 2372 CGF.EmitBlock(InRegBlock); 2373 2374 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with 2375 // an offset of l->gp_offset and/or l->fp_offset. This may require 2376 // copying to a temporary location in case the parameter is passed 2377 // in different register classes or requires an alignment greater 2378 // than 8 for general purpose registers and 16 for XMM registers. 
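  // For example (illustrative): va_arg of struct { double d; long l; } needs
  // one eightbyte from the FP save area and one from the GP save area, so the
  // mixed-register path below assembles the value in a temporary first.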
2379 // 2380 // FIXME: This really results in shameful code when we end up needing to 2381 // collect arguments from different places; often what should result in a 2382 // simple assembling of a structure from scattered addresses has many more 2383 // loads than necessary. Can we clean this up? 2384 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2385 llvm::Value *RegAddr = 2386 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3), 2387 "reg_save_area"); 2388 if (neededInt && neededSSE) { 2389 // FIXME: Cleanup. 2390 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 2391 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 2392 llvm::Value *Tmp = CGF.CreateTempAlloca(ST); 2393 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 2394 llvm::Type *TyLo = ST->getElementType(0); 2395 llvm::Type *TyHi = ST->getElementType(1); 2396 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 2397 "Unexpected ABI info for mixed regs"); 2398 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 2399 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 2400 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2401 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2402 llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr; 2403 llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr; 2404 llvm::Value *V = 2405 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 2406 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2407 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 2408 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2409 2410 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2411 llvm::PointerType::getUnqual(LTy)); 2412 } else if (neededInt) { 2413 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2414 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2415 llvm::PointerType::getUnqual(LTy)); 2416 } else if (neededSSE == 1) { 2417 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2418 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2419 llvm::PointerType::getUnqual(LTy)); 2420 } else { 2421 assert(neededSSE == 2 && "Invalid number of needed registers!"); 2422 // SSE registers are spaced 16 bytes apart in the register save 2423 // area, we need to collect the two eightbytes together. 2424 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2425 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); 2426 llvm::Type *DoubleTy = CGF.DoubleTy; 2427 llvm::Type *DblPtrTy = 2428 llvm::PointerType::getUnqual(DoubleTy); 2429 llvm::StructType *ST = llvm::StructType::get(DoubleTy, 2430 DoubleTy, NULL); 2431 llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST); 2432 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 2433 DblPtrTy)); 2434 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2435 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 2436 DblPtrTy)); 2437 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2438 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2439 llvm::PointerType::getUnqual(LTy)); 2440 } 2441 2442 // AMD64-ABI 3.5.7p5: Step 5. Set: 2443 // l->gp_offset = l->gp_offset + num_gp * 8 2444 // l->fp_offset = l->fp_offset + num_fp * 16. 
2445 if (neededInt) { 2446 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2447 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2448 gp_offset_p); 2449 } 2450 if (neededSSE) { 2451 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2452 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2453 fp_offset_p); 2454 } 2455 CGF.EmitBranch(ContBlock); 2456 2457 // Emit code to load the value if it was passed in memory. 2458 2459 CGF.EmitBlock(InMemBlock); 2460 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2461 2462 // Return the appropriate result. 2463 2464 CGF.EmitBlock(ContBlock); 2465 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2, 2466 "vaarg.addr"); 2467 ResAddr->addIncoming(RegAddr, InRegBlock); 2468 ResAddr->addIncoming(MemAddr, InMemBlock); 2469 return ResAddr; 2470 } 2471 2472 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const { 2473 2474 if (Ty->isVoidType()) 2475 return ABIArgInfo::getIgnore(); 2476 2477 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2478 Ty = EnumTy->getDecl()->getIntegerType(); 2479 2480 uint64_t Size = getContext().getTypeSize(Ty); 2481 2482 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2483 if (hasNonTrivialDestructorOrCopyConstructor(RT) || 2484 RT->getDecl()->hasFlexibleArrayMember()) 2485 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2486 2487 // FIXME: mingw-w64-gcc emits 128-bit struct as i128 2488 if (Size == 128 && 2489 getContext().getTargetInfo().getTriple().getOS() 2490 == llvm::Triple::MinGW32) 2491 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2492 Size)); 2493 2494 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 2495 // not 1, 2, 4, or 8 bytes, must be passed by reference." 
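    // In practice (illustrative) this means 1-, 2-, 4- and 8-byte aggregates
    // are coerced to i8/i16/i32/i64 below, while e.g. a 3-byte or 16-byte
    // struct takes the indirect, non-byval path instead.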
2496 if (Size <= 64 && 2497 (Size & (Size - 1)) == 0) 2498 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2499 Size)); 2500 2501 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2502 } 2503 2504 if (Ty->isPromotableIntegerType()) 2505 return ABIArgInfo::getExtend(); 2506 2507 return ABIArgInfo::getDirect(); 2508 } 2509 2510 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2511 2512 QualType RetTy = FI.getReturnType(); 2513 FI.getReturnInfo() = classify(RetTy); 2514 2515 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2516 it != ie; ++it) 2517 it->info = classify(it->type); 2518 } 2519 2520 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2521 CodeGenFunction &CGF) const { 2522 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2523 2524 CGBuilderTy &Builder = CGF.Builder; 2525 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 2526 "ap"); 2527 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2528 llvm::Type *PTy = 2529 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2530 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2531 2532 uint64_t Offset = 2533 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8); 2534 llvm::Value *NextAddr = 2535 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2536 "ap.next"); 2537 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2538 2539 return AddrTyped; 2540 } 2541 2542 namespace { 2543 2544 class NaClX86_64ABIInfo : public ABIInfo { 2545 public: 2546 NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) 2547 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {} 2548 virtual void computeInfo(CGFunctionInfo &FI) const; 2549 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2550 CodeGenFunction &CGF) const; 2551 private: 2552 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv. 2553 X86_64ABIInfo NInfo; // Used for everything else. 2554 }; 2555 2556 class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 2557 public: 2558 NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) 2559 : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {} 2560 }; 2561 2562 } 2563 2564 void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2565 if (FI.getASTCallingConvention() == CC_PnaclCall) 2566 PInfo.computeInfo(FI); 2567 else 2568 NInfo.computeInfo(FI); 2569 } 2570 2571 llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2572 CodeGenFunction &CGF) const { 2573 // Always use the native convention; calling pnacl-style varargs functions 2574 // is unuspported. 2575 return NInfo.EmitVAArg(VAListAddr, Ty, CGF); 2576 } 2577 2578 2579 // PowerPC-32 2580 2581 namespace { 2582 class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2583 public: 2584 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2585 2586 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2587 // This is recovered from gcc output. 2588 return 1; // r1 is the dedicated stack pointer 2589 } 2590 2591 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2592 llvm::Value *Address) const; 2593 }; 2594 2595 } 2596 2597 bool 2598 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2599 llvm::Value *Address) const { 2600 // This is calculated from the LLVM and GCC tables and verified 2601 // against gcc output. AFAIK all ABIs use the same encoding. 
2602 2603 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2604 2605 llvm::IntegerType *i8 = CGF.Int8Ty; 2606 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2607 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2608 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2609 2610 // 0-31: r0-31, the 4-byte general-purpose registers 2611 AssignToArrayRange(Builder, Address, Four8, 0, 31); 2612 2613 // 32-63: fp0-31, the 8-byte floating-point registers 2614 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2615 2616 // 64-76 are various 4-byte special-purpose registers: 2617 // 64: mq 2618 // 65: lr 2619 // 66: ctr 2620 // 67: ap 2621 // 68-75 cr0-7 2622 // 76: xer 2623 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2624 2625 // 77-108: v0-31, the 16-byte vector registers 2626 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2627 2628 // 109: vrsave 2629 // 110: vscr 2630 // 111: spe_acc 2631 // 112: spefscr 2632 // 113: sfp 2633 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2634 2635 return false; 2636 } 2637 2638 // PowerPC-64 2639 2640 namespace { 2641 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. 2642 class PPC64_SVR4_ABIInfo : public DefaultABIInfo { 2643 2644 public: 2645 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 2646 2647 // TODO: We can add more logic to computeInfo to improve performance. 2648 // Example: For aggregate arguments that fit in a register, we could 2649 // use getDirectInReg (as is done below for structs containing a single 2650 // floating-point value) to avoid pushing them to memory on function 2651 // entry. This would require changing the logic in PPCISelLowering 2652 // when lowering the parameters in the caller and args in the callee. 2653 virtual void computeInfo(CGFunctionInfo &FI) const { 2654 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2655 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2656 it != ie; ++it) { 2657 // We rely on the default argument classification for the most part. 2658 // One exception: An aggregate containing a single floating-point 2659 // item must be passed in a register if one is available. 2660 const Type *T = isSingleElementStruct(it->type, getContext()); 2661 if (T) { 2662 const BuiltinType *BT = T->getAs<BuiltinType>(); 2663 if (BT && BT->isFloatingPoint()) { 2664 QualType QT(T, 0); 2665 it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); 2666 continue; 2667 } 2668 } 2669 it->info = classifyArgumentType(it->type); 2670 } 2671 } 2672 2673 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, 2674 QualType Ty, 2675 CodeGenFunction &CGF) const; 2676 }; 2677 2678 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { 2679 public: 2680 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT) 2681 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {} 2682 2683 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2684 // This is recovered from gcc output. 2685 return 1; // r1 is the dedicated stack pointer 2686 } 2687 2688 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2689 llvm::Value *Address) const; 2690 }; 2691 2692 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2693 public: 2694 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2695 2696 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2697 // This is recovered from gcc output. 
2698 return 1; // r1 is the dedicated stack pointer 2699 } 2700 2701 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2702 llvm::Value *Address) const; 2703 }; 2704 2705 } 2706 2707 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 2708 llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr, 2709 QualType Ty, 2710 CodeGenFunction &CGF) const { 2711 llvm::Type *BP = CGF.Int8PtrTy; 2712 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2713 2714 CGBuilderTy &Builder = CGF.Builder; 2715 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 2716 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2717 2718 // Handle address alignment for type alignment > 64 bits. Although 2719 // long double normally requires 16-byte alignment, this is not the 2720 // case when it is passed as an argument; so handle that special case. 2721 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 2722 unsigned TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; 2723 2724 if (TyAlign > 8 && (!BT || !BT->isFloatingPoint())) { 2725 assert((TyAlign & (TyAlign - 1)) == 0 && 2726 "Alignment is not power of 2!"); 2727 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 2728 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(TyAlign - 1)); 2729 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt64(~(TyAlign - 1))); 2730 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 2731 } 2732 2733 // Update the va_list pointer. 2734 unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8; 2735 unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8); 2736 llvm::Value *NextAddr = 2737 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), 2738 "ap.next"); 2739 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2740 2741 // If the argument is smaller than 8 bytes, it is right-adjusted in 2742 // its doubleword slot. Adjust the pointer to pick it up from the 2743 // correct offset. 2744 if (SizeInBytes < 8) { 2745 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 2746 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes)); 2747 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 2748 } 2749 2750 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2751 return Builder.CreateBitCast(Addr, PTy); 2752 } 2753 2754 static bool 2755 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2756 llvm::Value *Address) { 2757 // This is calculated from the LLVM and GCC tables and verified 2758 // against gcc output. AFAIK all ABIs use the same encoding. 
2759 2760 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2761 2762 llvm::IntegerType *i8 = CGF.Int8Ty; 2763 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2764 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2765 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2766 2767 // 0-31: r0-31, the 8-byte general-purpose registers 2768 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 2769 2770 // 32-63: fp0-31, the 8-byte floating-point registers 2771 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2772 2773 // 64-76 are various 4-byte special-purpose registers: 2774 // 64: mq 2775 // 65: lr 2776 // 66: ctr 2777 // 67: ap 2778 // 68-75 cr0-7 2779 // 76: xer 2780 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2781 2782 // 77-108: v0-31, the 16-byte vector registers 2783 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2784 2785 // 109: vrsave 2786 // 110: vscr 2787 // 111: spe_acc 2788 // 112: spefscr 2789 // 113: sfp 2790 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2791 2792 return false; 2793 } 2794 2795 bool 2796 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( 2797 CodeGen::CodeGenFunction &CGF, 2798 llvm::Value *Address) const { 2799 2800 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 2801 } 2802 2803 bool 2804 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2805 llvm::Value *Address) const { 2806 2807 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 2808 } 2809 2810 //===----------------------------------------------------------------------===// 2811 // ARM ABI Implementation 2812 //===----------------------------------------------------------------------===// 2813 2814 namespace { 2815 2816 class ARMABIInfo : public ABIInfo { 2817 public: 2818 enum ABIKind { 2819 APCS = 0, 2820 AAPCS = 1, 2821 AAPCS_VFP 2822 }; 2823 2824 private: 2825 ABIKind Kind; 2826 2827 public: 2828 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {} 2829 2830 bool isEABI() const { 2831 StringRef Env = 2832 getContext().getTargetInfo().getTriple().getEnvironmentName(); 2833 return (Env == "gnueabi" || Env == "eabi" || 2834 Env == "android" || Env == "androideabi"); 2835 } 2836 2837 private: 2838 ABIKind getABIKind() const { return Kind; } 2839 2840 ABIArgInfo classifyReturnType(QualType RetTy) const; 2841 ABIArgInfo classifyArgumentType(QualType RetTy) const; 2842 bool isIllegalVectorType(QualType Ty) const; 2843 2844 virtual void computeInfo(CGFunctionInfo &FI) const; 2845 2846 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2847 CodeGenFunction &CGF) const; 2848 }; 2849 2850 class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 2851 public: 2852 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 2853 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 2854 2855 const ARMABIInfo &getABIInfo() const { 2856 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 2857 } 2858 2859 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2860 return 13; 2861 } 2862 2863 StringRef getARCRetainAutoreleasedReturnValueMarker() const { 2864 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; 2865 } 2866 2867 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2868 llvm::Value *Address) const { 2869 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 2870 2871 // 0-15 are the 16 integer registers. 
2872 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); 2873 return false; 2874 } 2875 2876 unsigned getSizeOfUnwindException() const { 2877 if (getABIInfo().isEABI()) return 88; 2878 return TargetCodeGenInfo::getSizeOfUnwindException(); 2879 } 2880 }; 2881 2882 } 2883 2884 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 2885 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2886 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2887 it != ie; ++it) 2888 it->info = classifyArgumentType(it->type); 2889 2890 // Always honor user-specified calling convention. 2891 if (FI.getCallingConvention() != llvm::CallingConv::C) 2892 return; 2893 2894 // Calling convention as default by an ABI. 2895 llvm::CallingConv::ID DefaultCC; 2896 if (isEABI()) 2897 DefaultCC = llvm::CallingConv::ARM_AAPCS; 2898 else 2899 DefaultCC = llvm::CallingConv::ARM_APCS; 2900 2901 // If user did not ask for specific calling convention explicitly (e.g. via 2902 // pcs attribute), set effective calling convention if it's different than ABI 2903 // default. 2904 switch (getABIKind()) { 2905 case APCS: 2906 if (DefaultCC != llvm::CallingConv::ARM_APCS) 2907 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS); 2908 break; 2909 case AAPCS: 2910 if (DefaultCC != llvm::CallingConv::ARM_AAPCS) 2911 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS); 2912 break; 2913 case AAPCS_VFP: 2914 if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP) 2915 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP); 2916 break; 2917 } 2918 } 2919 2920 /// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous 2921 /// aggregate. If HAMembers is non-null, the number of base elements 2922 /// contained in the type is returned through it; this is used for the 2923 /// recursive calls that check aggregate component types. 2924 static bool isHomogeneousAggregate(QualType Ty, const Type *&Base, 2925 ASTContext &Context, 2926 uint64_t *HAMembers = 0) { 2927 uint64_t Members = 0; 2928 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 2929 if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members)) 2930 return false; 2931 Members *= AT->getSize().getZExtValue(); 2932 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 2933 const RecordDecl *RD = RT->getDecl(); 2934 if (RD->hasFlexibleArrayMember()) 2935 return false; 2936 2937 Members = 0; 2938 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2939 i != e; ++i) { 2940 const FieldDecl *FD = *i; 2941 uint64_t FldMembers; 2942 if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers)) 2943 return false; 2944 2945 Members = (RD->isUnion() ? 2946 std::max(Members, FldMembers) : Members + FldMembers); 2947 } 2948 } else { 2949 Members = 1; 2950 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 2951 Members = 2; 2952 Ty = CT->getElementType(); 2953 } 2954 2955 // Homogeneous aggregates for AAPCS-VFP must have base types of float, 2956 // double, or 64-bit or 128-bit vectors. 
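    // e.g. (illustrative) struct { float x, y, z, w; } and
    // struct { double re, im; } qualify as homogeneous aggregates, while
    // struct { float f; int i; } does not, since 'int' is not a valid base.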
2957 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 2958 if (BT->getKind() != BuiltinType::Float && 2959 BT->getKind() != BuiltinType::Double && 2960 BT->getKind() != BuiltinType::LongDouble) 2961 return false; 2962 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 2963 unsigned VecSize = Context.getTypeSize(VT); 2964 if (VecSize != 64 && VecSize != 128) 2965 return false; 2966 } else { 2967 return false; 2968 } 2969 2970 // The base type must be the same for all members. Vector types of the 2971 // same total size are treated as being equivalent here. 2972 const Type *TyPtr = Ty.getTypePtr(); 2973 if (!Base) 2974 Base = TyPtr; 2975 if (Base != TyPtr && 2976 (!Base->isVectorType() || !TyPtr->isVectorType() || 2977 Context.getTypeSize(Base) != Context.getTypeSize(TyPtr))) 2978 return false; 2979 } 2980 2981 // Homogeneous Aggregates can have at most 4 members of the base type. 2982 if (HAMembers) 2983 *HAMembers = Members; 2984 2985 return (Members > 0 && Members <= 4); 2986 } 2987 2988 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const { 2989 // Handle illegal vector types here. 2990 if (isIllegalVectorType(Ty)) { 2991 uint64_t Size = getContext().getTypeSize(Ty); 2992 if (Size <= 32) { 2993 llvm::Type *ResType = 2994 llvm::Type::getInt32Ty(getVMContext()); 2995 return ABIArgInfo::getDirect(ResType); 2996 } 2997 if (Size == 64) { 2998 llvm::Type *ResType = llvm::VectorType::get( 2999 llvm::Type::getInt32Ty(getVMContext()), 2); 3000 return ABIArgInfo::getDirect(ResType); 3001 } 3002 if (Size == 128) { 3003 llvm::Type *ResType = llvm::VectorType::get( 3004 llvm::Type::getInt32Ty(getVMContext()), 4); 3005 return ABIArgInfo::getDirect(ResType); 3006 } 3007 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3008 } 3009 3010 if (!isAggregateTypeForABI(Ty)) { 3011 // Treat an enum type as its underlying type. 3012 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3013 Ty = EnumTy->getDecl()->getIntegerType(); 3014 3015 return (Ty->isPromotableIntegerType() ? 3016 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3017 } 3018 3019 // Ignore empty records. 3020 if (isEmptyRecord(getContext(), Ty, true)) 3021 return ABIArgInfo::getIgnore(); 3022 3023 // Structures with either a non-trivial destructor or a non-trivial 3024 // copy constructor are always indirect. 3025 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 3026 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3027 3028 if (getABIKind() == ARMABIInfo::AAPCS_VFP) { 3029 // Homogeneous Aggregates need to be expanded. 3030 const Type *Base = 0; 3031 if (isHomogeneousAggregate(Ty, Base, getContext())) { 3032 assert(Base && "Base class should be set for homogeneous aggregate"); 3033 return ABIArgInfo::getExpand(); 3034 } 3035 } 3036 3037 // Support byval for ARM. 3038 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64) || 3039 getContext().getTypeAlign(Ty) > 64) { 3040 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 3041 } 3042 3043 // Otherwise, pass by coercing to a structure of the appropriate size. 3044 llvm::Type* ElemTy; 3045 unsigned SizeRegs; 3046 // FIXME: Try to match the types of the arguments more accurately where 3047 // we can. 
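  // For example (illustrative): a 12-byte struct with 4-byte alignment is
  // coerced to { [3 x i32] }, while a 16-byte struct with 8-byte alignment
  // becomes { [2 x i64] }.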
3048 if (getContext().getTypeAlign(Ty) <= 32) { 3049 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 3050 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 3051 } else { 3052 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 3053 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 3054 } 3055 3056 llvm::Type *STy = 3057 llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL); 3058 return ABIArgInfo::getDirect(STy); 3059 } 3060 3061 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 3062 llvm::LLVMContext &VMContext) { 3063 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 3064 // is called integer-like if its size is less than or equal to one word, and 3065 // the offset of each of its addressable sub-fields is zero. 3066 3067 uint64_t Size = Context.getTypeSize(Ty); 3068 3069 // Check that the type fits in a word. 3070 if (Size > 32) 3071 return false; 3072 3073 // FIXME: Handle vector types! 3074 if (Ty->isVectorType()) 3075 return false; 3076 3077 // Float types are never treated as "integer like". 3078 if (Ty->isRealFloatingType()) 3079 return false; 3080 3081 // If this is a builtin or pointer type then it is ok. 3082 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 3083 return true; 3084 3085 // Small complex integer types are "integer like". 3086 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 3087 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 3088 3089 // Single element and zero sized arrays should be allowed, by the definition 3090 // above, but they are not. 3091 3092 // Otherwise, it must be a record type. 3093 const RecordType *RT = Ty->getAs<RecordType>(); 3094 if (!RT) return false; 3095 3096 // Ignore records with flexible arrays. 3097 const RecordDecl *RD = RT->getDecl(); 3098 if (RD->hasFlexibleArrayMember()) 3099 return false; 3100 3101 // Check that all sub-fields are at offset 0, and are themselves "integer 3102 // like". 3103 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 3104 3105 bool HadField = false; 3106 unsigned idx = 0; 3107 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3108 i != e; ++i, ++idx) { 3109 const FieldDecl *FD = *i; 3110 3111 // Bit-fields are not addressable, we only need to verify they are "integer 3112 // like". We still have to disallow a subsequent non-bitfield, for example: 3113 // struct { int : 0; int x } 3114 // is non-integer like according to gcc. 3115 if (FD->isBitField()) { 3116 if (!RD->isUnion()) 3117 HadField = true; 3118 3119 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 3120 return false; 3121 3122 continue; 3123 } 3124 3125 // Check if this field is at offset 0. 3126 if (Layout.getFieldOffset(idx) != 0) 3127 return false; 3128 3129 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 3130 return false; 3131 3132 // Only allow at most one field in a structure. This doesn't match the 3133 // wording above, but follows gcc in situations with a field following an 3134 // empty structure. 3135 if (!RD->isUnion()) { 3136 if (HadField) 3137 return false; 3138 3139 HadField = true; 3140 } 3141 } 3142 3143 return true; 3144 } 3145 3146 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const { 3147 if (RetTy->isVoidType()) 3148 return ABIArgInfo::getIgnore(); 3149 3150 // Large vector types should be returned via memory. 
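  // (Illustrative: a 256-bit vector return is classified indirect here, while
  // 64-bit and 128-bit NEON vectors fall through and are returned directly.)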
3151 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 3152 return ABIArgInfo::getIndirect(0); 3153 3154 if (!isAggregateTypeForABI(RetTy)) { 3155 // Treat an enum type as its underlying type. 3156 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3157 RetTy = EnumTy->getDecl()->getIntegerType(); 3158 3159 return (RetTy->isPromotableIntegerType() ? 3160 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3161 } 3162 3163 // Structures with either a non-trivial destructor or a non-trivial 3164 // copy constructor are always indirect. 3165 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 3166 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3167 3168 // Are we following APCS? 3169 if (getABIKind() == APCS) { 3170 if (isEmptyRecord(getContext(), RetTy, false)) 3171 return ABIArgInfo::getIgnore(); 3172 3173 // Complex types are all returned as packed integers. 3174 // 3175 // FIXME: Consider using 2 x vector types if the back end handles them 3176 // correctly. 3177 if (RetTy->isAnyComplexType()) 3178 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 3179 getContext().getTypeSize(RetTy))); 3180 3181 // Integer-like structures are returned in r0. 3182 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 3183 // Return in the smallest viable integer type. 3184 uint64_t Size = getContext().getTypeSize(RetTy); 3185 if (Size <= 8) 3186 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3187 if (Size <= 16) 3188 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3189 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3190 } 3191 3192 // Otherwise return in memory. 3193 return ABIArgInfo::getIndirect(0); 3194 } 3195 3196 // Otherwise this is an AAPCS variant. 3197 3198 if (isEmptyRecord(getContext(), RetTy, true)) 3199 return ABIArgInfo::getIgnore(); 3200 3201 // Check for homogeneous aggregates with AAPCS-VFP. 3202 if (getABIKind() == AAPCS_VFP) { 3203 const Type *Base = 0; 3204 if (isHomogeneousAggregate(RetTy, Base, getContext())) { 3205 assert(Base && "Base class should be set for homogeneous aggregate"); 3206 // Homogeneous Aggregates are returned directly. 3207 return ABIArgInfo::getDirect(); 3208 } 3209 } 3210 3211 // Aggregates <= 4 bytes are returned in r0; other aggregates 3212 // are returned indirectly. 3213 uint64_t Size = getContext().getTypeSize(RetTy); 3214 if (Size <= 32) { 3215 // Return in the smallest viable integer type. 3216 if (Size <= 8) 3217 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3218 if (Size <= 16) 3219 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3220 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3221 } 3222 3223 return ABIArgInfo::getIndirect(0); 3224 } 3225 3226 /// isIllegalVectorType - Check whether Ty is an illegal vector type. 3227 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const { 3228 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3229 // Check whether VT is legal. 3230 unsigned NumElements = VT->getNumElements(); 3231 uint64_t Size = getContext().getTypeSize(VT); 3232 // NumElements should be a power of 2. 3233 if ((NumElements & (NumElements - 1)) != 0) 3234 return true; 3235 // Size should be greater than 32 bits.
3236 return Size <= 32; 3237 } 3238 return false; 3239 } 3240 3241 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3242 CodeGenFunction &CGF) const { 3243 llvm::Type *BP = CGF.Int8PtrTy; 3244 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3245 3246 CGBuilderTy &Builder = CGF.Builder; 3247 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 3248 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3249 3250 uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8; 3251 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; 3252 bool IsIndirect = false; 3253 3254 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for 3255 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte. 3256 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 3257 getABIKind() == ARMABIInfo::AAPCS) 3258 TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 3259 else 3260 TyAlign = 4; 3261 // Use indirect if size of the illegal vector is bigger than 16 bytes. 3262 if (isIllegalVectorType(Ty) && Size > 16) { 3263 IsIndirect = true; 3264 Size = 4; 3265 TyAlign = 4; 3266 } 3267 3268 // Handle address alignment for ABI alignment > 4 bytes. 3269 if (TyAlign > 4) { 3270 assert((TyAlign & (TyAlign - 1)) == 0 && 3271 "Alignment is not power of 2!"); 3272 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty); 3273 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1)); 3274 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1))); 3275 Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align"); 3276 } 3277 3278 uint64_t Offset = 3279 llvm::RoundUpToAlignment(Size, 4); 3280 llvm::Value *NextAddr = 3281 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 3282 "ap.next"); 3283 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3284 3285 if (IsIndirect) 3286 Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP)); 3287 else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) { 3288 // We can't directly cast ap.cur to pointer to a vector type, since ap.cur 3289 // may not be correctly aligned for the vector type. We create an aligned 3290 // temporary space and copy the content over from ap.cur to the temporary 3291 // space. This is necessary if the natural alignment of the type is greater 3292 // than the ABI alignment. 3293 llvm::Type *I8PtrTy = Builder.getInt8PtrTy(); 3294 CharUnits CharSize = getContext().getTypeSizeInChars(Ty); 3295 llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty), 3296 "var.align"); 3297 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy); 3298 llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy); 3299 Builder.CreateMemCpy(Dst, Src, 3300 llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()), 3301 TyAlign, false); 3302 Addr = AlignedTemp; //The content is in aligned location. 
3303 } 3304 llvm::Type *PTy = 3305 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3306 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 3307 3308 return AddrTyped; 3309 } 3310 3311 namespace { 3312 3313 class NaClARMABIInfo : public ABIInfo { 3314 public: 3315 NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind) 3316 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {} 3317 virtual void computeInfo(CGFunctionInfo &FI) const; 3318 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3319 CodeGenFunction &CGF) const; 3320 private: 3321 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv. 3322 ARMABIInfo NInfo; // Used for everything else. 3323 }; 3324 3325 class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo { 3326 public: 3327 NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind) 3328 : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {} 3329 }; 3330 3331 } 3332 3333 void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 3334 if (FI.getASTCallingConvention() == CC_PnaclCall) 3335 PInfo.computeInfo(FI); 3336 else 3337 static_cast<const ABIInfo&>(NInfo).computeInfo(FI); 3338 } 3339 3340 llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3341 CodeGenFunction &CGF) const { 3342 // Always use the native convention; calling pnacl-style varargs functions 3343 // is unsupported. 3344 return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF); 3345 } 3346 3347 //===----------------------------------------------------------------------===// 3348 // NVPTX ABI Implementation 3349 //===----------------------------------------------------------------------===// 3350 3351 namespace { 3352 3353 class NVPTXABIInfo : public ABIInfo { 3354 public: 3355 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3356 3357 ABIArgInfo classifyReturnType(QualType RetTy) const; 3358 ABIArgInfo classifyArgumentType(QualType Ty) const; 3359 3360 virtual void computeInfo(CGFunctionInfo &FI) const; 3361 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3362 CodeGenFunction &CFG) const; 3363 }; 3364 3365 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { 3366 public: 3367 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT) 3368 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {} 3369 3370 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3371 CodeGen::CodeGenModule &M) const; 3372 }; 3373 3374 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const { 3375 if (RetTy->isVoidType()) 3376 return ABIArgInfo::getIgnore(); 3377 if (isAggregateTypeForABI(RetTy)) 3378 return ABIArgInfo::getIndirect(0); 3379 return ABIArgInfo::getDirect(); 3380 } 3381 3382 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const { 3383 if (isAggregateTypeForABI(Ty)) 3384 return ABIArgInfo::getIndirect(0); 3385 3386 return ABIArgInfo::getDirect(); 3387 } 3388 3389 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const { 3390 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3391 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3392 it != ie; ++it) 3393 it->info = classifyArgumentType(it->type); 3394 3395 // Always honor user-specified calling convention. 3396 if (FI.getCallingConvention() != llvm::CallingConv::C) 3397 return; 3398 3399 // Calling convention as default by an ABI. 3400 // We're still using the PTX_Kernel/PTX_Device calling conventions here, 3401 // but we should switch to NVVM metadata later on. 
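  // For illustration (the declarations below are hypothetical, not taken from
  // this file): when compiling CUDA code,
  //   __device__ int helper(int);    // defaults to PTX_Device here
  //   __global__ void kern(int *);   // later promoted to PTX_Kernel in
  //                                  // SetTargetAttributes below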
3402 llvm::CallingConv::ID DefaultCC; 3403 const LangOptions &LangOpts = getContext().getLangOpts(); 3404 if (LangOpts.OpenCL || LangOpts.CUDA) { 3405 // If we are in OpenCL or CUDA mode, then default to device functions 3406 DefaultCC = llvm::CallingConv::PTX_Device; 3407 } else { 3408 // If we are in standard C/C++ mode, use the triple to decide on the default 3409 StringRef Env = 3410 getContext().getTargetInfo().getTriple().getEnvironmentName(); 3411 if (Env == "device") 3412 DefaultCC = llvm::CallingConv::PTX_Device; 3413 else 3414 DefaultCC = llvm::CallingConv::PTX_Kernel; 3415 } 3416 FI.setEffectiveCallingConvention(DefaultCC); 3417 3418 } 3419 3420 llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3421 CodeGenFunction &CFG) const { 3422 llvm_unreachable("NVPTX does not support varargs"); 3423 } 3424 3425 void NVPTXTargetCodeGenInfo:: 3426 SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3427 CodeGen::CodeGenModule &M) const{ 3428 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3429 if (!FD) return; 3430 3431 llvm::Function *F = cast<llvm::Function>(GV); 3432 3433 // Perform special handling in OpenCL mode 3434 if (M.getLangOpts().OpenCL) { 3435 // Use OpenCL function attributes to set proper calling conventions 3436 // By default, all functions are device functions 3437 if (FD->hasAttr<OpenCLKernelAttr>()) { 3438 // OpenCL __kernel functions get a kernel calling convention 3439 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 3440 // And kernel functions are not subject to inlining 3441 F->addFnAttr(llvm::Attributes::NoInline); 3442 } 3443 } 3444 3445 // Perform special handling in CUDA mode. 3446 if (M.getLangOpts().CUDA) { 3447 // CUDA __global__ functions get a kernel calling convention. Since 3448 // __global__ functions cannot be called from the device, we do not 3449 // need to set the noinline attribute. 3450 if (FD->getAttr<CUDAGlobalAttr>()) 3451 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 3452 } 3453 } 3454 3455 } 3456 3457 //===----------------------------------------------------------------------===// 3458 // MBlaze ABI Implementation 3459 //===----------------------------------------------------------------------===// 3460 3461 namespace { 3462 3463 class MBlazeABIInfo : public ABIInfo { 3464 public: 3465 MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3466 3467 bool isPromotableIntegerType(QualType Ty) const; 3468 3469 ABIArgInfo classifyReturnType(QualType RetTy) const; 3470 ABIArgInfo classifyArgumentType(QualType RetTy) const; 3471 3472 virtual void computeInfo(CGFunctionInfo &FI) const { 3473 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3474 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3475 it != ie; ++it) 3476 it->info = classifyArgumentType(it->type); 3477 } 3478 3479 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3480 CodeGenFunction &CGF) const; 3481 }; 3482 3483 class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo { 3484 public: 3485 MBlazeTargetCodeGenInfo(CodeGenTypes &CGT) 3486 : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {} 3487 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3488 CodeGen::CodeGenModule &M) const; 3489 }; 3490 3491 } 3492 3493 bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const { 3494 // MBlaze ABI requires all 8 and 16 bit quantities to be extended. 
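  // For example, 'short' and 'unsigned char' values are reported as promotable
  // here and end up classified Extend, while 'int' and wider types are passed
  // Direct.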
3495 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 3496 switch (BT->getKind()) { 3497 case BuiltinType::Bool: 3498 case BuiltinType::Char_S: 3499 case BuiltinType::Char_U: 3500 case BuiltinType::SChar: 3501 case BuiltinType::UChar: 3502 case BuiltinType::Short: 3503 case BuiltinType::UShort: 3504 return true; 3505 default: 3506 return false; 3507 } 3508 return false; 3509 } 3510 3511 llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3512 CodeGenFunction &CGF) const { 3513 // FIXME: Implement 3514 return 0; 3515 } 3516 3517 3518 ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const { 3519 if (RetTy->isVoidType()) 3520 return ABIArgInfo::getIgnore(); 3521 if (isAggregateTypeForABI(RetTy)) 3522 return ABIArgInfo::getIndirect(0); 3523 3524 return (isPromotableIntegerType(RetTy) ? 3525 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3526 } 3527 3528 ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const { 3529 if (isAggregateTypeForABI(Ty)) 3530 return ABIArgInfo::getIndirect(0); 3531 3532 return (isPromotableIntegerType(Ty) ? 3533 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3534 } 3535 3536 void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3537 llvm::GlobalValue *GV, 3538 CodeGen::CodeGenModule &M) 3539 const { 3540 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3541 if (!FD) return; 3542 3543 llvm::CallingConv::ID CC = llvm::CallingConv::C; 3544 if (FD->hasAttr<MBlazeInterruptHandlerAttr>()) 3545 CC = llvm::CallingConv::MBLAZE_INTR; 3546 else if (FD->hasAttr<MBlazeSaveVolatilesAttr>()) 3547 CC = llvm::CallingConv::MBLAZE_SVOL; 3548 3549 if (CC != llvm::CallingConv::C) { 3550 // Handle 'interrupt_handler' attribute: 3551 llvm::Function *F = cast<llvm::Function>(GV); 3552 3553 // Step 1: Set ISR calling convention. 3554 F->setCallingConv(CC); 3555 3556 // Step 2: Add attributes goodness. 3557 F->addFnAttr(llvm::Attributes::NoInline); 3558 } 3559 3560 // Step 3: Emit _interrupt_handler alias. 3561 if (CC == llvm::CallingConv::MBLAZE_INTR) 3562 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 3563 "_interrupt_handler", GV, &M.getModule()); 3564 } 3565 3566 3567 //===----------------------------------------------------------------------===// 3568 // MSP430 ABI Implementation 3569 //===----------------------------------------------------------------------===// 3570 3571 namespace { 3572 3573 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 3574 public: 3575 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 3576 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 3577 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3578 CodeGen::CodeGenModule &M) const; 3579 }; 3580 3581 } 3582 3583 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3584 llvm::GlobalValue *GV, 3585 CodeGen::CodeGenModule &M) const { 3586 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 3587 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 3588 // Handle 'interrupt' attribute: 3589 llvm::Function *F = cast<llvm::Function>(GV); 3590 3591 // Step 1: Set ISR calling convention. 3592 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 3593 3594 // Step 2: Add attributes goodness. 3595 F->addFnAttr(llvm::Attributes::NoInline); 3596 3597 // Step 3: Emit ISR vector alias. 
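      // For example, a handler declared with __attribute__((interrupt(2)))
      // ends up with an alias named "vector_ffe2" (the vector number added to
      // the 0xffe0 base below).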
3598 unsigned Num = attr->getNumber() + 0xffe0; 3599 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 3600 "vector_" + Twine::utohexstr(Num), 3601 GV, &M.getModule()); 3602 } 3603 } 3604 } 3605 3606 //===----------------------------------------------------------------------===// 3607 // MIPS ABI Implementation. This works for both little-endian and 3608 // big-endian variants. 3609 //===----------------------------------------------------------------------===// 3610 3611 namespace { 3612 class MipsABIInfo : public ABIInfo { 3613 bool IsO32; 3614 unsigned MinABIStackAlignInBytes, StackAlignInBytes; 3615 void CoerceToIntArgs(uint64_t TySize, 3616 SmallVector<llvm::Type*, 8> &ArgList) const; 3617 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; 3618 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 3619 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 3620 public: 3621 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 3622 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), 3623 StackAlignInBytes(IsO32 ? 8 : 16) {} 3624 3625 ABIArgInfo classifyReturnType(QualType RetTy) const; 3626 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 3627 virtual void computeInfo(CGFunctionInfo &FI) const; 3628 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3629 CodeGenFunction &CGF) const; 3630 }; 3631 3632 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 3633 unsigned SizeOfUnwindException; 3634 public: 3635 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 3636 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 3637 SizeOfUnwindException(IsO32 ? 24 : 32) {} 3638 3639 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 3640 return 29; 3641 } 3642 3643 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3644 llvm::Value *Address) const; 3645 3646 unsigned getSizeOfUnwindException() const { 3647 return SizeOfUnwindException; 3648 } 3649 }; 3650 } 3651 3652 void MipsABIInfo::CoerceToIntArgs(uint64_t TySize, 3653 SmallVector<llvm::Type*, 8> &ArgList) const { 3654 llvm::IntegerType *IntTy = 3655 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 3656 3657 // Add (TySize / MinABIStackAlignInBytes) args of IntTy. 3658 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) 3659 ArgList.push_back(IntTy); 3660 3661 // If necessary, add one more integer type to ArgList. 3662 unsigned R = TySize % (MinABIStackAlignInBytes * 8); 3663 3664 if (R) 3665 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 3666 } 3667 3668 // In N32/64, an aligned double precision floating point field is passed in 3669 // a register. 3670 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { 3671 SmallVector<llvm::Type*, 8> ArgList, IntArgList; 3672 3673 if (IsO32) { 3674 CoerceToIntArgs(TySize, ArgList); 3675 return llvm::StructType::get(getVMContext(), ArgList); 3676 } 3677 3678 if (Ty->isComplexType()) 3679 return CGT.ConvertType(Ty); 3680 3681 const RecordType *RT = Ty->getAs<RecordType>(); 3682 3683 // Unions/vectors are passed in integer registers. 
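  // For example, a 16-byte union on N32/N64 is simply coerced to
  // { i64, i64 } here.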
3684 if (!RT || !RT->isStructureOrClassType()) { 3685 CoerceToIntArgs(TySize, ArgList); 3686 return llvm::StructType::get(getVMContext(), ArgList); 3687 } 3688 3689 const RecordDecl *RD = RT->getDecl(); 3690 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3691 assert(!(TySize % 8) && "Size of structure must be multiple of 8."); 3692 3693 uint64_t LastOffset = 0; 3694 unsigned idx = 0; 3695 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 3696 3697 // Iterate over fields in the struct/class and check if there are any aligned 3698 // double fields. 3699 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3700 i != e; ++i, ++idx) { 3701 const QualType Ty = i->getType(); 3702 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 3703 3704 if (!BT || BT->getKind() != BuiltinType::Double) 3705 continue; 3706 3707 uint64_t Offset = Layout.getFieldOffset(idx); 3708 if (Offset % 64) // Ignore doubles that are not aligned. 3709 continue; 3710 3711 // Add ((Offset - LastOffset) / 64) args of type i64. 3712 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 3713 ArgList.push_back(I64); 3714 3715 // Add double type. 3716 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 3717 LastOffset = Offset + 64; 3718 } 3719 3720 CoerceToIntArgs(TySize - LastOffset, IntArgList); 3721 ArgList.append(IntArgList.begin(), IntArgList.end()); 3722 3723 return llvm::StructType::get(getVMContext(), ArgList); 3724 } 3725 3726 llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const { 3727 assert((Offset % MinABIStackAlignInBytes) == 0); 3728 3729 if ((Align - 1) & Offset) 3730 return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 3731 3732 return 0; 3733 } 3734 3735 ABIArgInfo 3736 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 3737 uint64_t OrigOffset = Offset; 3738 uint64_t TySize = getContext().getTypeSize(Ty); 3739 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 3740 3741 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), 3742 (uint64_t)StackAlignInBytes); 3743 Offset = llvm::RoundUpToAlignment(Offset, Align); 3744 Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8; 3745 3746 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { 3747 // Ignore empty aggregates. 3748 if (TySize == 0) 3749 return ABIArgInfo::getIgnore(); 3750 3751 // Records with non trivial destructors/constructors should not be passed 3752 // by value. 3753 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) { 3754 Offset = OrigOffset + MinABIStackAlignInBytes; 3755 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3756 } 3757 3758 // If we have reached here, aggregates are passed directly by coercing to 3759 // another structure type. Padding is inserted if the offset of the 3760 // aggregate is unaligned. 3761 return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, 3762 getPaddingType(Align, OrigOffset)); 3763 } 3764 3765 // Treat an enum type as its underlying type. 
3766 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3767 Ty = EnumTy->getDecl()->getIntegerType(); 3768 3769 if (Ty->isPromotableIntegerType()) 3770 return ABIArgInfo::getExtend(); 3771 3772 return ABIArgInfo::getDirect(0, 0, getPaddingType(Align, OrigOffset)); 3773 } 3774 3775 llvm::Type* 3776 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 3777 const RecordType *RT = RetTy->getAs<RecordType>(); 3778 SmallVector<llvm::Type*, 8> RTList; 3779 3780 if (RT && RT->isStructureOrClassType()) { 3781 const RecordDecl *RD = RT->getDecl(); 3782 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3783 unsigned FieldCnt = Layout.getFieldCount(); 3784 3785 // N32/64 returns struct/classes in floating point registers if the 3786 // following conditions are met: 3787 // 1. The size of the struct/class is no larger than 128-bit. 3788 // 2. The struct/class has one or two fields all of which are floating 3789 // point types. 3790 // 3. The offset of the first field is zero (this follows what gcc does). 3791 // 3792 // Any other composite results are returned in integer registers. 3793 // 3794 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 3795 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 3796 for (; b != e; ++b) { 3797 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 3798 3799 if (!BT || !BT->isFloatingPoint()) 3800 break; 3801 3802 RTList.push_back(CGT.ConvertType(b->getType())); 3803 } 3804 3805 if (b == e) 3806 return llvm::StructType::get(getVMContext(), RTList, 3807 RD->hasAttr<PackedAttr>()); 3808 3809 RTList.clear(); 3810 } 3811 } 3812 3813 CoerceToIntArgs(Size, RTList); 3814 return llvm::StructType::get(getVMContext(), RTList); 3815 } 3816 3817 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 3818 uint64_t Size = getContext().getTypeSize(RetTy); 3819 3820 if (RetTy->isVoidType() || Size == 0) 3821 return ABIArgInfo::getIgnore(); 3822 3823 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 3824 if (Size <= 128) { 3825 if (RetTy->isAnyComplexType()) 3826 return ABIArgInfo::getDirect(); 3827 3828 // O32 returns integer vectors in registers. 3829 if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation()) 3830 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 3831 3832 if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 3833 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 3834 } 3835 3836 return ABIArgInfo::getIndirect(0); 3837 } 3838 3839 // Treat an enum type as its underlying type. 3840 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3841 RetTy = EnumTy->getDecl()->getIntegerType(); 3842 3843 return (RetTy->isPromotableIntegerType() ? 3844 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3845 } 3846 3847 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 3848 ABIArgInfo &RetInfo = FI.getReturnInfo(); 3849 RetInfo = classifyReturnType(FI.getReturnType()); 3850 3851 // Check if a pointer to an aggregate is passed as a hidden argument. 3852 uint64_t Offset = RetInfo.isIndirect() ? 
MinABIStackAlignInBytes : 0; 3853 3854 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3855 it != ie; ++it) 3856 it->info = classifyArgumentType(it->type, Offset); 3857 } 3858 3859 llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3860 CodeGenFunction &CGF) const { 3861 llvm::Type *BP = CGF.Int8PtrTy; 3862 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3863 3864 CGBuilderTy &Builder = CGF.Builder; 3865 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 3866 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3867 int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8; 3868 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3869 llvm::Value *AddrTyped; 3870 unsigned PtrWidth = getContext().getTargetInfo().getPointerWidth(0); 3871 llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty; 3872 3873 if (TypeAlign > MinABIStackAlignInBytes) { 3874 llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy); 3875 llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1); 3876 llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign); 3877 llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc); 3878 llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask); 3879 AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy); 3880 } 3881 else 3882 AddrTyped = Builder.CreateBitCast(Addr, PTy); 3883 3884 llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP); 3885 TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes); 3886 uint64_t Offset = 3887 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign); 3888 llvm::Value *NextAddr = 3889 Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset), 3890 "ap.next"); 3891 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3892 3893 return AddrTyped; 3894 } 3895 3896 bool 3897 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3898 llvm::Value *Address) const { 3899 // This information comes from gcc's implementation, which seems to be 3900 // as canonical as it gets. 3901 3902 // Everything on MIPS is 4 bytes. Double-precision FP registers 3903 // are aliased to pairs of single-precision FP registers. 3904 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 3905 3906 // 0-31 are the general purpose registers, $0 - $31. 3907 // 32-63 are the floating-point registers, $f0 - $f31. 3908 // 64 and 65 are the multiply/divide registers, $hi and $lo. 3909 // 66 is the (notional, I think) register for signal-handler return. 3910 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65); 3911 3912 // 67-74 are the floating-point status registers, $fcc0 - $fcc7. 3913 // They are one bit wide and ignored here. 3914 3915 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31. 3916 // (coprocessor 1 is the FP unit) 3917 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31. 3918 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31. 3919 // 176-181 are the DSP accumulator registers. 3920 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181); 3921 return false; 3922 } 3923 3924 //===----------------------------------------------------------------------===// 3925 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults. 3926 // Currently subclassed only to implement custom OpenCL C function attribute 3927 // handling.
3928 //===----------------------------------------------------------------------===// 3929 3930 namespace { 3931 3932 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { 3933 public: 3934 TCETargetCodeGenInfo(CodeGenTypes &CGT) 3935 : DefaultTargetCodeGenInfo(CGT) {} 3936 3937 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3938 CodeGen::CodeGenModule &M) const; 3939 }; 3940 3941 void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3942 llvm::GlobalValue *GV, 3943 CodeGen::CodeGenModule &M) const { 3944 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3945 if (!FD) return; 3946 3947 llvm::Function *F = cast<llvm::Function>(GV); 3948 3949 if (M.getLangOpts().OpenCL) { 3950 if (FD->hasAttr<OpenCLKernelAttr>()) { 3951 // OpenCL C Kernel functions are not subject to inlining 3952 F->addFnAttr(llvm::Attributes::NoInline); 3953 3954 if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) { 3955 3956 // Convert the reqd_work_group_size() attributes to metadata. 3957 llvm::LLVMContext &Context = F->getContext(); 3958 llvm::NamedMDNode *OpenCLMetadata = 3959 M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info"); 3960 3961 SmallVector<llvm::Value*, 5> Operands; 3962 Operands.push_back(F); 3963 3964 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3965 llvm::APInt(32, 3966 FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim()))); 3967 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3968 llvm::APInt(32, 3969 FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim()))); 3970 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3971 llvm::APInt(32, 3972 FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim()))); 3973 3974 // Add a boolean constant operand for "required" (true) or "hint" (false) 3975 // for implementing the work_group_size_hint attr later. Currently 3976 // always true as the hint is not yet implemented. 3977 Operands.push_back(llvm::ConstantInt::getTrue(Context)); 3978 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); 3979 } 3980 } 3981 } 3982 } 3983 3984 } 3985 3986 //===----------------------------------------------------------------------===// 3987 // Hexagon ABI Implementation 3988 //===----------------------------------------------------------------------===// 3989 3990 namespace { 3991 3992 class HexagonABIInfo : public ABIInfo { 3993 3994 3995 public: 3996 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3997 3998 private: 3999 4000 ABIArgInfo classifyReturnType(QualType RetTy) const; 4001 ABIArgInfo classifyArgumentType(QualType RetTy) const; 4002 4003 virtual void computeInfo(CGFunctionInfo &FI) const; 4004 4005 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4006 CodeGenFunction &CGF) const; 4007 }; 4008 4009 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { 4010 public: 4011 HexagonTargetCodeGenInfo(CodeGenTypes &CGT) 4012 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {} 4013 4014 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 4015 return 29; 4016 } 4017 }; 4018 4019 } 4020 4021 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { 4022 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4023 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 4024 it != ie; ++it) 4025 it->info = classifyArgumentType(it->type); 4026 } 4027 4028 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { 4029 if (!isAggregateTypeForABI(Ty)) { 4030 // Treat an enum type as its underlying type. 
4031 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4032 Ty = EnumTy->getDecl()->getIntegerType(); 4033 4034 return (Ty->isPromotableIntegerType() ? 4035 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4036 } 4037 4038 // Ignore empty records. 4039 if (isEmptyRecord(getContext(), Ty, true)) 4040 return ABIArgInfo::getIgnore(); 4041 4042 // Structures with either a non-trivial destructor or a non-trivial 4043 // copy constructor are always indirect. 4044 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 4045 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 4046 4047 uint64_t Size = getContext().getTypeSize(Ty); 4048 if (Size > 64) 4049 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 4050 // Pass in the smallest viable integer type. 4051 else if (Size > 32) 4052 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 4053 else if (Size > 16) 4054 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 4055 else if (Size > 8) 4056 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 4057 else 4058 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 4059 } 4060 4061 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { 4062 if (RetTy->isVoidType()) 4063 return ABIArgInfo::getIgnore(); 4064 4065 // Large vector types should be returned via memory. 4066 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64) 4067 return ABIArgInfo::getIndirect(0); 4068 4069 if (!isAggregateTypeForABI(RetTy)) { 4070 // Treat an enum type as its underlying type. 4071 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 4072 RetTy = EnumTy->getDecl()->getIntegerType(); 4073 4074 return (RetTy->isPromotableIntegerType() ? 4075 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4076 } 4077 4078 // Structures with either a non-trivial destructor or a non-trivial 4079 // copy constructor are always indirect. 4080 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 4081 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 4082 4083 if (isEmptyRecord(getContext(), RetTy, true)) 4084 return ABIArgInfo::getIgnore(); 4085 4086 // Aggregates <= 8 bytes are returned in r0; other aggregates 4087 // are returned indirectly. 4088 uint64_t Size = getContext().getTypeSize(RetTy); 4089 if (Size <= 64) { 4090 // Return in the smallest viable integer type. 
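    // (For example, a 3-byte struct comes back as an i32 and a 6-byte struct
    // as an i64.)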
4091 if (Size <= 8) 4092 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 4093 if (Size <= 16) 4094 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 4095 if (Size <= 32) 4096 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 4097 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 4098 } 4099 4100 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 4101 } 4102 4103 llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4104 CodeGenFunction &CGF) const { 4105 // FIXME: Need to handle alignment 4106 llvm::Type *BPP = CGF.Int8PtrPtrTy; 4107 4108 CGBuilderTy &Builder = CGF.Builder; 4109 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 4110 "ap"); 4111 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 4112 llvm::Type *PTy = 4113 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 4114 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 4115 4116 uint64_t Offset = 4117 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 4118 llvm::Value *NextAddr = 4119 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 4120 "ap.next"); 4121 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 4122 4123 return AddrTyped; 4124 } 4125 4126 4127 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { 4128 if (TheTargetCodeGenInfo) 4129 return *TheTargetCodeGenInfo; 4130 4131 const llvm::Triple &Triple = getContext().getTargetInfo().getTriple(); 4132 switch (Triple.getArch()) { 4133 default: 4134 return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types)); 4135 4136 case llvm::Triple::le32: 4137 return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types)); 4138 case llvm::Triple::mips: 4139 case llvm::Triple::mipsel: 4140 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true)); 4141 4142 case llvm::Triple::mips64: 4143 case llvm::Triple::mips64el: 4144 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false)); 4145 4146 case llvm::Triple::arm: 4147 case llvm::Triple::thumb: 4148 { 4149 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; 4150 4151 if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0) 4152 Kind = ARMABIInfo::APCS; 4153 else if (CodeGenOpts.FloatABI == "hard") 4154 Kind = ARMABIInfo::AAPCS_VFP; 4155 4156 switch (Triple.getOS()) { 4157 case llvm::Triple::NativeClient: 4158 return *(TheTargetCodeGenInfo = 4159 new NaClARMTargetCodeGenInfo(Types, Kind)); 4160 default: 4161 return *(TheTargetCodeGenInfo = 4162 new ARMTargetCodeGenInfo(Types, Kind)); 4163 } 4164 } 4165 4166 case llvm::Triple::ppc: 4167 return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types)); 4168 case llvm::Triple::ppc64: 4169 if (Triple.isOSBinFormatELF()) 4170 return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types)); 4171 else 4172 return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types)); 4173 4174 case llvm::Triple::nvptx: 4175 case llvm::Triple::nvptx64: 4176 return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types)); 4177 4178 case llvm::Triple::mblaze: 4179 return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types)); 4180 4181 case llvm::Triple::msp430: 4182 return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types)); 4183 4184 case llvm::Triple::tce: 4185 return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types)); 4186 4187 case llvm::Triple::x86: { 4188 bool DisableMMX = strcmp(getContext().getTargetInfo().getABI(), "no-mmx") == 0; 4189 
4190 if (Triple.isOSDarwin()) 4191 return *(TheTargetCodeGenInfo = 4192 new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX, false, 4193 CodeGenOpts.NumRegisterParameters)); 4194 4195 switch (Triple.getOS()) { 4196 case llvm::Triple::Cygwin: 4197 case llvm::Triple::MinGW32: 4198 case llvm::Triple::AuroraUX: 4199 case llvm::Triple::DragonFly: 4200 case llvm::Triple::FreeBSD: 4201 case llvm::Triple::OpenBSD: 4202 case llvm::Triple::Bitrig: 4203 return *(TheTargetCodeGenInfo = 4204 new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, 4205 false, 4206 CodeGenOpts.NumRegisterParameters)); 4207 4208 case llvm::Triple::Win32: 4209 return *(TheTargetCodeGenInfo = 4210 new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, true, 4211 CodeGenOpts.NumRegisterParameters)); 4212 4213 default: 4214 return *(TheTargetCodeGenInfo = 4215 new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX, 4216 false, 4217 CodeGenOpts.NumRegisterParameters)); 4218 } 4219 } 4220 4221 case llvm::Triple::x86_64: { 4222 bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0; 4223 4224 switch (Triple.getOS()) { 4225 case llvm::Triple::Win32: 4226 case llvm::Triple::MinGW32: 4227 case llvm::Triple::Cygwin: 4228 return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types)); 4229 case llvm::Triple::NativeClient: 4230 return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types, HasAVX)); 4231 default: 4232 return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types, 4233 HasAVX)); 4234 } 4235 } 4236 case llvm::Triple::hexagon: 4237 return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types)); 4238 } 4239 } 4240