//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Type.h"
#include "llvm/DataLayout.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return CodeGenFunction::hasAggregateLLVMType(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}


void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64 FreeBSD, Linux, Darwin
  //   x86-32 FreeBSD, Linux, Darwin
  //   PowerPC Linux, Darwin
  //   ARM Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isEmptyRecord(Context, i->getType(), true))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i)
    if (!isEmptyField(Context, *i, AllowArrays))
      return false;
  return true;
}

/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
/// a non-trivial destructor or a non-trivial copy constructor.
static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return false;

  return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
}

/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
/// a record type with either a non-trivial destructor or a non-trivial copy
/// constructor.
static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;

  return hasNonTrivialDestructorOrCopyConstructor(RT);
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i) {
      // Ignore empty records.
      if (isEmptyRecord(Context, i->getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return 0;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(i->getType(), Context);
      if (!Found)
        return 0;
    }
  }

  // Check for single element.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return 0;

  return Found;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
      !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to avoid
/// inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't
    // know how to expand them yet, and the predicate for telling if a
    // bitfield still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/constructors should not be passed
    // by value.
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs) const;

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() : 0;

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type, FreeRegs);
}

llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty,
                                              unsigned &FreeRegs) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/constructors should not be passed
    // by value.
    FreeRegs = 0;
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  ABIArgInfo BaseInfo = (Ty->isPromotableIntegerType() ?
                         ABIArgInfo::getExtend() : ABIArgInfo::getDirect());

  // Regparm regs hold 32 bits.
  unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  if (SizeInRegs == 0) return BaseInfo;
  if (SizeInRegs > FreeRegs) {
    FreeRegs = 0;
    return BaseInfo;
  }
  FreeRegs -= SizeInRegs;
  return BaseInfo.isDirect() ?
      ABIArgInfo::getDirectInReg(BaseInfo.getCoerceToType()) :
      ABIArgInfo::getExtendInReg(BaseInfo.getCoerceToType());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// UseX86_MMXType - Return true if this is an MMX type that should use the
/// special x86_mmx type.
bool UseX86_MMXType(llvm::Type *IRType) {
  // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
  // special x86_mmx type.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  return Ty;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsMMXDisabled;
  bool IsWin32FloatStructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
                                         unsigned callingConvention);

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal,
                               unsigned &FreeRegs) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy,
                                unsigned callingConvention) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs) const;
  bool shouldUseInReg(QualType Ty, unsigned &FreeRegs) const;

public:

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsMMXDisabled(m), IsWin32FloatStructABI(w),
      DefaultNumRegisterParameters(r) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                          bool d, bool p, bool m, bool w, unsigned r)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w, r)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.isTargetDarwin()) return 5;

    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

};

}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context,
                                               unsigned callingConvention) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context,
                                      callingConvention);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // For thiscall conventions, structures will never be returned in
  // a register. This is for compatibility with the MSVC ABI.
  if (callingConvention == llvm::CallingConv::X86_ThisCall &&
      RT->isStructureType()) {
    return false;
  }

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context,
                                    callingConvention))
      return false;
  }
  return true;
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                            unsigned callingConvention) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(),
                                                  callingConvention)) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32FloatStructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isRecordWithSSEVectorType(Context, i->getType()))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            unsigned &FreeRegs) const {
  if (!ByVal) {
    if (FreeRegs) {
      --FreeRegs; // Non-byval indirects just use one pointer.
      return ABIArgInfo::getIndirectInReg(0, false);
    }
    return ABIArgInfo::getIndirect(0, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  if (StackAlign < TypeAlign)
    return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
                                   /*Realign=*/true);

  return ABIArgInfo::getIndirect(StackAlign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::shouldUseInReg(QualType Ty, unsigned &FreeRegs) const {
  Class C = classify(Ty);
  if (C == Float)
    return false;

  unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  if (SizeInRegs > FreeRegs) {
    FreeRegs = 0;
    return false;
  }

  FreeRegs -= SizeInRegs;
  return true;
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               unsigned &FreeRegs) const {
  // FIXME: Set alignment on indirect arguments.
  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return getIndirectResult(Ty, false, FreeRegs);

      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, FreeRegs);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    if (shouldUseInReg(Ty, FreeRegs)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      llvm::LLVMContext &LLVMContext = getVMContext();
      llvm::Type *Int32 = llvm::Type::getInt32Ty(LLVMContext);
      SmallVector<llvm::Type*, 3> Elements;
      for (unsigned I = 0; I < SizeInRegs; ++I)
        Elements.push_back(Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }

    // Expand small (<= 128-bit) record types when we know that the stack
    // layout of those arguments will match the struct. This is important
    // because the LLVM backend isn't smart enough to remove byval, which
    // inhibits many optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpand();

    return getIndirectResult(Ty, true, FreeRegs);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    llvm::Type *IRType = CGT.ConvertType(Ty);
    if (UseX86_MMXType(IRType)) {
      if (IsMMXDisabled)
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            64));
      ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
      AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
      return AAI;
    }

    return ABIArgInfo::getDirect();
  }


  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldUseInReg(Ty, FreeRegs);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
                                          FI.getCallingConvention());

  unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() :
    DefaultNumRegisterParameters;

  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (FI.getReturnInfo().isIndirect() && FreeRegs) {
    --FreeRegs;
    ABIArgInfo &Old = FI.getReturnInfo();
    Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(),
                                       Old.getIndirectByVal(),
                                       Old.getIndirectRealign());
  }

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type, FreeRegs);
}

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute if the address needs to be aligned
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttribute(llvm::AttrListPtr::FunctionIndex,
                       llvm::Attributes::get(CGM.getLLVMContext(), B));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
                                               CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.isTargetDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//


namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified different
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getContext().getTargetInfo().getTriple().isOSDarwin();
  }

  bool HasAVX;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  ABIArgInfo classify(QualType Ty) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
    : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

};

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }
};

}

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
  //
  // (c) If the size of the aggregate exceeds two eightbytes and the first
  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //     argument is passed in memory.
  //     NOTE: This is necessary to keep the ABI working for processors that
  //     don't support the __m256 type.
  //
  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic. Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}

void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
               (k == BuiltinType::LongDouble &&
                getContext().getTargetInfo().getTriple().getOS() ==
                llvm::Triple::NativeClient)) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
      Lo = Hi = Integer;
    else
      Current = Integer;
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 || (HasAVX && Size == 256)) {
      // Arguments of 256-bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to class
      // SSEUP. The original Lo and Hi design considers that types can't be
      // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
      // This design isn't correct for 256-bits, but since there're no cases
      // where the upper parts would need to be inspected, avoid adding
      // complexity and just consider Hi to match the 64-256 part.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy)
      Current = SSE;
    else if (ET == getContext().DoubleTy ||
             (ET == getContext().LongDoubleTy &&
              getContext().getTargetInfo().getTriple().getOS() ==
              llvm::Triple::NativeClient))
      Lo = Hi = SSE;
    else if (ET == getContext().LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since Lo and Hi logic isn't extended
    // to work for sizes wider than 128, early check and fallback to memory.
    if (Size > 128 && EltSize != 256)
      return;

    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (hasNonTrivialDestructorOrCopyConstructor(RT))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset =
          OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(i->getType(), Offset, FieldLo, FieldHi);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory)
          break;
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // four eightbytes, or it contains unaligned fields, it has class MEMORY.
      //
      // The only case a 256-bit wide vector could be used is when the struct
      // contains a single 256-bit element. Since Lo and Hi logic isn't
      // extended to work for sizes wider than 128, early check and fallback
      // to memory.
      //
      if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
        Lo = Memory;
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  return ABIArgInfo::getIndirect(0);
}

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = HasAVX ? 256 : 128;
    if (Size <= 64 || Size > LargestVector)
      return true;
  }

  return false;
}

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  //
  // This assumption is optimistic, as there could be free registers available
  // when we need to pass this argument in memory, and LLVM could try to pass
  // the argument in the free register.
This does not seem to happen currently,
1641 // but this code would be much safer if we could mark the argument with
1642 // 'onstack'. See PR12193.
1643 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
1644 // Treat an enum type as its underlying type.
1645 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1646 Ty = EnumTy->getDecl()->getIntegerType();
1647
1648 return (Ty->isPromotableIntegerType() ?
1649 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1650 }
1651
1652 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
1653 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
1654
1655 // Compute the byval alignment. We specify the alignment of the byval in all
1656 // cases so that the mid-level optimizer knows the alignment of the byval.
1657 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
1658
1659 // Attempt to avoid passing indirect results using byval when possible. This
1660 // is important for good codegen.
1661 //
1662 // We do this by coercing the value into a scalar type which the backend can
1663 // handle naturally (i.e., without using byval).
1664 //
1665 // For simplicity, we currently only do this when we have exhausted all of the
1666 // free integer registers. Doing this when there are free integer registers
1667 // would require more care, as we would have to ensure that the coerced value
1668 // did not claim the unused register. That would require either reordering the
1669 // arguments to the function (so that any subsequent inreg values came first),
1670 // or only doing this optimization when there were no following arguments that
1671 // might be inreg.
1672 //
1673 // We currently expect it to be rare (particularly in well written code) for
1674 // arguments to be passed on the stack when there are still free integer
1675 // registers available (this would typically imply large structs being passed
1676 // by value), so this seems like a fair tradeoff for now.
1677 //
1678 // We can revisit this if the backend grows support for 'onstack' parameter
1679 // attributes. See PR12193.
1680 if (freeIntRegs == 0) {
1681 uint64_t Size = getContext().getTypeSize(Ty);
1682
1683 // If this type fits in an eightbyte, coerce it into the matching integral
1684 // type, which will end up on the stack (with alignment 8).
1685 if (Align == 8 && Size <= 64)
1686 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1687 Size));
1688 }
1689
1690 return ABIArgInfo::getIndirect(Align);
1691 }
1692
1693 /// GetByteVectorType - The ABI specifies that a value should be passed in a
1694 /// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a
1695 /// vector register.
1696 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
1697 llvm::Type *IRType = CGT.ConvertType(Ty);
1698
1699 // Wrapper structs that just contain vectors are passed just like vectors,
1700 // strip them off if present.
1701 llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
1702 while (STy && STy->getNumElements() == 1) {
1703 IRType = STy->getElementType(0);
1704 STy = dyn_cast<llvm::StructType>(IRType);
1705 }
1706
1707 // If the preferred type is a 16-byte vector, prefer to pass it.
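// For illustration: a single-element wrapper such as struct { __m128 v; }
// is unwrapped above and passed as its underlying <4 x float>, while any
// lowering that is not a legal 128- or 256-bit vector of the element types
// checked below falls through to the default <2 x double>.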
1708 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){ 1709 llvm::Type *EltTy = VT->getElementType(); 1710 unsigned BitWidth = VT->getBitWidth(); 1711 if ((BitWidth >= 128 && BitWidth <= 256) && 1712 (EltTy->isFloatTy() || EltTy->isDoubleTy() || 1713 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) || 1714 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) || 1715 EltTy->isIntegerTy(128))) 1716 return VT; 1717 } 1718 1719 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2); 1720 } 1721 1722 /// BitsContainNoUserData - Return true if the specified [start,end) bit range 1723 /// is known to either be off the end of the specified type or being in 1724 /// alignment padding. The user type specified is known to be at most 128 bits 1725 /// in size, and have passed through X86_64ABIInfo::classify with a successful 1726 /// classification that put one of the two halves in the INTEGER class. 1727 /// 1728 /// It is conservatively correct to return false. 1729 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, 1730 unsigned EndBit, ASTContext &Context) { 1731 // If the bytes being queried are off the end of the type, there is no user 1732 // data hiding here. This handles analysis of builtins, vectors and other 1733 // types that don't contain interesting padding. 1734 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 1735 if (TySize <= StartBit) 1736 return true; 1737 1738 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 1739 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 1740 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 1741 1742 // Check each element to see if the element overlaps with the queried range. 1743 for (unsigned i = 0; i != NumElts; ++i) { 1744 // If the element is after the span we care about, then we're done.. 1745 unsigned EltOffset = i*EltSize; 1746 if (EltOffset >= EndBit) break; 1747 1748 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0; 1749 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 1750 EndBit-EltOffset, Context)) 1751 return false; 1752 } 1753 // If it overlaps no elements, then it is safe to process as padding. 1754 return true; 1755 } 1756 1757 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1758 const RecordDecl *RD = RT->getDecl(); 1759 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 1760 1761 // If this is a C++ record, check the bases first. 1762 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1763 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 1764 e = CXXRD->bases_end(); i != e; ++i) { 1765 assert(!i->isVirtual() && !i->getType()->isDependentType() && 1766 "Unexpected base class!"); 1767 const CXXRecordDecl *Base = 1768 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 1769 1770 // If the base is after the span we care about, ignore it. 1771 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 1772 if (BaseOffset >= EndBit) continue; 1773 1774 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 1775 if (!BitsContainNoUserData(i->getType(), BaseStart, 1776 EndBit-BaseOffset, Context)) 1777 return false; 1778 } 1779 } 1780 1781 // Verify that no field has data that overlaps the region of interest. Yes 1782 // this could be sped up a lot by being smarter about queried fields, 1783 // however we're only looking at structs up to 16 bytes, so we don't care 1784 // much. 
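// Illustrative case: for struct { int i; double d; }, a query over bits
// [32, 64) walks the fields below, sees that 'i' ends at bit 32 and 'd'
// does not start until bit 64, and returns true; GetINTEGERTypeAtOffset
// relies on exactly this to pass the low eightbyte as i32 rather than i64.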
1785 unsigned idx = 0; 1786 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1787 i != e; ++i, ++idx) { 1788 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 1789 1790 // If we found a field after the region we care about, then we're done. 1791 if (FieldOffset >= EndBit) break; 1792 1793 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 1794 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 1795 Context)) 1796 return false; 1797 } 1798 1799 // If nothing in this record overlapped the area of interest, then we're 1800 // clean. 1801 return true; 1802 } 1803 1804 return false; 1805 } 1806 1807 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 1808 /// float member at the specified offset. For example, {int,{float}} has a 1809 /// float at offset 4. It is conservatively correct for this routine to return 1810 /// false. 1811 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 1812 const llvm::DataLayout &TD) { 1813 // Base case if we find a float. 1814 if (IROffset == 0 && IRType->isFloatTy()) 1815 return true; 1816 1817 // If this is a struct, recurse into the field at the specified offset. 1818 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1819 const llvm::StructLayout *SL = TD.getStructLayout(STy); 1820 unsigned Elt = SL->getElementContainingOffset(IROffset); 1821 IROffset -= SL->getElementOffset(Elt); 1822 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 1823 } 1824 1825 // If this is an array, recurse into the field at the specified offset. 1826 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1827 llvm::Type *EltTy = ATy->getElementType(); 1828 unsigned EltSize = TD.getTypeAllocSize(EltTy); 1829 IROffset -= IROffset/EltSize*EltSize; 1830 return ContainsFloatAtOffset(EltTy, IROffset, TD); 1831 } 1832 1833 return false; 1834 } 1835 1836 1837 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 1838 /// low 8 bytes of an XMM register, corresponding to the SSE class. 1839 llvm::Type *X86_64ABIInfo:: 1840 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1841 QualType SourceTy, unsigned SourceOffset) const { 1842 // The only three choices we have are either double, <2 x float>, or float. We 1843 // pass as float if the last 4 bytes is just padding. This happens for 1844 // structs that contain 3 floats. 1845 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 1846 SourceOffset*8+64, getContext())) 1847 return llvm::Type::getFloatTy(getVMContext()); 1848 1849 // We want to pass as <2 x float> if the LLVM IR type contains a float at 1850 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 1851 // case. 1852 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && 1853 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) 1854 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 1855 1856 return llvm::Type::getDoubleTy(getVMContext()); 1857 } 1858 1859 1860 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 1861 /// an 8-byte GPR. This means that we either have a scalar or we are talking 1862 /// about the high or low part of an up-to-16-byte struct. This routine picks 1863 /// the best LLVM IR type to represent this, which may be i64 or may be anything 1864 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 1865 /// etc). 
1866 /// 1867 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 1868 /// the source type. IROffset is an offset in bytes into the LLVM IR type that 1869 /// the 8-byte value references. PrefType may be null. 1870 /// 1871 /// SourceTy is the source level type for the entire argument. SourceOffset is 1872 /// an offset into this that we're processing (which is always either 0 or 8). 1873 /// 1874 llvm::Type *X86_64ABIInfo:: 1875 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1876 QualType SourceTy, unsigned SourceOffset) const { 1877 // If we're dealing with an un-offset LLVM IR type, then it means that we're 1878 // returning an 8-byte unit starting with it. See if we can safely use it. 1879 if (IROffset == 0) { 1880 // Pointers and int64's always fill the 8-byte unit. 1881 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || 1882 IRType->isIntegerTy(64)) 1883 return IRType; 1884 1885 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 1886 // goodness in the source type is just tail padding. This is allowed to 1887 // kick in for struct {double,int} on the int, but not on 1888 // struct{double,int,int} because we wouldn't return the second int. We 1889 // have to do this analysis on the source type because we can't depend on 1890 // unions being lowered a specific way etc. 1891 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 1892 IRType->isIntegerTy(32) || 1893 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) { 1894 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 : 1895 cast<llvm::IntegerType>(IRType)->getBitWidth(); 1896 1897 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 1898 SourceOffset*8+64, getContext())) 1899 return IRType; 1900 } 1901 } 1902 1903 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1904 // If this is a struct, recurse into the field at the specified offset. 1905 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy); 1906 if (IROffset < SL->getSizeInBytes()) { 1907 unsigned FieldIdx = SL->getElementContainingOffset(IROffset); 1908 IROffset -= SL->getElementOffset(FieldIdx); 1909 1910 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, 1911 SourceTy, SourceOffset); 1912 } 1913 } 1914 1915 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1916 llvm::Type *EltTy = ATy->getElementType(); 1917 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy); 1918 unsigned EltOffset = IROffset/EltSize*EltSize; 1919 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, 1920 SourceOffset); 1921 } 1922 1923 // Okay, we don't have any better idea of what to pass, so we pass this in an 1924 // integer register that isn't too big to fit the rest of the struct. 1925 unsigned TySizeInBytes = 1926 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); 1927 1928 assert(TySizeInBytes != SourceOffset && "Empty field?"); 1929 1930 // It is always safe to classify this as an integer type up to i64 that 1931 // isn't larger than the structure. 1932 return llvm::IntegerType::get(getVMContext(), 1933 std::min(TySizeInBytes-SourceOffset, 8U)*8); 1934 } 1935 1936 1937 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally 1938 /// be used as elements of a two register pair to pass or return, return a 1939 /// first class aggregate to represent them. 
For example, if the low part of
1940 /// a by-value argument should be passed as i32* and the high part as float,
1941 /// return {i32*, float}.
1942 static llvm::Type *
1943 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
1944 const llvm::DataLayout &TD) {
1945 // In order to correctly satisfy the ABI, we need the high part to start
1946 // at offset 8. If the high and low parts we inferred are both 4-byte types
1947 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
1948 // the second element at offset 8. Check for this:
1949 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
1950 unsigned HiAlign = TD.getABITypeAlignment(Hi);
1951 unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
1952 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
1953
1954 // To handle this, we have to increase the size of the low part so that the
1955 // second element will start at an 8 byte offset. We can't increase the size
1956 // of the second element because it might make us access off the end of the
1957 // struct.
1958 if (HiStart != 8) {
1959 // There are only two sorts of types the ABI generation code can produce for
1960 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
1961 // Promote these to a larger type.
1962 if (Lo->isFloatTy())
1963 Lo = llvm::Type::getDoubleTy(Lo->getContext());
1964 else {
1965 assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
1966 Lo = llvm::Type::getInt64Ty(Lo->getContext());
1967 }
1968 }
1969
1970 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);
1971
1972
1973 // Verify that the second element is at an 8-byte offset.
1974 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
1975 "Invalid x86-64 argument pair!");
1976 return Result;
1977 }
1978
1979 ABIArgInfo X86_64ABIInfo::
1980 classifyReturnType(QualType RetTy) const {
1981 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
1982 // classification algorithm.
1983 X86_64ABIInfo::Class Lo, Hi;
1984 classify(RetTy, 0, Lo, Hi);
1985
1986 // Check some invariants.
1987 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
1988 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
1989
1990 llvm::Type *ResType = 0;
1991 switch (Lo) {
1992 case NoClass:
1993 if (Hi == NoClass)
1994 return ABIArgInfo::getIgnore();
1995 // If the low part is just padding, it takes no register, leave ResType
1996 // null.
1997 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
1998 "Unknown missing lo part");
1999 break;
2000
2001 case SSEUp:
2002 case X87Up:
2003 llvm_unreachable("Invalid classification for lo word.");
2004
2005 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
2006 // hidden argument.
2007 case Memory:
2008 return getIndirectReturnResult(RetTy);
2009
2010 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
2011 // available register of the sequence %rax, %rdx is used.
2012 case Integer:
2013 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2014
2015 // If we have a sign or zero extended integer, make sure to return Extend
2016 // so that the parameter gets the right LLVM IR attributes.
2017 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2018 // Treat an enum type as its underlying type.
2019 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 2020 RetTy = EnumTy->getDecl()->getIntegerType(); 2021 2022 if (RetTy->isIntegralOrEnumerationType() && 2023 RetTy->isPromotableIntegerType()) 2024 return ABIArgInfo::getExtend(); 2025 } 2026 break; 2027 2028 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 2029 // available SSE register of the sequence %xmm0, %xmm1 is used. 2030 case SSE: 2031 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 2032 break; 2033 2034 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 2035 // returned on the X87 stack in %st0 as 80-bit x87 number. 2036 case X87: 2037 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 2038 break; 2039 2040 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 2041 // part of the value is returned in %st0 and the imaginary part in 2042 // %st1. 2043 case ComplexX87: 2044 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 2045 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), 2046 llvm::Type::getX86_FP80Ty(getVMContext()), 2047 NULL); 2048 break; 2049 } 2050 2051 llvm::Type *HighPart = 0; 2052 switch (Hi) { 2053 // Memory was handled previously and X87 should 2054 // never occur as a hi class. 2055 case Memory: 2056 case X87: 2057 llvm_unreachable("Invalid classification for hi word."); 2058 2059 case ComplexX87: // Previously handled. 2060 case NoClass: 2061 break; 2062 2063 case Integer: 2064 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2065 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2066 return ABIArgInfo::getDirect(HighPart, 8); 2067 break; 2068 case SSE: 2069 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2070 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2071 return ABIArgInfo::getDirect(HighPart, 8); 2072 break; 2073 2074 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 2075 // is passed in the next available eightbyte chunk if the last used 2076 // vector register. 2077 // 2078 // SSEUP should always be preceded by SSE, just widen. 2079 case SSEUp: 2080 assert(Lo == SSE && "Unexpected SSEUp classification."); 2081 ResType = GetByteVectorType(RetTy); 2082 break; 2083 2084 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 2085 // returned together with the previous X87 value in %st0. 2086 case X87Up: 2087 // If X87Up is preceded by X87, we don't need to do 2088 // anything. However, in some cases with unions it may not be 2089 // preceded by X87. In such situations we follow gcc and pass the 2090 // extra bits in an SSE reg. 2091 if (Lo != X87) { 2092 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2093 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2094 return ABIArgInfo::getDirect(HighPart, 8); 2095 } 2096 break; 2097 } 2098 2099 // If a high part was specified, merge it together with the low part. It is 2100 // known to pass in the high eightbyte of the result. We do this by forming a 2101 // first class struct aggregate with the high and low part: {low, high} 2102 if (HighPart) 2103 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2104 2105 return ABIArgInfo::getDirect(ResType); 2106 } 2107 2108 ABIArgInfo X86_64ABIInfo::classifyArgumentType( 2109 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE) 2110 const 2111 { 2112 X86_64ABIInfo::Class Lo, Hi; 2113 classify(Ty, 0, Lo, Hi); 2114 2115 // Check some invariants. 
2116 // FIXME: Enforce these by construction. 2117 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2118 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2119 2120 neededInt = 0; 2121 neededSSE = 0; 2122 llvm::Type *ResType = 0; 2123 switch (Lo) { 2124 case NoClass: 2125 if (Hi == NoClass) 2126 return ABIArgInfo::getIgnore(); 2127 // If the low part is just padding, it takes no register, leave ResType 2128 // null. 2129 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2130 "Unknown missing lo part"); 2131 break; 2132 2133 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 2134 // on the stack. 2135 case Memory: 2136 2137 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 2138 // COMPLEX_X87, it is passed in memory. 2139 case X87: 2140 case ComplexX87: 2141 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 2142 ++neededInt; 2143 return getIndirectResult(Ty, freeIntRegs); 2144 2145 case SSEUp: 2146 case X87Up: 2147 llvm_unreachable("Invalid classification for lo word."); 2148 2149 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 2150 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 2151 // and %r9 is used. 2152 case Integer: 2153 ++neededInt; 2154 2155 // Pick an 8-byte type based on the preferred type. 2156 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 2157 2158 // If we have a sign or zero extended integer, make sure to return Extend 2159 // so that the parameter gets the right LLVM IR attributes. 2160 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2161 // Treat an enum type as its underlying type. 2162 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2163 Ty = EnumTy->getDecl()->getIntegerType(); 2164 2165 if (Ty->isIntegralOrEnumerationType() && 2166 Ty->isPromotableIntegerType()) 2167 return ABIArgInfo::getExtend(); 2168 } 2169 2170 break; 2171 2172 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 2173 // available SSE register is used, the registers are taken in the 2174 // order from %xmm0 to %xmm7. 2175 case SSE: { 2176 llvm::Type *IRType = CGT.ConvertType(Ty); 2177 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 2178 ++neededSSE; 2179 break; 2180 } 2181 } 2182 2183 llvm::Type *HighPart = 0; 2184 switch (Hi) { 2185 // Memory was handled previously, ComplexX87 and X87 should 2186 // never occur as hi classes, and X87Up must be preceded by X87, 2187 // which is passed in memory. 2188 case Memory: 2189 case X87: 2190 case ComplexX87: 2191 llvm_unreachable("Invalid classification for hi word."); 2192 2193 case NoClass: break; 2194 2195 case Integer: 2196 ++neededInt; 2197 // Pick an 8-byte type based on the preferred type. 2198 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2199 2200 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2201 return ABIArgInfo::getDirect(HighPart, 8); 2202 break; 2203 2204 // X87Up generally doesn't occur here (long double is passed in 2205 // memory), except in situations involving unions. 2206 case X87Up: 2207 case SSE: 2208 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2209 2210 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2211 return ABIArgInfo::getDirect(HighPart, 8); 2212 2213 ++neededSSE; 2214 break; 2215 2216 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 2217 // eightbyte is passed in the upper half of the last used SSE 2218 // register. This only happens when 128-bit vectors are passed. 
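// (With AVX, 256-bit vectors follow the same path: the first eightbyte is
// classified SSE and the rest SSEUP, and GetByteVectorType below simply
// yields the full 256-bit vector type.)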
2219 case SSEUp: 2220 assert(Lo == SSE && "Unexpected SSEUp classification"); 2221 ResType = GetByteVectorType(Ty); 2222 break; 2223 } 2224 2225 // If a high part was specified, merge it together with the low part. It is 2226 // known to pass in the high eightbyte of the result. We do this by forming a 2227 // first class struct aggregate with the high and low part: {low, high} 2228 if (HighPart) 2229 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2230 2231 return ABIArgInfo::getDirect(ResType); 2232 } 2233 2234 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2235 2236 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2237 2238 // Keep track of the number of assigned registers. 2239 unsigned freeIntRegs = 6, freeSSERegs = 8; 2240 2241 // If the return value is indirect, then the hidden argument is consuming one 2242 // integer register. 2243 if (FI.getReturnInfo().isIndirect()) 2244 --freeIntRegs; 2245 2246 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 2247 // get assigned (in left-to-right order) for passing as follows... 2248 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2249 it != ie; ++it) { 2250 unsigned neededInt, neededSSE; 2251 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt, 2252 neededSSE); 2253 2254 // AMD64-ABI 3.2.3p3: If there are no registers available for any 2255 // eightbyte of an argument, the whole argument is passed on the 2256 // stack. If registers have already been assigned for some 2257 // eightbytes of such an argument, the assignments get reverted. 2258 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 2259 freeIntRegs -= neededInt; 2260 freeSSERegs -= neededSSE; 2261 } else { 2262 it->info = getIndirectResult(it->type, freeIntRegs); 2263 } 2264 } 2265 } 2266 2267 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 2268 QualType Ty, 2269 CodeGenFunction &CGF) { 2270 llvm::Value *overflow_arg_area_p = 2271 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 2272 llvm::Value *overflow_arg_area = 2273 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 2274 2275 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 2276 // byte boundary if alignment needed by type exceeds 8 byte boundary. 2277 // It isn't stated explicitly in the standard, but in practice we use 2278 // alignment greater than 16 where necessary. 2279 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 2280 if (Align > 8) { 2281 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 2282 llvm::Value *Offset = 2283 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 2284 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 2285 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 2286 CGF.Int64Ty); 2287 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); 2288 overflow_arg_area = 2289 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 2290 overflow_arg_area->getType(), 2291 "overflow_arg_area.align"); 2292 } 2293 2294 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 2295 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2296 llvm::Value *Res = 2297 CGF.Builder.CreateBitCast(overflow_arg_area, 2298 llvm::PointerType::getUnqual(LTy)); 2299 2300 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 2301 // l->overflow_arg_area + sizeof(type). 2302 // AMD64-ABI 3.5.7p5: Step 10. 
Align l->overflow_arg_area upwards to 2303 // an 8 byte boundary. 2304 2305 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 2306 llvm::Value *Offset = 2307 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 2308 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 2309 "overflow_arg_area.next"); 2310 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 2311 2312 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 2313 return Res; 2314 } 2315 2316 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2317 CodeGenFunction &CGF) const { 2318 // Assume that va_list type is correct; should be pointer to LLVM type: 2319 // struct { 2320 // i32 gp_offset; 2321 // i32 fp_offset; 2322 // i8* overflow_arg_area; 2323 // i8* reg_save_area; 2324 // }; 2325 unsigned neededInt, neededSSE; 2326 2327 Ty = CGF.getContext().getCanonicalType(Ty); 2328 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE); 2329 2330 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 2331 // in the registers. If not go to step 7. 2332 if (!neededInt && !neededSSE) 2333 return EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2334 2335 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 2336 // general purpose registers needed to pass type and num_fp to hold 2337 // the number of floating point registers needed. 2338 2339 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 2340 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 2341 // l->fp_offset > 304 - num_fp * 16 go to step 7. 2342 // 2343 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of 2344 // register save space). 2345 2346 llvm::Value *InRegs = 0; 2347 llvm::Value *gp_offset_p = 0, *gp_offset = 0; 2348 llvm::Value *fp_offset_p = 0, *fp_offset = 0; 2349 if (neededInt) { 2350 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); 2351 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); 2352 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); 2353 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); 2354 } 2355 2356 if (neededSSE) { 2357 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); 2358 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); 2359 llvm::Value *FitsInFP = 2360 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); 2361 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); 2362 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; 2363 } 2364 2365 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 2366 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 2367 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 2368 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 2369 2370 // Emit code to load the value if it was passed in registers. 2371 2372 CGF.EmitBlock(InRegBlock); 2373 2374 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with 2375 // an offset of l->gp_offset and/or l->fp_offset. This may require 2376 // copying to a temporary location in case the parameter is passed 2377 // in different register classes or requires an alignment greater 2378 // than 8 for general purpose registers and 16 for XMM registers. 
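// For reference, the register save area laid down in the prologue holds
// the six general-purpose argument registers in its first 48 bytes and the
// eight XMM registers in 16-byte slots after that (176 bytes in total);
// gp_offset and fp_offset are byte offsets into this block.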
2379 // 2380 // FIXME: This really results in shameful code when we end up needing to 2381 // collect arguments from different places; often what should result in a 2382 // simple assembling of a structure from scattered addresses has many more 2383 // loads than necessary. Can we clean this up? 2384 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2385 llvm::Value *RegAddr = 2386 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3), 2387 "reg_save_area"); 2388 if (neededInt && neededSSE) { 2389 // FIXME: Cleanup. 2390 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 2391 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 2392 llvm::Value *Tmp = CGF.CreateTempAlloca(ST); 2393 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 2394 llvm::Type *TyLo = ST->getElementType(0); 2395 llvm::Type *TyHi = ST->getElementType(1); 2396 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 2397 "Unexpected ABI info for mixed regs"); 2398 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 2399 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 2400 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2401 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2402 llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr; 2403 llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr; 2404 llvm::Value *V = 2405 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 2406 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2407 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 2408 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2409 2410 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2411 llvm::PointerType::getUnqual(LTy)); 2412 } else if (neededInt) { 2413 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2414 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2415 llvm::PointerType::getUnqual(LTy)); 2416 } else if (neededSSE == 1) { 2417 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2418 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2419 llvm::PointerType::getUnqual(LTy)); 2420 } else { 2421 assert(neededSSE == 2 && "Invalid number of needed registers!"); 2422 // SSE registers are spaced 16 bytes apart in the register save 2423 // area, we need to collect the two eightbytes together. 2424 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2425 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); 2426 llvm::Type *DoubleTy = CGF.DoubleTy; 2427 llvm::Type *DblPtrTy = 2428 llvm::PointerType::getUnqual(DoubleTy); 2429 llvm::StructType *ST = llvm::StructType::get(DoubleTy, 2430 DoubleTy, NULL); 2431 llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST); 2432 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 2433 DblPtrTy)); 2434 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2435 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 2436 DblPtrTy)); 2437 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2438 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2439 llvm::PointerType::getUnqual(LTy)); 2440 } 2441 2442 // AMD64-ABI 3.5.7p5: Step 5. Set: 2443 // l->gp_offset = l->gp_offset + num_gp * 8 2444 // l->fp_offset = l->fp_offset + num_fp * 16. 
2445 if (neededInt) { 2446 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2447 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2448 gp_offset_p); 2449 } 2450 if (neededSSE) { 2451 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2452 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2453 fp_offset_p); 2454 } 2455 CGF.EmitBranch(ContBlock); 2456 2457 // Emit code to load the value if it was passed in memory. 2458 2459 CGF.EmitBlock(InMemBlock); 2460 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2461 2462 // Return the appropriate result. 2463 2464 CGF.EmitBlock(ContBlock); 2465 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2, 2466 "vaarg.addr"); 2467 ResAddr->addIncoming(RegAddr, InRegBlock); 2468 ResAddr->addIncoming(MemAddr, InMemBlock); 2469 return ResAddr; 2470 } 2471 2472 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const { 2473 2474 if (Ty->isVoidType()) 2475 return ABIArgInfo::getIgnore(); 2476 2477 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2478 Ty = EnumTy->getDecl()->getIntegerType(); 2479 2480 uint64_t Size = getContext().getTypeSize(Ty); 2481 2482 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2483 if (hasNonTrivialDestructorOrCopyConstructor(RT) || 2484 RT->getDecl()->hasFlexibleArrayMember()) 2485 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2486 2487 // FIXME: mingw-w64-gcc emits 128-bit struct as i128 2488 if (Size == 128 && 2489 getContext().getTargetInfo().getTriple().getOS() 2490 == llvm::Triple::MinGW32) 2491 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2492 Size)); 2493 2494 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 2495 // not 1, 2, 4, or 8 bytes, must be passed by reference." 
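// For illustration: a 1-, 2-, 4- or 8-byte struct is coerced to the
// matching iN below and passed directly, whereas a 3-byte struct (24 bits
// is not a power of two) takes the indirect path.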
if (Size <= 64 &&
2497 (Size & (Size - 1)) == 0)
2498 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2499 Size));
2500
2501 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2502 }
2503
2504 if (Ty->isPromotableIntegerType())
2505 return ABIArgInfo::getExtend();
2506
2507 return ABIArgInfo::getDirect();
2508 }
2509
2510 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2511
2512 QualType RetTy = FI.getReturnType();
2513 FI.getReturnInfo() = classify(RetTy);
2514
2515 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2516 it != ie; ++it)
2517 it->info = classify(it->type);
2518 }
2519
2520 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2521 CodeGenFunction &CGF) const {
2522 llvm::Type *BPP = CGF.Int8PtrPtrTy;
2523
2524 CGBuilderTy &Builder = CGF.Builder;
2525 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
2526 "ap");
2527 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2528 llvm::Type *PTy =
2529 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2530 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
2531
2532 uint64_t Offset =
2533 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
2534 llvm::Value *NextAddr =
2535 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
2536 "ap.next");
2537 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2538
2539 return AddrTyped;
2540 }
2541
2542 class NaClX86_64ABIInfo : public ABIInfo {
2543 public:
2544 NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
2545 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
2546 virtual void computeInfo(CGFunctionInfo &FI) const;
2547 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2548 CodeGenFunction &CGF) const;
2549 private:
2550 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
2551 X86_64ABIInfo NInfo; // Used for everything else.
2552 };
2553
2554 class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2555 public:
2556 NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
2557 : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {}
2558 };
2559
2560 void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2561 if (FI.getASTCallingConvention() == CC_PnaclCall)
2562 PInfo.computeInfo(FI);
2563 else
2564 NInfo.computeInfo(FI);
2565 }
2566
2567 llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2568 CodeGenFunction &CGF) const {
2569 // Always use the native convention; calling pnacl-style varargs functions
2570 // is unsupported.
2571 return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
2572 }
2573
2574
2575 // PowerPC-32
2576
2577 namespace {
2578 class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2579 public:
2580 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2581
2582 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2583 // This is recovered from gcc output.
2584 return 1; // r1 is the dedicated stack pointer
2585 }
2586
2587 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2588 llvm::Value *Address) const;
2589 };
2590
2591 }
2592
2593 bool
2594 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2595 llvm::Value *Address) const {
2596 // This is calculated from the LLVM and GCC tables and verified
2597 // against gcc output. AFAIK all ABIs use the same encoding.
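// The table maps DWARF register numbers to the byte width of each register
// so that EH code can restore them; AssignToArrayRange simply stores the
// same width constant over a contiguous range of entries.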
2598 2599 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2600 2601 llvm::IntegerType *i8 = CGF.Int8Ty; 2602 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2603 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2604 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2605 2606 // 0-31: r0-31, the 4-byte general-purpose registers 2607 AssignToArrayRange(Builder, Address, Four8, 0, 31); 2608 2609 // 32-63: fp0-31, the 8-byte floating-point registers 2610 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2611 2612 // 64-76 are various 4-byte special-purpose registers: 2613 // 64: mq 2614 // 65: lr 2615 // 66: ctr 2616 // 67: ap 2617 // 68-75 cr0-7 2618 // 76: xer 2619 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2620 2621 // 77-108: v0-31, the 16-byte vector registers 2622 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2623 2624 // 109: vrsave 2625 // 110: vscr 2626 // 111: spe_acc 2627 // 112: spefscr 2628 // 113: sfp 2629 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2630 2631 return false; 2632 } 2633 2634 // PowerPC-64 2635 2636 namespace { 2637 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. 2638 class PPC64_SVR4_ABIInfo : public DefaultABIInfo { 2639 2640 public: 2641 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 2642 2643 // TODO: We can add more logic to computeInfo to improve performance. 2644 // Example: For aggregate arguments that fit in a register, we could 2645 // use getDirectInReg (as is done below for structs containing a single 2646 // floating-point value) to avoid pushing them to memory on function 2647 // entry. This would require changing the logic in PPCISelLowering 2648 // when lowering the parameters in the caller and args in the callee. 2649 virtual void computeInfo(CGFunctionInfo &FI) const { 2650 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2651 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2652 it != ie; ++it) { 2653 // We rely on the default argument classification for the most part. 2654 // One exception: An aggregate containing a single floating-point 2655 // item must be passed in a register if one is available. 2656 const Type *T = isSingleElementStruct(it->type, getContext()); 2657 if (T) { 2658 const BuiltinType *BT = T->getAs<BuiltinType>(); 2659 if (BT && BT->isFloatingPoint()) { 2660 QualType QT(T, 0); 2661 it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); 2662 continue; 2663 } 2664 } 2665 it->info = classifyArgumentType(it->type); 2666 } 2667 } 2668 2669 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, 2670 QualType Ty, 2671 CodeGenFunction &CGF) const; 2672 }; 2673 2674 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { 2675 public: 2676 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT) 2677 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {} 2678 2679 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2680 // This is recovered from gcc output. 2681 return 1; // r1 is the dedicated stack pointer 2682 } 2683 2684 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2685 llvm::Value *Address) const; 2686 }; 2687 2688 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2689 public: 2690 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2691 2692 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2693 // This is recovered from gcc output. 
2694 return 1; // r1 is the dedicated stack pointer 2695 } 2696 2697 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2698 llvm::Value *Address) const; 2699 }; 2700 2701 } 2702 2703 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 2704 llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr, 2705 QualType Ty, 2706 CodeGenFunction &CGF) const { 2707 llvm::Type *BP = CGF.Int8PtrTy; 2708 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2709 2710 CGBuilderTy &Builder = CGF.Builder; 2711 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 2712 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2713 2714 // Handle address alignment for type alignment > 64 bits. Although 2715 // long double normally requires 16-byte alignment, this is not the 2716 // case when it is passed as an argument; so handle that special case. 2717 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 2718 unsigned TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; 2719 2720 if (TyAlign > 8 && (!BT || !BT->isFloatingPoint())) { 2721 assert((TyAlign & (TyAlign - 1)) == 0 && 2722 "Alignment is not power of 2!"); 2723 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 2724 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(TyAlign - 1)); 2725 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt64(~(TyAlign - 1))); 2726 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 2727 } 2728 2729 // Update the va_list pointer. 2730 unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8; 2731 unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8); 2732 llvm::Value *NextAddr = 2733 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), 2734 "ap.next"); 2735 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2736 2737 // If the argument is smaller than 8 bytes, it is right-adjusted in 2738 // its doubleword slot. Adjust the pointer to pick it up from the 2739 // correct offset. 2740 if (SizeInBytes < 8) { 2741 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 2742 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes)); 2743 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 2744 } 2745 2746 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2747 return Builder.CreateBitCast(Addr, PTy); 2748 } 2749 2750 static bool 2751 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2752 llvm::Value *Address) { 2753 // This is calculated from the LLVM and GCC tables and verified 2754 // against gcc output. AFAIK all ABIs use the same encoding. 
2755 2756 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2757 2758 llvm::IntegerType *i8 = CGF.Int8Ty; 2759 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2760 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2761 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2762 2763 // 0-31: r0-31, the 8-byte general-purpose registers 2764 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 2765 2766 // 32-63: fp0-31, the 8-byte floating-point registers 2767 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2768 2769 // 64-76 are various 4-byte special-purpose registers: 2770 // 64: mq 2771 // 65: lr 2772 // 66: ctr 2773 // 67: ap 2774 // 68-75 cr0-7 2775 // 76: xer 2776 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2777 2778 // 77-108: v0-31, the 16-byte vector registers 2779 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2780 2781 // 109: vrsave 2782 // 110: vscr 2783 // 111: spe_acc 2784 // 112: spefscr 2785 // 113: sfp 2786 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2787 2788 return false; 2789 } 2790 2791 bool 2792 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( 2793 CodeGen::CodeGenFunction &CGF, 2794 llvm::Value *Address) const { 2795 2796 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 2797 } 2798 2799 bool 2800 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2801 llvm::Value *Address) const { 2802 2803 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 2804 } 2805 2806 //===----------------------------------------------------------------------===// 2807 // ARM ABI Implementation 2808 //===----------------------------------------------------------------------===// 2809 2810 namespace { 2811 2812 class ARMABIInfo : public ABIInfo { 2813 public: 2814 enum ABIKind { 2815 APCS = 0, 2816 AAPCS = 1, 2817 AAPCS_VFP 2818 }; 2819 2820 private: 2821 ABIKind Kind; 2822 2823 public: 2824 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {} 2825 2826 bool isEABI() const { 2827 StringRef Env = 2828 getContext().getTargetInfo().getTriple().getEnvironmentName(); 2829 return (Env == "gnueabi" || Env == "eabi" || 2830 Env == "android" || Env == "androideabi"); 2831 } 2832 2833 private: 2834 ABIKind getABIKind() const { return Kind; } 2835 2836 ABIArgInfo classifyReturnType(QualType RetTy) const; 2837 ABIArgInfo classifyArgumentType(QualType RetTy) const; 2838 bool isIllegalVectorType(QualType Ty) const; 2839 2840 virtual void computeInfo(CGFunctionInfo &FI) const; 2841 2842 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2843 CodeGenFunction &CGF) const; 2844 }; 2845 2846 class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 2847 public: 2848 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 2849 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 2850 2851 const ARMABIInfo &getABIInfo() const { 2852 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 2853 } 2854 2855 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2856 return 13; 2857 } 2858 2859 StringRef getARCRetainAutoreleasedReturnValueMarker() const { 2860 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; 2861 } 2862 2863 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2864 llvm::Value *Address) const { 2865 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 2866 2867 // 0-15 are the 16 integer registers. 
2868 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); 2869 return false; 2870 } 2871 2872 unsigned getSizeOfUnwindException() const { 2873 if (getABIInfo().isEABI()) return 88; 2874 return TargetCodeGenInfo::getSizeOfUnwindException(); 2875 } 2876 }; 2877 2878 } 2879 2880 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 2881 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2882 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2883 it != ie; ++it) 2884 it->info = classifyArgumentType(it->type); 2885 2886 // Always honor user-specified calling convention. 2887 if (FI.getCallingConvention() != llvm::CallingConv::C) 2888 return; 2889 2890 // Calling convention as default by an ABI. 2891 llvm::CallingConv::ID DefaultCC; 2892 if (isEABI()) 2893 DefaultCC = llvm::CallingConv::ARM_AAPCS; 2894 else 2895 DefaultCC = llvm::CallingConv::ARM_APCS; 2896 2897 // If user did not ask for specific calling convention explicitly (e.g. via 2898 // pcs attribute), set effective calling convention if it's different than ABI 2899 // default. 2900 switch (getABIKind()) { 2901 case APCS: 2902 if (DefaultCC != llvm::CallingConv::ARM_APCS) 2903 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS); 2904 break; 2905 case AAPCS: 2906 if (DefaultCC != llvm::CallingConv::ARM_AAPCS) 2907 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS); 2908 break; 2909 case AAPCS_VFP: 2910 if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP) 2911 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP); 2912 break; 2913 } 2914 } 2915 2916 /// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous 2917 /// aggregate. If HAMembers is non-null, the number of base elements 2918 /// contained in the type is returned through it; this is used for the 2919 /// recursive calls that check aggregate component types. 2920 static bool isHomogeneousAggregate(QualType Ty, const Type *&Base, 2921 ASTContext &Context, 2922 uint64_t *HAMembers = 0) { 2923 uint64_t Members = 0; 2924 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 2925 if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members)) 2926 return false; 2927 Members *= AT->getSize().getZExtValue(); 2928 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 2929 const RecordDecl *RD = RT->getDecl(); 2930 if (RD->hasFlexibleArrayMember()) 2931 return false; 2932 2933 Members = 0; 2934 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2935 i != e; ++i) { 2936 const FieldDecl *FD = *i; 2937 uint64_t FldMembers; 2938 if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers)) 2939 return false; 2940 2941 Members = (RD->isUnion() ? 2942 std::max(Members, FldMembers) : Members + FldMembers); 2943 } 2944 } else { 2945 Members = 1; 2946 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 2947 Members = 2; 2948 Ty = CT->getElementType(); 2949 } 2950 2951 // Homogeneous aggregates for AAPCS-VFP must have base types of float, 2952 // double, or 64-bit or 128-bit vectors. 
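// Illustrative examples: struct { float x, y, z, w; } and
// struct { double re, im; } are homogeneous aggregates (4 and 2 members
// respectively), whereas struct { float f; double d; } is rejected below
// because the base types differ.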
2953 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 2954 if (BT->getKind() != BuiltinType::Float && 2955 BT->getKind() != BuiltinType::Double && 2956 BT->getKind() != BuiltinType::LongDouble) 2957 return false; 2958 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 2959 unsigned VecSize = Context.getTypeSize(VT); 2960 if (VecSize != 64 && VecSize != 128) 2961 return false; 2962 } else { 2963 return false; 2964 } 2965 2966 // The base type must be the same for all members. Vector types of the 2967 // same total size are treated as being equivalent here. 2968 const Type *TyPtr = Ty.getTypePtr(); 2969 if (!Base) 2970 Base = TyPtr; 2971 if (Base != TyPtr && 2972 (!Base->isVectorType() || !TyPtr->isVectorType() || 2973 Context.getTypeSize(Base) != Context.getTypeSize(TyPtr))) 2974 return false; 2975 } 2976 2977 // Homogeneous Aggregates can have at most 4 members of the base type. 2978 if (HAMembers) 2979 *HAMembers = Members; 2980 2981 return (Members > 0 && Members <= 4); 2982 } 2983 2984 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const { 2985 // Handle illegal vector types here. 2986 if (isIllegalVectorType(Ty)) { 2987 uint64_t Size = getContext().getTypeSize(Ty); 2988 if (Size <= 32) { 2989 llvm::Type *ResType = 2990 llvm::Type::getInt32Ty(getVMContext()); 2991 return ABIArgInfo::getDirect(ResType); 2992 } 2993 if (Size == 64) { 2994 llvm::Type *ResType = llvm::VectorType::get( 2995 llvm::Type::getInt32Ty(getVMContext()), 2); 2996 return ABIArgInfo::getDirect(ResType); 2997 } 2998 if (Size == 128) { 2999 llvm::Type *ResType = llvm::VectorType::get( 3000 llvm::Type::getInt32Ty(getVMContext()), 4); 3001 return ABIArgInfo::getDirect(ResType); 3002 } 3003 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3004 } 3005 3006 if (!isAggregateTypeForABI(Ty)) { 3007 // Treat an enum type as its underlying type. 3008 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3009 Ty = EnumTy->getDecl()->getIntegerType(); 3010 3011 return (Ty->isPromotableIntegerType() ? 3012 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3013 } 3014 3015 // Ignore empty records. 3016 if (isEmptyRecord(getContext(), Ty, true)) 3017 return ABIArgInfo::getIgnore(); 3018 3019 // Structures with either a non-trivial destructor or a non-trivial 3020 // copy constructor are always indirect. 3021 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 3022 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3023 3024 if (getABIKind() == ARMABIInfo::AAPCS_VFP) { 3025 // Homogeneous Aggregates need to be expanded. 3026 const Type *Base = 0; 3027 if (isHomogeneousAggregate(Ty, Base, getContext())) { 3028 assert(Base && "Base class should be set for homogeneous aggregate"); 3029 return ABIArgInfo::getExpand(); 3030 } 3031 } 3032 3033 // Support byval for ARM. 3034 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64) || 3035 getContext().getTypeAlign(Ty) > 64) { 3036 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 3037 } 3038 3039 // Otherwise, pass by coercing to a structure of the appropriate size. 3040 llvm::Type* ElemTy; 3041 unsigned SizeRegs; 3042 // FIXME: Try to match the types of the arguments more accurately where 3043 // we can. 
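// A minimal illustration of the coercion below: a 12-byte struct with
// 4-byte alignment is passed as { [3 x i32] }, and a 16-byte struct with
// 8-byte alignment as { [2 x i64] }.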
3044 if (getContext().getTypeAlign(Ty) <= 32) { 3045 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 3046 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 3047 } else { 3048 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 3049 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 3050 } 3051 3052 llvm::Type *STy = 3053 llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL); 3054 return ABIArgInfo::getDirect(STy); 3055 } 3056 3057 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 3058 llvm::LLVMContext &VMContext) { 3059 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 3060 // is called integer-like if its size is less than or equal to one word, and 3061 // the offset of each of its addressable sub-fields is zero. 3062 3063 uint64_t Size = Context.getTypeSize(Ty); 3064 3065 // Check that the type fits in a word. 3066 if (Size > 32) 3067 return false; 3068 3069 // FIXME: Handle vector types! 3070 if (Ty->isVectorType()) 3071 return false; 3072 3073 // Float types are never treated as "integer like". 3074 if (Ty->isRealFloatingType()) 3075 return false; 3076 3077 // If this is a builtin or pointer type then it is ok. 3078 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 3079 return true; 3080 3081 // Small complex integer types are "integer like". 3082 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 3083 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 3084 3085 // Single element and zero sized arrays should be allowed, by the definition 3086 // above, but they are not. 3087 3088 // Otherwise, it must be a record type. 3089 const RecordType *RT = Ty->getAs<RecordType>(); 3090 if (!RT) return false; 3091 3092 // Ignore records with flexible arrays. 3093 const RecordDecl *RD = RT->getDecl(); 3094 if (RD->hasFlexibleArrayMember()) 3095 return false; 3096 3097 // Check that all sub-fields are at offset 0, and are themselves "integer 3098 // like". 3099 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 3100 3101 bool HadField = false; 3102 unsigned idx = 0; 3103 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3104 i != e; ++i, ++idx) { 3105 const FieldDecl *FD = *i; 3106 3107 // Bit-fields are not addressable, we only need to verify they are "integer 3108 // like". We still have to disallow a subsequent non-bitfield, for example: 3109 // struct { int : 0; int x } 3110 // is non-integer like according to gcc. 3111 if (FD->isBitField()) { 3112 if (!RD->isUnion()) 3113 HadField = true; 3114 3115 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 3116 return false; 3117 3118 continue; 3119 } 3120 3121 // Check if this field is at offset 0. 3122 if (Layout.getFieldOffset(idx) != 0) 3123 return false; 3124 3125 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 3126 return false; 3127 3128 // Only allow at most one field in a structure. This doesn't match the 3129 // wording above, but follows gcc in situations with a field following an 3130 // empty structure. 3131 if (!RD->isUnion()) { 3132 if (HadField) 3133 return false; 3134 3135 HadField = true; 3136 } 3137 } 3138 3139 return true; 3140 } 3141 3142 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const { 3143 if (RetTy->isVoidType()) 3144 return ABIArgInfo::getIgnore(); 3145 3146 // Large vector types should be returned via memory. 
3147 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 3148 return ABIArgInfo::getIndirect(0); 3149 3150 if (!isAggregateTypeForABI(RetTy)) { 3151 // Treat an enum type as its underlying type. 3152 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3153 RetTy = EnumTy->getDecl()->getIntegerType(); 3154 3155 return (RetTy->isPromotableIntegerType() ? 3156 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3157 } 3158 3159 // Structures with either a non-trivial destructor or a non-trivial 3160 // copy constructor are always indirect. 3161 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 3162 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3163 3164 // Are we following APCS? 3165 if (getABIKind() == APCS) { 3166 if (isEmptyRecord(getContext(), RetTy, false)) 3167 return ABIArgInfo::getIgnore(); 3168 3169 // Complex types are all returned as packed integers. 3170 // 3171 // FIXME: Consider using 2 x vector types if the back end handles them 3172 // correctly. 3173 if (RetTy->isAnyComplexType()) 3174 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 3175 getContext().getTypeSize(RetTy))); 3176 3177 // Integer like structures are returned in r0. 3178 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 3179 // Return in the smallest viable integer type. 3180 uint64_t Size = getContext().getTypeSize(RetTy); 3181 if (Size <= 8) 3182 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3183 if (Size <= 16) 3184 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3185 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3186 } 3187 3188 // Otherwise return in memory. 3189 return ABIArgInfo::getIndirect(0); 3190 } 3191 3192 // Otherwise this is an AAPCS variant. 3193 3194 if (isEmptyRecord(getContext(), RetTy, true)) 3195 return ABIArgInfo::getIgnore(); 3196 3197 // Check for homogeneous aggregates with AAPCS-VFP. 3198 if (getABIKind() == AAPCS_VFP) { 3199 const Type *Base = 0; 3200 if (isHomogeneousAggregate(RetTy, Base, getContext())) { 3201 assert(Base && "Base class should be set for homogeneous aggregate"); 3202 // Homogeneous Aggregates are returned directly. 3203 return ABIArgInfo::getDirect(); 3204 } 3205 } 3206 3207 // Aggregates <= 4 bytes are returned in r0; other aggregates 3208 // are returned indirectly. 3209 uint64_t Size = getContext().getTypeSize(RetTy); 3210 if (Size <= 32) { 3211 // Return in the smallest viable integer type. 3212 if (Size <= 8) 3213 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3214 if (Size <= 16) 3215 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3216 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3217 } 3218 3219 return ABIArgInfo::getIndirect(0); 3220 } 3221 3222 /// isIllegalVector - check whether Ty is an illegal vector type. 3223 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const { 3224 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3225 // Check whether VT is legal. 3226 unsigned NumElements = VT->getNumElements(); 3227 uint64_t Size = getContext().getTypeSize(VT); 3228 // NumElements should be power of 2. 3229 if ((NumElements & (NumElements - 1)) != 0) 3230 return true; 3231 // Size should be greater than 32 bits. 
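  // (i.e. vectors of 32 bits or fewer are reported as illegal here and are
  // coerced to a single i32 in classifyArgumentType).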
3232 return Size <= 32; 3233 } 3234 return false; 3235 } 3236 3237 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3238 CodeGenFunction &CGF) const { 3239 llvm::Type *BP = CGF.Int8PtrTy; 3240 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3241 3242 CGBuilderTy &Builder = CGF.Builder; 3243 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 3244 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3245 3246 uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8; 3247 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; 3248 bool IsIndirect = false; 3249 3250 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for 3251 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte. 3252 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 3253 getABIKind() == ARMABIInfo::AAPCS) 3254 TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 3255 else 3256 TyAlign = 4; 3257 // Use indirect if size of the illegal vector is bigger than 16 bytes. 3258 if (isIllegalVectorType(Ty) && Size > 16) { 3259 IsIndirect = true; 3260 Size = 4; 3261 TyAlign = 4; 3262 } 3263 3264 // Handle address alignment for ABI alignment > 4 bytes. 3265 if (TyAlign > 4) { 3266 assert((TyAlign & (TyAlign - 1)) == 0 && 3267 "Alignment is not power of 2!"); 3268 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty); 3269 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1)); 3270 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1))); 3271 Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align"); 3272 } 3273 3274 uint64_t Offset = 3275 llvm::RoundUpToAlignment(Size, 4); 3276 llvm::Value *NextAddr = 3277 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 3278 "ap.next"); 3279 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3280 3281 if (IsIndirect) 3282 Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP)); 3283 else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) { 3284 // We can't directly cast ap.cur to pointer to a vector type, since ap.cur 3285 // may not be correctly aligned for the vector type. We create an aligned 3286 // temporary space and copy the content over from ap.cur to the temporary 3287 // space. This is necessary if the natural alignment of the type is greater 3288 // than the ABI alignment. 3289 llvm::Type *I8PtrTy = Builder.getInt8PtrTy(); 3290 CharUnits CharSize = getContext().getTypeSizeInChars(Ty); 3291 llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty), 3292 "var.align"); 3293 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy); 3294 llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy); 3295 Builder.CreateMemCpy(Dst, Src, 3296 llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()), 3297 TyAlign, false); 3298 Addr = AlignedTemp; //The content is in aligned location. 3299 } 3300 llvm::Type *PTy = 3301 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3302 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 3303 3304 return AddrTyped; 3305 } 3306 3307 class NaClARMABIInfo : public ABIInfo { 3308 public: 3309 NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind) 3310 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {} 3311 virtual void computeInfo(CGFunctionInfo &FI) const; 3312 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3313 CodeGenFunction &CGF) const; 3314 private: 3315 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv. 
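  // computeInfo() below selects PInfo only when the AST calling convention is
  // pnaclcall; NInfo handles all other calls and va_arg lowering.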
3316 ARMABIInfo NInfo; // Used for everything else. 3317 }; 3318 3319 class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo { 3320 public: 3321 NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind) 3322 : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {} 3323 }; 3324 3325 void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 3326 if (FI.getASTCallingConvention() == CC_PnaclCall) 3327 PInfo.computeInfo(FI); 3328 else 3329 static_cast<const ABIInfo&>(NInfo).computeInfo(FI); 3330 } 3331 3332 llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3333 CodeGenFunction &CGF) const { 3334 // Always use the native convention; calling pnacl-style varargs functions 3335 // is unsupported. 3336 return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF); 3337 } 3338 3339 //===----------------------------------------------------------------------===// 3340 // NVPTX ABI Implementation 3341 //===----------------------------------------------------------------------===// 3342 3343 namespace { 3344 3345 class NVPTXABIInfo : public ABIInfo { 3346 public: 3347 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3348 3349 ABIArgInfo classifyReturnType(QualType RetTy) const; 3350 ABIArgInfo classifyArgumentType(QualType Ty) const; 3351 3352 virtual void computeInfo(CGFunctionInfo &FI) const; 3353 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3354 CodeGenFunction &CFG) const; 3355 }; 3356 3357 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { 3358 public: 3359 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT) 3360 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {} 3361 3362 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3363 CodeGen::CodeGenModule &M) const; 3364 }; 3365 3366 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const { 3367 if (RetTy->isVoidType()) 3368 return ABIArgInfo::getIgnore(); 3369 if (isAggregateTypeForABI(RetTy)) 3370 return ABIArgInfo::getIndirect(0); 3371 return ABIArgInfo::getDirect(); 3372 } 3373 3374 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const { 3375 if (isAggregateTypeForABI(Ty)) 3376 return ABIArgInfo::getIndirect(0); 3377 3378 return ABIArgInfo::getDirect(); 3379 } 3380 3381 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const { 3382 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3383 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3384 it != ie; ++it) 3385 it->info = classifyArgumentType(it->type); 3386 3387 // Always honor user-specified calling convention. 3388 if (FI.getCallingConvention() != llvm::CallingConv::C) 3389 return; 3390 3391 // Calling convention as default by an ABI. 3392 // We're still using the PTX_Kernel/PTX_Device calling conventions here, 3393 // but we should switch to NVVM metadata later on. 
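  // For example, in OpenCL or CUDA mode every function starts out as
  // PTX_Device here; kernels are switched to PTX_Kernel later in
  // SetTargetAttributes based on the __kernel / __global__ attributes.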
3394 llvm::CallingConv::ID DefaultCC; 3395 const LangOptions &LangOpts = getContext().getLangOpts(); 3396 if (LangOpts.OpenCL || LangOpts.CUDA) { 3397 // If we are in OpenCL or CUDA mode, then default to device functions 3398 DefaultCC = llvm::CallingConv::PTX_Device; 3399 } else { 3400 // If we are in standard C/C++ mode, use the triple to decide on the default 3401 StringRef Env = 3402 getContext().getTargetInfo().getTriple().getEnvironmentName(); 3403 if (Env == "device") 3404 DefaultCC = llvm::CallingConv::PTX_Device; 3405 else 3406 DefaultCC = llvm::CallingConv::PTX_Kernel; 3407 } 3408 FI.setEffectiveCallingConvention(DefaultCC); 3409 3410 } 3411 3412 llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3413 CodeGenFunction &CFG) const { 3414 llvm_unreachable("NVPTX does not support varargs"); 3415 } 3416 3417 void NVPTXTargetCodeGenInfo:: 3418 SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3419 CodeGen::CodeGenModule &M) const{ 3420 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3421 if (!FD) return; 3422 3423 llvm::Function *F = cast<llvm::Function>(GV); 3424 3425 // Perform special handling in OpenCL mode 3426 if (M.getLangOpts().OpenCL) { 3427 // Use OpenCL function attributes to set proper calling conventions 3428 // By default, all functions are device functions 3429 if (FD->hasAttr<OpenCLKernelAttr>()) { 3430 // OpenCL __kernel functions get a kernel calling convention 3431 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 3432 // And kernel functions are not subject to inlining 3433 F->addFnAttr(llvm::Attributes::NoInline); 3434 } 3435 } 3436 3437 // Perform special handling in CUDA mode. 3438 if (M.getLangOpts().CUDA) { 3439 // CUDA __global__ functions get a kernel calling convention. Since 3440 // __global__ functions cannot be called from the device, we do not 3441 // need to set the noinline attribute. 3442 if (FD->getAttr<CUDAGlobalAttr>()) 3443 F->setCallingConv(llvm::CallingConv::PTX_Kernel); 3444 } 3445 } 3446 3447 } 3448 3449 //===----------------------------------------------------------------------===// 3450 // MBlaze ABI Implementation 3451 //===----------------------------------------------------------------------===// 3452 3453 namespace { 3454 3455 class MBlazeABIInfo : public ABIInfo { 3456 public: 3457 MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3458 3459 bool isPromotableIntegerType(QualType Ty) const; 3460 3461 ABIArgInfo classifyReturnType(QualType RetTy) const; 3462 ABIArgInfo classifyArgumentType(QualType RetTy) const; 3463 3464 virtual void computeInfo(CGFunctionInfo &FI) const { 3465 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3466 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3467 it != ie; ++it) 3468 it->info = classifyArgumentType(it->type); 3469 } 3470 3471 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3472 CodeGenFunction &CGF) const; 3473 }; 3474 3475 class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo { 3476 public: 3477 MBlazeTargetCodeGenInfo(CodeGenTypes &CGT) 3478 : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {} 3479 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3480 CodeGen::CodeGenModule &M) const; 3481 }; 3482 3483 } 3484 3485 bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const { 3486 // MBlaze ABI requires all 8 and 16 bit quantities to be extended. 
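  // e.g. 'bool', 'char' and 'short' arguments (signed or unsigned) are
  // extended to a full register, while 'int' and wider types are passed
  // unchanged.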
3487 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 3488 switch (BT->getKind()) { 3489 case BuiltinType::Bool: 3490 case BuiltinType::Char_S: 3491 case BuiltinType::Char_U: 3492 case BuiltinType::SChar: 3493 case BuiltinType::UChar: 3494 case BuiltinType::Short: 3495 case BuiltinType::UShort: 3496 return true; 3497 default: 3498 return false; 3499 } 3500 return false; 3501 } 3502 3503 llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3504 CodeGenFunction &CGF) const { 3505 // FIXME: Implement 3506 return 0; 3507 } 3508 3509 3510 ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const { 3511 if (RetTy->isVoidType()) 3512 return ABIArgInfo::getIgnore(); 3513 if (isAggregateTypeForABI(RetTy)) 3514 return ABIArgInfo::getIndirect(0); 3515 3516 return (isPromotableIntegerType(RetTy) ? 3517 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3518 } 3519 3520 ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const { 3521 if (isAggregateTypeForABI(Ty)) 3522 return ABIArgInfo::getIndirect(0); 3523 3524 return (isPromotableIntegerType(Ty) ? 3525 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3526 } 3527 3528 void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3529 llvm::GlobalValue *GV, 3530 CodeGen::CodeGenModule &M) 3531 const { 3532 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3533 if (!FD) return; 3534 3535 llvm::CallingConv::ID CC = llvm::CallingConv::C; 3536 if (FD->hasAttr<MBlazeInterruptHandlerAttr>()) 3537 CC = llvm::CallingConv::MBLAZE_INTR; 3538 else if (FD->hasAttr<MBlazeSaveVolatilesAttr>()) 3539 CC = llvm::CallingConv::MBLAZE_SVOL; 3540 3541 if (CC != llvm::CallingConv::C) { 3542 // Handle 'interrupt_handler' attribute: 3543 llvm::Function *F = cast<llvm::Function>(GV); 3544 3545 // Step 1: Set ISR calling convention. 3546 F->setCallingConv(CC); 3547 3548 // Step 2: Add attributes goodness. 3549 F->addFnAttr(llvm::Attributes::NoInline); 3550 } 3551 3552 // Step 3: Emit _interrupt_handler alias. 3553 if (CC == llvm::CallingConv::MBLAZE_INTR) 3554 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 3555 "_interrupt_handler", GV, &M.getModule()); 3556 } 3557 3558 3559 //===----------------------------------------------------------------------===// 3560 // MSP430 ABI Implementation 3561 //===----------------------------------------------------------------------===// 3562 3563 namespace { 3564 3565 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 3566 public: 3567 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 3568 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 3569 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3570 CodeGen::CodeGenModule &M) const; 3571 }; 3572 3573 } 3574 3575 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3576 llvm::GlobalValue *GV, 3577 CodeGen::CodeGenModule &M) const { 3578 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 3579 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 3580 // Handle 'interrupt' attribute: 3581 llvm::Function *F = cast<llvm::Function>(GV); 3582 3583 // Step 1: Set ISR calling convention. 3584 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 3585 3586 // Step 2: Add attributes goodness. 3587 F->addFnAttr(llvm::Attributes::NoInline); 3588 3589 // Step 3: Emit ISR vector alias. 
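      // The alias name encodes the vector address: interrupt number N becomes
      // "vector_<hex of 0xffe0 + N>", e.g. interrupt 0 yields "vector_ffe0"
      // (0xffe0 is the base of the MSP430 interrupt vector area).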
3590 unsigned Num = attr->getNumber() + 0xffe0; 3591 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage, 3592 "vector_" + Twine::utohexstr(Num), 3593 GV, &M.getModule()); 3594 } 3595 } 3596 } 3597 3598 //===----------------------------------------------------------------------===// 3599 // MIPS ABI Implementation. This works for both little-endian and 3600 // big-endian variants. 3601 //===----------------------------------------------------------------------===// 3602 3603 namespace { 3604 class MipsABIInfo : public ABIInfo { 3605 bool IsO32; 3606 unsigned MinABIStackAlignInBytes, StackAlignInBytes; 3607 void CoerceToIntArgs(uint64_t TySize, 3608 SmallVector<llvm::Type*, 8> &ArgList) const; 3609 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; 3610 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 3611 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 3612 public: 3613 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 3614 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), 3615 StackAlignInBytes(IsO32 ? 8 : 16) {} 3616 3617 ABIArgInfo classifyReturnType(QualType RetTy) const; 3618 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 3619 virtual void computeInfo(CGFunctionInfo &FI) const; 3620 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3621 CodeGenFunction &CGF) const; 3622 }; 3623 3624 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 3625 unsigned SizeOfUnwindException; 3626 public: 3627 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 3628 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 3629 SizeOfUnwindException(IsO32 ? 24 : 32) {} 3630 3631 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 3632 return 29; 3633 } 3634 3635 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3636 llvm::Value *Address) const; 3637 3638 unsigned getSizeOfUnwindException() const { 3639 return SizeOfUnwindException; 3640 } 3641 }; 3642 } 3643 3644 void MipsABIInfo::CoerceToIntArgs(uint64_t TySize, 3645 SmallVector<llvm::Type*, 8> &ArgList) const { 3646 llvm::IntegerType *IntTy = 3647 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 3648 3649 // Add (TySize / MinABIStackAlignInBytes) args of IntTy. 3650 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) 3651 ArgList.push_back(IntTy); 3652 3653 // If necessary, add one more integer type to ArgList. 3654 unsigned R = TySize % (MinABIStackAlignInBytes * 8); 3655 3656 if (R) 3657 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 3658 } 3659 3660 // In N32/64, an aligned double precision floating point field is passed in 3661 // a register. 3662 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { 3663 SmallVector<llvm::Type*, 8> ArgList, IntArgList; 3664 3665 if (IsO32) { 3666 CoerceToIntArgs(TySize, ArgList); 3667 return llvm::StructType::get(getVMContext(), ArgList); 3668 } 3669 3670 if (Ty->isComplexType()) 3671 return CGT.ConvertType(Ty); 3672 3673 const RecordType *RT = Ty->getAs<RecordType>(); 3674 3675 // Unions/vectors are passed in integer registers. 
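  // For example, on N32/N64 an 8-byte union containing a double is coerced to
  // { i64 } here rather than being given a floating-point slot; only doubles
  // that are fields of a struct or class get the FPR treatment below.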
3676 if (!RT || !RT->isStructureOrClassType()) { 3677 CoerceToIntArgs(TySize, ArgList); 3678 return llvm::StructType::get(getVMContext(), ArgList); 3679 } 3680 3681 const RecordDecl *RD = RT->getDecl(); 3682 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3683 assert(!(TySize % 8) && "Size of structure must be multiple of 8."); 3684 3685 uint64_t LastOffset = 0; 3686 unsigned idx = 0; 3687 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 3688 3689 // Iterate over fields in the struct/class and check if there are any aligned 3690 // double fields. 3691 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3692 i != e; ++i, ++idx) { 3693 const QualType Ty = i->getType(); 3694 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 3695 3696 if (!BT || BT->getKind() != BuiltinType::Double) 3697 continue; 3698 3699 uint64_t Offset = Layout.getFieldOffset(idx); 3700 if (Offset % 64) // Ignore doubles that are not aligned. 3701 continue; 3702 3703 // Add ((Offset - LastOffset) / 64) args of type i64. 3704 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 3705 ArgList.push_back(I64); 3706 3707 // Add double type. 3708 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 3709 LastOffset = Offset + 64; 3710 } 3711 3712 CoerceToIntArgs(TySize - LastOffset, IntArgList); 3713 ArgList.append(IntArgList.begin(), IntArgList.end()); 3714 3715 return llvm::StructType::get(getVMContext(), ArgList); 3716 } 3717 3718 llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const { 3719 assert((Offset % MinABIStackAlignInBytes) == 0); 3720 3721 if ((Align - 1) & Offset) 3722 return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 3723 3724 return 0; 3725 } 3726 3727 ABIArgInfo 3728 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 3729 uint64_t OrigOffset = Offset; 3730 uint64_t TySize = getContext().getTypeSize(Ty); 3731 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 3732 3733 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), 3734 (uint64_t)StackAlignInBytes); 3735 Offset = llvm::RoundUpToAlignment(Offset, Align); 3736 Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8; 3737 3738 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { 3739 // Ignore empty aggregates. 3740 if (TySize == 0) 3741 return ABIArgInfo::getIgnore(); 3742 3743 // Records with non trivial destructors/constructors should not be passed 3744 // by value. 3745 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) { 3746 Offset = OrigOffset + MinABIStackAlignInBytes; 3747 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3748 } 3749 3750 // If we have reached here, aggregates are passed directly by coercing to 3751 // another structure type. Padding is inserted if the offset of the 3752 // aggregate is unaligned. 3753 return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, 3754 getPaddingType(Align, OrigOffset)); 3755 } 3756 3757 // Treat an enum type as its underlying type. 
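  // e.g. an enum whose underlying type is 'unsigned char' is extended below
  // just like a plain 'unsigned char' argument would be.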
3758 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3759 Ty = EnumTy->getDecl()->getIntegerType(); 3760 3761 if (Ty->isPromotableIntegerType()) 3762 return ABIArgInfo::getExtend(); 3763 3764 return ABIArgInfo::getDirect(0, 0, getPaddingType(Align, OrigOffset)); 3765 } 3766 3767 llvm::Type* 3768 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 3769 const RecordType *RT = RetTy->getAs<RecordType>(); 3770 SmallVector<llvm::Type*, 8> RTList; 3771 3772 if (RT && RT->isStructureOrClassType()) { 3773 const RecordDecl *RD = RT->getDecl(); 3774 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3775 unsigned FieldCnt = Layout.getFieldCount(); 3776 3777 // N32/64 returns struct/classes in floating point registers if the 3778 // following conditions are met: 3779 // 1. The size of the struct/class is no larger than 128-bit. 3780 // 2. The struct/class has one or two fields all of which are floating 3781 // point types. 3782 // 3. The offset of the first field is zero (this follows what gcc does). 3783 // 3784 // Any other composite results are returned in integer registers. 3785 // 3786 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 3787 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 3788 for (; b != e; ++b) { 3789 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 3790 3791 if (!BT || !BT->isFloatingPoint()) 3792 break; 3793 3794 RTList.push_back(CGT.ConvertType(b->getType())); 3795 } 3796 3797 if (b == e) 3798 return llvm::StructType::get(getVMContext(), RTList, 3799 RD->hasAttr<PackedAttr>()); 3800 3801 RTList.clear(); 3802 } 3803 } 3804 3805 CoerceToIntArgs(Size, RTList); 3806 return llvm::StructType::get(getVMContext(), RTList); 3807 } 3808 3809 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 3810 uint64_t Size = getContext().getTypeSize(RetTy); 3811 3812 if (RetTy->isVoidType() || Size == 0) 3813 return ABIArgInfo::getIgnore(); 3814 3815 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 3816 if (Size <= 128) { 3817 if (RetTy->isAnyComplexType()) 3818 return ABIArgInfo::getDirect(); 3819 3820 // O32 returns integer vectors in registers. 3821 if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation()) 3822 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 3823 3824 if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 3825 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 3826 } 3827 3828 return ABIArgInfo::getIndirect(0); 3829 } 3830 3831 // Treat an enum type as its underlying type. 3832 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3833 RetTy = EnumTy->getDecl()->getIntegerType(); 3834 3835 return (RetTy->isPromotableIntegerType() ? 3836 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3837 } 3838 3839 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 3840 ABIArgInfo &RetInfo = FI.getReturnInfo(); 3841 RetInfo = classifyReturnType(FI.getReturnType()); 3842 3843 // Check if a pointer to an aggregate is passed as a hidden argument. 3844 uint64_t Offset = RetInfo.isIndirect() ? 
MinABIStackAlignInBytes : 0; 3845 3846 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3847 it != ie; ++it) 3848 it->info = classifyArgumentType(it->type, Offset); 3849 } 3850 3851 llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3852 CodeGenFunction &CGF) const { 3853 llvm::Type *BP = CGF.Int8PtrTy; 3854 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3855 3856 CGBuilderTy &Builder = CGF.Builder; 3857 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 3858 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3859 int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8; 3860 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3861 llvm::Value *AddrTyped; 3862 unsigned PtrWidth = getContext().getTargetInfo().getPointerWidth(0); 3863 llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty; 3864 3865 if (TypeAlign > MinABIStackAlignInBytes) { 3866 llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy); 3867 llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1); 3868 llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign); 3869 llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc); 3870 llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask); 3871 AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy); 3872 } 3873 else 3874 AddrTyped = Builder.CreateBitCast(Addr, PTy); 3875 3876 llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP); 3877 TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes); 3878 uint64_t Offset = 3879 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign); 3880 llvm::Value *NextAddr = 3881 Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset), 3882 "ap.next"); 3883 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3884 3885 return AddrTyped; 3886 } 3887 3888 bool 3889 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3890 llvm::Value *Address) const { 3891 // This information comes from gcc's implementation, which seems to 3892 // as canonical as it gets. 3893 3894 // Everything on MIPS is 4 bytes. Double-precision FP registers 3895 // are aliased to pairs of single-precision FP registers. 3896 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 3897 3898 // 0-31 are the general purpose registers, $0 - $31. 3899 // 32-63 are the floating-point registers, $f0 - $f31. 3900 // 64 and 65 are the multiply/divide registers, $hi and $lo. 3901 // 66 is the (notional, I think) register for signal-handler return. 3902 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65); 3903 3904 // 67-74 are the floating-point status registers, $fcc0 - $fcc7. 3905 // They are one bit wide and ignored here. 3906 3907 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31. 3908 // (coprocessor 1 is the FP unit) 3909 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31. 3910 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31. 3911 // 176-181 are the DSP accumulator registers. 3912 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181); 3913 return false; 3914 } 3915 3916 //===----------------------------------------------------------------------===// 3917 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults. 3918 // Currently subclassed only to implement custom OpenCL C function attribute 3919 // handling. 
3920 //===----------------------------------------------------------------------===// 3921 3922 namespace { 3923 3924 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { 3925 public: 3926 TCETargetCodeGenInfo(CodeGenTypes &CGT) 3927 : DefaultTargetCodeGenInfo(CGT) {} 3928 3929 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3930 CodeGen::CodeGenModule &M) const; 3931 }; 3932 3933 void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D, 3934 llvm::GlobalValue *GV, 3935 CodeGen::CodeGenModule &M) const { 3936 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3937 if (!FD) return; 3938 3939 llvm::Function *F = cast<llvm::Function>(GV); 3940 3941 if (M.getLangOpts().OpenCL) { 3942 if (FD->hasAttr<OpenCLKernelAttr>()) { 3943 // OpenCL C Kernel functions are not subject to inlining 3944 F->addFnAttr(llvm::Attributes::NoInline); 3945 3946 if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) { 3947 3948 // Convert the reqd_work_group_size() attributes to metadata. 3949 llvm::LLVMContext &Context = F->getContext(); 3950 llvm::NamedMDNode *OpenCLMetadata = 3951 M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info"); 3952 3953 SmallVector<llvm::Value*, 5> Operands; 3954 Operands.push_back(F); 3955 3956 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3957 llvm::APInt(32, 3958 FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim()))); 3959 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3960 llvm::APInt(32, 3961 FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim()))); 3962 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty, 3963 llvm::APInt(32, 3964 FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim()))); 3965 3966 // Add a boolean constant operand for "required" (true) or "hint" (false) 3967 // for implementing the work_group_size_hint attr later. Currently 3968 // always true as the hint is not yet implemented. 3969 Operands.push_back(llvm::ConstantInt::getTrue(Context)); 3970 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); 3971 } 3972 } 3973 } 3974 } 3975 3976 } 3977 3978 //===----------------------------------------------------------------------===// 3979 // Hexagon ABI Implementation 3980 //===----------------------------------------------------------------------===// 3981 3982 namespace { 3983 3984 class HexagonABIInfo : public ABIInfo { 3985 3986 3987 public: 3988 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3989 3990 private: 3991 3992 ABIArgInfo classifyReturnType(QualType RetTy) const; 3993 ABIArgInfo classifyArgumentType(QualType RetTy) const; 3994 3995 virtual void computeInfo(CGFunctionInfo &FI) const; 3996 3997 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3998 CodeGenFunction &CGF) const; 3999 }; 4000 4001 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { 4002 public: 4003 HexagonTargetCodeGenInfo(CodeGenTypes &CGT) 4004 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {} 4005 4006 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 4007 return 29; 4008 } 4009 }; 4010 4011 } 4012 4013 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { 4014 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4015 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 4016 it != ie; ++it) 4017 it->info = classifyArgumentType(it->type); 4018 } 4019 4020 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { 4021 if (!isAggregateTypeForABI(Ty)) { 4022 // Treat an enum type as its underlying type. 
4023 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4024 Ty = EnumTy->getDecl()->getIntegerType(); 4025 4026 return (Ty->isPromotableIntegerType() ? 4027 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4028 } 4029 4030 // Ignore empty records. 4031 if (isEmptyRecord(getContext(), Ty, true)) 4032 return ABIArgInfo::getIgnore(); 4033 4034 // Structures with either a non-trivial destructor or a non-trivial 4035 // copy constructor are always indirect. 4036 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 4037 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 4038 4039 uint64_t Size = getContext().getTypeSize(Ty); 4040 if (Size > 64) 4041 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 4042 // Pass in the smallest viable integer type. 4043 else if (Size > 32) 4044 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 4045 else if (Size > 16) 4046 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 4047 else if (Size > 8) 4048 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 4049 else 4050 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 4051 } 4052 4053 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { 4054 if (RetTy->isVoidType()) 4055 return ABIArgInfo::getIgnore(); 4056 4057 // Large vector types should be returned via memory. 4058 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64) 4059 return ABIArgInfo::getIndirect(0); 4060 4061 if (!isAggregateTypeForABI(RetTy)) { 4062 // Treat an enum type as its underlying type. 4063 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 4064 RetTy = EnumTy->getDecl()->getIntegerType(); 4065 4066 return (RetTy->isPromotableIntegerType() ? 4067 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4068 } 4069 4070 // Structures with either a non-trivial destructor or a non-trivial 4071 // copy constructor are always indirect. 4072 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) 4073 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 4074 4075 if (isEmptyRecord(getContext(), RetTy, true)) 4076 return ABIArgInfo::getIgnore(); 4077 4078 // Aggregates <= 8 bytes are returned in r0; other aggregates 4079 // are returned indirectly. 4080 uint64_t Size = getContext().getTypeSize(RetTy); 4081 if (Size <= 64) { 4082 // Return in the smallest viable integer type. 
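    // e.g. a 3-byte aggregate is returned as an i32 and a 5-byte aggregate
    // as an i64.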
4083 if (Size <= 8) 4084 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 4085 if (Size <= 16) 4086 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 4087 if (Size <= 32) 4088 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 4089 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 4090 } 4091 4092 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 4093 } 4094 4095 llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4096 CodeGenFunction &CGF) const { 4097 // FIXME: Need to handle alignment 4098 llvm::Type *BPP = CGF.Int8PtrPtrTy; 4099 4100 CGBuilderTy &Builder = CGF.Builder; 4101 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 4102 "ap"); 4103 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 4104 llvm::Type *PTy = 4105 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 4106 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 4107 4108 uint64_t Offset = 4109 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 4110 llvm::Value *NextAddr = 4111 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 4112 "ap.next"); 4113 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 4114 4115 return AddrTyped; 4116 } 4117 4118 4119 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { 4120 if (TheTargetCodeGenInfo) 4121 return *TheTargetCodeGenInfo; 4122 4123 const llvm::Triple &Triple = getContext().getTargetInfo().getTriple(); 4124 switch (Triple.getArch()) { 4125 default: 4126 return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types)); 4127 4128 case llvm::Triple::le32: 4129 return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types)); 4130 case llvm::Triple::mips: 4131 case llvm::Triple::mipsel: 4132 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true)); 4133 4134 case llvm::Triple::mips64: 4135 case llvm::Triple::mips64el: 4136 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false)); 4137 4138 case llvm::Triple::arm: 4139 case llvm::Triple::thumb: 4140 { 4141 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; 4142 4143 if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0) 4144 Kind = ARMABIInfo::APCS; 4145 else if (CodeGenOpts.FloatABI == "hard") 4146 Kind = ARMABIInfo::AAPCS_VFP; 4147 4148 switch (Triple.getOS()) { 4149 case llvm::Triple::NativeClient: 4150 return *(TheTargetCodeGenInfo = 4151 new NaClARMTargetCodeGenInfo(Types, Kind)); 4152 default: 4153 return *(TheTargetCodeGenInfo = 4154 new ARMTargetCodeGenInfo(Types, Kind)); 4155 } 4156 } 4157 4158 case llvm::Triple::ppc: 4159 return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types)); 4160 case llvm::Triple::ppc64: 4161 if (Triple.isOSBinFormatELF()) 4162 return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types)); 4163 else 4164 return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types)); 4165 4166 case llvm::Triple::nvptx: 4167 case llvm::Triple::nvptx64: 4168 return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types)); 4169 4170 case llvm::Triple::mblaze: 4171 return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types)); 4172 4173 case llvm::Triple::msp430: 4174 return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types)); 4175 4176 case llvm::Triple::tce: 4177 return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types)); 4178 4179 case llvm::Triple::x86: { 4180 bool DisableMMX = strcmp(getContext().getTargetInfo().getABI(), "no-mmx") == 0; 4181 
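    // DisableMMX is threaded through to every 32-bit x86 variant below; the
    // remaining constructor arguments only vary with the target OS (Darwin is
    // checked first, then the switch below).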
4182 if (Triple.isOSDarwin()) 4183 return *(TheTargetCodeGenInfo = 4184 new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX, false, 4185 CodeGenOpts.NumRegisterParameters)); 4186 4187 switch (Triple.getOS()) { 4188 case llvm::Triple::Cygwin: 4189 case llvm::Triple::MinGW32: 4190 case llvm::Triple::AuroraUX: 4191 case llvm::Triple::DragonFly: 4192 case llvm::Triple::FreeBSD: 4193 case llvm::Triple::OpenBSD: 4194 case llvm::Triple::Bitrig: 4195 return *(TheTargetCodeGenInfo = 4196 new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, 4197 false, 4198 CodeGenOpts.NumRegisterParameters)); 4199 4200 case llvm::Triple::Win32: 4201 return *(TheTargetCodeGenInfo = 4202 new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX, true, 4203 CodeGenOpts.NumRegisterParameters)); 4204 4205 default: 4206 return *(TheTargetCodeGenInfo = 4207 new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX, 4208 false, 4209 CodeGenOpts.NumRegisterParameters)); 4210 } 4211 } 4212 4213 case llvm::Triple::x86_64: { 4214 bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0; 4215 4216 switch (Triple.getOS()) { 4217 case llvm::Triple::Win32: 4218 case llvm::Triple::MinGW32: 4219 case llvm::Triple::Cygwin: 4220 return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types)); 4221 case llvm::Triple::NativeClient: 4222 return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types, HasAVX)); 4223 default: 4224 return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types, 4225 HasAVX)); 4226 } 4227 } 4228 case llvm::Triple::hexagon: 4229 return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types)); 4230 } 4231 } 4232