//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return CodeGenFunction::hasAggregateLLVMType(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}


void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
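/// Editorial example (hypothetical type, added for illustration): given
///   struct S { int : 0; int x; };
/// the unnamed bit-field is "empty", so only 'x' contributes to the ABI
/// classification of S.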
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isEmptyRecord(Context, i->getType(), true))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i)
    if (!isEmptyField(Context, *i, AllowArrays))
      return false;
  return true;
}

/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
/// a non-trivial destructor or a non-trivial copy constructor.
static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return false;

  return !RD->hasTrivialDestructor() || RD->hasNonTrivialCopyConstructor();
}

/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
/// a record type with either a non-trivial destructor or a non-trivial copy
/// constructor.
static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;

  return hasNonTrivialDestructorOrCopyConstructor(RT);
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;

  // If this is a C++ record, check the bases first.
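  // Editorial example (hypothetical types, for illustration): for
  //   struct B { float f; };
  //   struct D : B {};
  // the single element is found in the base class, so this function
  // returns the type 'float' for D.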
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i) {
      // Ignore empty records.
      if (isEmptyRecord(Context, i->getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return 0;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(i->getType(), Context);
      if (!Found)
        return 0;
    }
  }

  // Check for single element.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return 0;

  return Found;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to avoid
/// inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
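  // Editorial example (hypothetical type, for illustration): a plain C
  //   struct Point { int x; int y; };
  // passes every check below (all fields are 32-bit basic types, no
  // bit-fields, no padding holes), so it can be expanded.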
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems: we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are no holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/constructors should not be passed
    // by value.
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
 public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty, unsigned &FreeRegs) const;

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
 public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned FreeRegs = FI.getHasRegParm() ? FI.getRegParm() : 0;

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type, FreeRegs);
}

llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty,
                                              unsigned &FreeRegs) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/constructors should not be passed
    // by value.
    FreeRegs = 0;
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  ABIArgInfo BaseInfo = (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());

  // Regparm regs hold 32 bits.
  unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  if (SizeInRegs == 0) return BaseInfo;
  if (SizeInRegs > FreeRegs) {
    FreeRegs = 0;
    return BaseInfo;
  }
  FreeRegs -= SizeInRegs;
  return BaseInfo.isDirect() ?
      ABIArgInfo::getDirectInReg(BaseInfo.getCoerceToType()) :
      ABIArgInfo::getExtendInReg(BaseInfo.getCoerceToType());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// UseX86_MMXType - Return true if this is an MMX type that should use the
/// special x86_mmx type.
bool UseX86_MMXType(llvm::Type *IRType) {
  // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
  // special x86_mmx type.
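  // (Editorial note, for illustration: the final scalar-size check excludes
  // <1 x i64>, which is 64 bits wide but has a 64-bit element.)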
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  return Ty;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsMMXDisabled;
  bool IsWin32FloatStructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
                                         unsigned callingConvention);

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal,
                               unsigned &FreeRegs) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy,
                                unsigned callingConvention) const;
  ABIArgInfo classifyArgumentType(QualType Ty, unsigned &FreeRegs,
                                  bool IsFastCall) const;
  bool shouldUseInReg(QualType Ty, unsigned &FreeRegs,
                      bool IsFastCall, bool &NeedsPadding) const;

public:

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsMMXDisabled(m), IsWin32FloatStructABI(w),
      DefaultNumRegisterParameters(r) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                          bool d, bool p, bool m, bool w, unsigned r)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w, r)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.isTargetDarwin()) return 5;

    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

};

}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
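/// Editorial example (hypothetical type, for illustration): a C struct
///   struct Pair { short a; short b; };
/// is 32 bits wide and every field is itself returnable in a register, so
/// it is returned in a register instead of through a hidden sret pointer.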
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context,
                                               unsigned callingConvention) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context,
                                      callingConvention);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // For thiscall conventions, structures will never be returned in
  // a register. This is for compatibility with the MSVC ABI.
  if (callingConvention == llvm::CallingConv::X86_ThisCall &&
      RT->isStructureType()) {
    return false;
  }

  // Structure types are returned in a register if all fields would be
  // returned in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context,
                                    callingConvention))
      return false;
  }
  return true;
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             unsigned callingConvention) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

      // Structures with flexible arrays are always indirect.
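      // (Editorial example, for illustration: a C struct such as
      //   struct Buf { int len; char data[]; };
      // takes this indirect path.)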
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(),
                                                  callingConvention)) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32FloatStructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (isRecordWithSSEVectorType(Context, i->getType()))
        return true;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            unsigned &FreeRegs) const {
  if (!ByVal) {
    if (FreeRegs) {
      --FreeRegs; // Non-byval indirects just use one pointer.
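      // (Editorial note: only the pointer to the argument is promoted to a
      // register here; the pointed-to object itself stays in memory.)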
      return ABIArgInfo::getIndirectInReg(0, false);
    }
    return ABIArgInfo::getIndirect(0, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  if (StackAlign < TypeAlign)
    return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
                                   /*Realign=*/true);

  return ABIArgInfo::getIndirect(StackAlign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::shouldUseInReg(QualType Ty, unsigned &FreeRegs,
                                   bool IsFastCall, bool &NeedsPadding) const {
  NeedsPadding = false;
  Class C = classify(Ty);
  if (C == Float)
    return false;

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > FreeRegs) {
    FreeRegs = 0;
    return false;
  }

  FreeRegs -= SizeInRegs;

  if (IsFastCall) {
    if (Size > 32)
      return false;

    if (Ty->isIntegralOrEnumerationType())
      return true;

    if (Ty->isPointerType())
      return true;

    if (Ty->isReferenceType())
      return true;

    if (FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               unsigned &FreeRegs,
                                               bool IsFastCall) const {
  // FIXME: Set alignment on indirect arguments.
  if (isAggregateTypeForABI(Ty)) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return getIndirectResult(Ty, false, FreeRegs);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, FreeRegs);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding;
    if (shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements;
      for (unsigned I = 0; I < SizeInRegs; ++I)
        Elements.push_back(Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : 0;

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
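    // (Editorial example, for illustration: struct Point { int x; int y; }
    // is 64 bits and expandable, so it becomes two separate i32 arguments
    // rather than a byval pointer.)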
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpandWithPadding(IsFastCall, PaddingType);

    return getIndirectResult(Ty, true, FreeRegs);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory; we handle this by
    // passing them as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    llvm::Type *IRType = CGT.ConvertType(Ty);
    if (UseX86_MMXType(IRType)) {
      if (IsMMXDisabled)
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            64));
      ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
      AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
      return AAI;
    }

    return ABIArgInfo::getDirect();
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool NeedsPadding;
  bool InReg = shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
                                          FI.getCallingConvention());

  unsigned CC = FI.getCallingConvention();
  bool IsFastCall = CC == llvm::CallingConv::X86_FastCall;
  unsigned FreeRegs;
  if (IsFastCall)
    FreeRegs = 2;
  else if (FI.getHasRegParm())
    FreeRegs = FI.getRegParm();
  else
    FreeRegs = DefaultNumRegisterParameters;

  // If the return value is indirect, then the hidden argument consumes one
  // integer register.
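  // (Editorial example: under fastcall, FreeRegs starts at 2, so an indirect
  // return leaves a single register for the remaining arguments.)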
  if (FI.getReturnInfo().isIndirect() && FreeRegs) {
    --FreeRegs;
    ABIArgInfo &Old = FI.getReturnInfo();
    Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(),
                                       Old.getIndirectByVal(),
                                       Old.getIndirectRealign());
  }

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type, FreeRegs, IsFastCall);
}

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute whether the address needs to be aligned.
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttribute(llvm::AttributeSet::FunctionIndex,
                       llvm::Attribute::get(CGM.getLLVMContext(), B));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
                                               CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.isTargetDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//


namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers still
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getContext().getTargetInfo().getTriple().isOSDarwin();
  }

  bool HasAVX;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32-bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  ABIArgInfo classify(QualType Ty) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
    : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

};

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }
};

}

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
  //
  // (c) If the size of the aggregate exceeds two eightbytes and the first
  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //     argument is passed in memory. NOTE: This is necessary to keep the
  //     ABI working for processors that don't support the __m256 type.
  //
  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic. Others can arise
  // only with unions; for example:
  //   union { _Complex double d; unsigned u; };
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  //     the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  //     class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  //     INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  //     MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}

void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
               (k == BuiltinType::LongDouble &&
                getContext().getTargetInfo().getTriple().getOS() ==
                llvm::Triple::NaCl)) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
      Lo = Hi = Integer;
    else
      Current = Integer;
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong)||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 || (HasAVX && Size == 256)) {
      // 256-bit arguments are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to class
      // SSEUP. The original Lo and Hi design considers that types can't be
      // greater than 128 bits, so a 64-bit split in Hi and Lo makes sense.
      // This design isn't correct for 256 bits, but since there are no cases
      // where the upper parts would need to be inspected, avoid adding
      // complexity and just consider Hi to match the 64-256 part.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy)
      Current = SSE;
    else if (ET == getContext().DoubleTy ||
             (ET == getContext().LongDoubleTy &&
              getContext().getTargetInfo().getTriple().getOS() ==
              llvm::Triple::NaCl))
      Lo = Hi = SSE;
    else if (ET == getContext().LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since Lo and Hi logic isn't extended
    // to work for sizes wider than 128, early check and fallback to memory.
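    // (Editorial example, for illustration: double[4] is 256 bits with
    // 64-bit elements, so it takes this early fallback to memory; a
    // single-element array of a 256-bit vector proceeds to the merge loop.)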
    if (Size > 128 && EltSize != 256)
      return;

    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (hasNonTrivialDestructorOrCopyConstructor(RT))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset =
          OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(i->getType(), Offset, FieldLo, FieldHi);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory)
          break;
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // four eightbytes, or it contains unaligned fields, it has class MEMORY.
      //
      // The only case a 256-bit wide vector could be used is when the struct
      // contains a single 256-bit element. Since Lo and Hi logic isn't extended
      // to work for sizes wider than 128, early check and fallback to memory.
      //
      if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
        Lo = Memory;
        return;
      }
      // Note: skip this test for bit-fields; see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  return ABIArgInfo::getIndirect(0);
}

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = HasAVX ? 256 : 128;
    if (Size <= 64 || Size > LargestVector)
      return true;
  }

  return false;
}

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  //
  // This assumption is optimistic, as there could be free registers available
  // when we need to pass this argument in memory, and LLVM could try to pass
  // the argument in the free register. This does not seem to happen currently,
  // but this code would be much safer if we could mark the argument with
  // 'onstack'. See PR12193.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Compute the byval alignment. We specify the alignment of the byval in all
  // cases so that the mid-level optimizer knows the alignment of the byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  // Attempt to avoid passing indirect results using byval when possible. This
  // is important for good codegen.
  //
  // We do this by coercing the value into a scalar type which the backend can
  // handle naturally (i.e., without using byval).
  //
  // For simplicity, we currently only do this when we have exhausted all of the
  // free integer registers. Doing this when there are free integer registers
  // would require more care, as we would have to ensure that the coerced value
  // did not claim the unused register. That would require either reordering the
  // arguments to the function (so that any subsequent inreg values came first),
  // or only doing this optimization when there were no following arguments that
  // might be inreg.
  //
  // We currently expect it to be rare (particularly in well written code) for
  // arguments to be passed on the stack when there are still free integer
  // registers available (this would typically imply large structs being passed
  // by value), so this seems like a fair tradeoff for now.
  //
  // We can revisit this if the backend grows support for 'onstack' parameter
  // attributes. See PR12193.
  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }

  return ABIArgInfo::getIndirect(Align);
}

/// GetByteVectorType - The ABI specifies that a value should be passed in a
/// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a
/// vector register.
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  llvm::Type *IRType = CGT.ConvertType(Ty);

  // Wrapper structs that just contain vectors are passed just like vectors,
  // strip them off if present.
  llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
  while (STy && STy->getNumElements() == 1) {
    IRType = STy->getElementType(0);
    STy = dyn_cast<llvm::StructType>(IRType);
  }

  // If the preferred type is a 16-byte vector, prefer to pass it.
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)) {
    llvm::Type *EltTy = VT->getElementType();
    unsigned BitWidth = VT->getBitWidth();
    if ((BitWidth >= 128 && BitWidth <= 256) &&
        (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
         EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
         EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
         EltTy->isIntegerTy(128)))
      return VT;
  }

  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
}

/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or be in
/// alignment padding. The user type specified is known to be at most 128 bits
/// in size, and have passed through X86_64ABIInfo::classify with a successful
/// classification that put one of the two halves in the INTEGER class.
///
/// It is conservatively correct to return false.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here. This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
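  // For example, querying bits [32, 64) of a plain 32-bit scalar trivially
  // succeeds: the queried range starts at the end of the type, so there is
  // no user data in it.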
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    // Check each element to see if the element overlaps with the queried range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;

      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
        if (BaseOffset >= EndBit) continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0;
        if (!BitsContainNoUserData(i->getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest. Yes,
    // this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // much.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit) break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset : 0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}

/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
/// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
                                  const llvm::DataLayout &TD) {
  // Base case if we find a float.
  if (IROffset == 0 && IRType->isFloatTy())
    return true;

  // If this is a struct, recurse into the field at the specified offset.
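  // For the {int,{float}} example above, an IROffset of 4 selects the inner
  // struct, and the recursive query at offset 0 then hits the float base case.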
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);
    return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
  }

  // If this is an array, recurse into the field at the specified offset.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset/EltSize*EltSize;
    return ContainsFloatAtOffset(EltTy, IROffset, TD);
  }

  return false;
}


/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
/// low 8 bytes of an XMM register, corresponding to the SSE class.
llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  // The only three choices we have are either double, <2 x float>, or float. We
  // pass as float if the last 4 bytes are just padding. This happens for
  // structs that contain 3 floats.
  if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
                            SourceOffset*8+64, getContext()))
    return llvm::Type::getFloatTy(getVMContext());

  // We want to pass as <2 x float> if the LLVM IR type contains a float at
  // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
  // case.
  if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
      ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
    return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);

  return llvm::Type::getDoubleTy(getVMContext());
}


/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
/// an 8-byte GPR. This means that we either have a scalar or we are talking
/// about the high or low part of an up-to-16-byte struct. This routine picks
/// the best LLVM IR type to represent this, which may be i64 or may be anything
/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
/// etc).
///
/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
/// the source type. IROffset is an offset in bytes into the LLVM IR type that
/// the 8-byte value references. PrefType may be null.
///
/// SourceTy is the source-level type for the entire argument. SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
///
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it. See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;

    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
    // goodness in the source type is just tail padding. This is allowed to
    // kick in for struct {double,int} on the int, but not on
    // struct{double,int,int} because we wouldn't return the second int. We
    // have to do this analysis on the source type because we can't depend on
    // unions being lowered a specific way etc.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
          cast<llvm::IntegerType>(IRType)->getBitWidth();

      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                SourceOffset*8+64, getContext()))
        return IRType;
    }
  }

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                  SourceOffset);
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in an
  // integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
    (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
}


/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
/// be used as elements of a two register pair to pass or return, return a
/// first class aggregate to represent them. For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                           const llvm::DataLayout &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
  // at offset 8. If the high and low parts we inferred are both 4-byte types
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
  // the second element at offset 8. Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // To handle this, we have to increase the size of the low part so that the
  // second element will start at an 8 byte offset. We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
    // There are only two sorts of types the ABI generation code can produce for
    // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
    // Promote these to a larger type.
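    // For example, a low/high pair inferred as {i32, i32} becomes {i64, i32},
    // which places the high i32 at offset 8 as the ABI requires.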
    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);


  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}

ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register; leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
        RetTy = EnumTy->getDecl()->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          RetTy->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }
    break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as an 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()),
                                    NULL);
    break;
  }

  llvm::Type *HighPart = 0;
  switch (Hi) {
    // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the next available eightbyte chunk of the last used
    // vector register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}

ABIArgInfo X86_64ABIInfo::classifyArgumentType(
  QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE)
  const
{
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register; leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:

    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    ++neededInt;

    // Pick an 8-byte type based on the preferred type.
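    // For example, 'struct { char c; short s; }' classifies as INTEGER and
    // comes back from GetINTEGERTypeAtOffset as a single i32.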
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = Ty->getAs<EnumType>())
        Ty = EnumTy->getDecl()->getIntegerType();

      if (Ty->isIntegralOrEnumerationType() &&
          Ty->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }

    break;

    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
    // available SSE register is used; the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE: {
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }

  llvm::Type *HighPart = 0;
  switch (Hi) {
    // Memory was handled previously, ComplexX87 and X87 should
    // never occur as hi classes, and X87Up must be preceded by X87,
    // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass: break;

  case Integer:
    ++neededInt;
    // Pick an 8-byte type based on the preferred type.
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // X87Up generally doesn't occur here (long double is passed in
    // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);

    ++neededSSE;
    break;

    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
    // eightbyte is passed in the upper half of the last used SSE
    // register. This only happens when 128-bit vectors are passed.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}

void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {

  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  // Keep track of the number of assigned registers.
  unsigned freeIntRegs = 6, freeSSERegs = 8;

  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (FI.getReturnInfo().isIndirect())
    --freeIntRegs;

  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
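  // INTEGER eightbytes draw from the six GPRs %rdi, %rsi, %rdx, %rcx, %r8 and
  // %r9; SSE eightbytes draw from the eight registers %xmm0-%xmm7, matching
  // the freeIntRegs/freeSSERegs counts above.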
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it) {
    unsigned neededInt, neededSSE;
    it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
                                    neededSSE);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the
    // stack. If registers have already been assigned for some
    // eightbytes of such an argument, the assignments get reverted.
    if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
      freeIntRegs -= neededInt;
      freeSSERegs -= neededSSE;
    } else {
      it->info = getIndirectResult(it->type, freeIntRegs);
    }
  }
}

static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
  llvm::Value *overflow_arg_area_p =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  // It isn't stated explicitly in the standard, but in practice we use
  // alignment greater than 16 where necessary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                                    CGF.Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                 overflow_arg_area->getType(),
                                 "overflow_arg_area.align");
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
    llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}

llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;

  Ty = CGF.getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of
  // register save space.

  llvm::Value *InRegs = 0;
  llvm::Value *gp_offset_p = 0, *gp_offset = 0;
  llvm::Value *fp_offset_p = 0, *fp_offset = 0;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
      llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
  if (neededInt && neededSSE) {
    // FIXME: Cleanup.
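    // The coerced struct has one GPR-bound element and one XMM-bound element;
    // load each from its slot in the register save area and reassemble them
    // into a contiguous temporary.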
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    llvm::Type *TyLo = ST->getElementType(0);
    llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededSSE == 1) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
    // SSE registers are spaced 16 bytes apart in the register save
    // area, so we need to collect the two eightbytes together.
    llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
    llvm::Type *DoubleTy = CGF.DoubleTy;
    llvm::Type *DblPtrTy =
      llvm::PointerType::getUnqual(DoubleTy);
    llvm::StructType *ST = llvm::StructType::get(DoubleTy,
                                                 DoubleTy, NULL);
    llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.
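  // The in-register and in-memory paths rejoin here; a phi selects between
  // the register-save-area address and the overflow-area address.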

  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
                                                 "vaarg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);
  return ResAddr;
}

ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const {

  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  uint64_t Size = getContext().getTypeSize(Ty);

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    if (hasNonTrivialDestructorOrCopyConstructor(RT) ||
        RT->getDecl()->hasFlexibleArrayMember())
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    // FIXME: mingw-w64-gcc emits 128-bit struct as i128
    if (Size == 128 &&
        getContext().getTargetInfo().getTriple().getOS()
          == llvm::Triple::MinGW32)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));

    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
    // not 1, 2, 4, or 8 bytes, must be passed by reference."
    if (Size <= 64 &&
        (Size & (Size - 1)) == 0)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));

    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }

  if (Ty->isPromotableIntegerType())
    return ABIArgInfo::getExtend();

  return ABIArgInfo::getDirect();
}

void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {

  QualType RetTy = FI.getReturnType();
  FI.getReturnInfo() = classify(RetTy);

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classify(it->type);
}

llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                         CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

namespace {

class NaClX86_64ABIInfo : public ABIInfo {
public:
  NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
private:
  PNaClABIInfo PInfo;  // Used for generating calls with pnaclcall callingconv.
  X86_64ABIInfo NInfo; // Used for everything else.
};

class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {}
};

}

void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (FI.getASTCallingConvention() == CC_PnaclCall)
    PInfo.computeInfo(FI);
  else
    NInfo.computeInfo(FI);
}

llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                          CodeGenFunction &CGF) const {
  // Always use the native convention; calling pnacl-style varargs functions
  // is unsupported.
  return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
}


// PowerPC-32

namespace {
class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;
};

}

bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 4-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Four8, 0, 31);

  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-76 are various 4-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  // 68-75 cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 64, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Four8, 109, 113);

  return false;
}

// PowerPC-64

namespace {
/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public DefaultABIInfo {

public:
  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

  bool isPromotableTypeForABI(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // TODO: We can add more logic to computeInfo to improve performance.
  // Example: For aggregate arguments that fit in a register, we could
  // use getDirectInReg (as is done below for structs containing a single
  // floating-point value) to avoid pushing them to memory on function
  // entry. This would require changing the logic in PPCISelLowering
  // when lowering the parameters in the caller and args in the callee.
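  // For example, 'struct { double d; }' is collapsed to its lone double by
  // isSingleElementStruct in computeInfo below and passed in a floating-point
  // register via getDirectInReg.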
  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it) {
      // We rely on the default argument classification for the most part.
      // One exception: An aggregate containing a single floating-point
      // item must be passed in a register if one is available.
      const Type *T = isSingleElementStruct(it->type, getContext());
      if (T) {
        const BuiltinType *BT = T->getAs<BuiltinType>();
        if (BT && BT->isFloatingPoint()) {
          QualType QT(T, 0);
          it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
          continue;
        }
      }
      it->info = classifyArgumentType(it->type);
    }
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr,
                                 QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;
};

class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;
};

}

// Return true if the ABI requires Ty to be passed sign- or zero-
// extended to 64 bits.
bool
PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (Ty->isPromotableIntegerType())
    return true;

  // In addition to the usual promotable integer types, we also need to
  // extend all 32-bit types, since the ABI requires promotion to 64 bits.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      break;
    }

  return false;
}

ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/constructors should not be passed
    // by value.
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  return (isPromotableTypeForABI(Ty) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo
PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirect();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  return (isPromotableTypeForABI(RetTy) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
                                           QualType Ty,
                                           CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Update the va_list pointer. The pointer should be bumped by the
  // size of the object. We can trust getTypeSize() except for a complex
  // type whose base type is smaller than a doubleword. For these, the
  // size of the object is 16 bytes; see below for further explanation.
  unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
  QualType BaseTy;
  unsigned CplxBaseSize = 0;

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    BaseTy = CTy->getElementType();
    CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8;
    if (CplxBaseSize < 8)
      SizeInBytes = 16;
  }

  unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  // If we have a complex type and the base type is smaller than 8 bytes,
  // the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate doublewords. However, Clang expects us to produce a
  // pointer to a structure with the two parts packed tightly. So generate
  // loads of the real and imaginary parts relative to the va_list pointer,
  // and store them to a temporary structure.
  if (CplxBaseSize && CplxBaseSize < 8) {
    llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    llvm::Value *ImagAddr = RealAddr;
    RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
    ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
    llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy));
    RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy);
    ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
    llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal");
    llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag");
    llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty),
                                            "vacplx");
    llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real");
    llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag");
    Builder.CreateStore(Real, RealPtr, false);
    Builder.CreateStore(Imag, ImagPtr, false);
    return Ptr;
  }

  // If the argument is smaller than 8 bytes, it is right-adjusted in
  // its doubleword slot. Adjust the pointer to pick it up from the
  // correct offset.
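  // For example, a 4-byte 'int' occupies bytes 4..7 of its doubleword slot,
  // so the address below is bumped by 8 - 4 = 4 bytes.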
  if (SizeInBytes < 8) {
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
  }

  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  return Builder.CreateBitCast(Addr, PTy);
}

static bool
PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                              llvm::Value *Address) {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Eight8, 0, 31);

  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-76 are various 4-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  // 68-75 cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 64, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Four8, 109, 113);

  return false;
}

bool
PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
  CodeGen::CodeGenFunction &CGF,
  llvm::Value *Address) const {

  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
}

bool
PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {

  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
}

//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class ARMABIInfo : public ABIInfo {
public:
  enum ABIKind {
    APCS = 0,
    AAPCS = 1,
    AAPCS_VFP
  };

private:
  ABIKind Kind;

public:
  ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {}

  bool isEABI() const {
    StringRef Env =
      getContext().getTargetInfo().getTriple().getEnvironmentName();
    return (Env == "gnueabi" || Env == "eabi" ||
            Env == "android" || Env == "androideabi");
  }

private:
  ABIKind getABIKind() const { return Kind; }

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, int *VFPRegs,
                                  unsigned &AllocatedVFP,
                                  bool &IsHA) const;
  bool isIllegalVectorType(QualType Ty) const;

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
    :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}

  const ARMABIInfo &getABIInfo() const {
    return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    return 13;
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const {
    return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

  unsigned getSizeOfUnwindException() const {
    if (getABIInfo().isEABI()) return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }
};

}

void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  // To correctly handle Homogeneous Aggregates, we need to keep track of the
  // VFP registers allocated so far.
  // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
  // VFP registers of the appropriate type unallocated then the argument is
  // allocated to the lowest-numbered sequence of such registers.
  // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
  // unallocated are marked as unavailable.
  unsigned AllocatedVFP = 0;
  int VFPRegs[16] = { 0 };
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it) {
    unsigned PreAllocation = AllocatedVFP;
    bool IsHA = false;
    // 6.1.2.3 There is one VFP co-processor register class using registers
    // s0-s15 (d0-d7) for passing arguments.
    const unsigned NumVFPs = 16;
    it->info = classifyArgumentType(it->type, VFPRegs, AllocatedVFP, IsHA);
    // If we do not have enough VFP registers for the HA, any VFP registers
    // that are unallocated are marked as unavailable. To achieve this, we add
    // padding of (NumVFPs - PreAllocation) floats.
    if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
      llvm::Type *PaddingTy = llvm::ArrayType::get(
          llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
      it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
    }
  }

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  // The calling convention the ABI uses by default.
  llvm::CallingConv::ID DefaultCC;
  if (getContext().getTargetInfo().getTriple().getEnvironmentName()=="gnueabihf")
    DefaultCC = llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    DefaultCC = llvm::CallingConv::ARM_AAPCS;
  else
    DefaultCC = llvm::CallingConv::ARM_APCS;

  // If the user did not ask for a specific calling convention explicitly (e.g.
  // via the pcs attribute), set the effective calling convention if it differs
  // from the ABI default.
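  // For example, on a gnueabihf triple the default is ARM_AAPCS_VFP, so an
  // AAPCS-kind ABI below must be recorded explicitly as ARM_AAPCS.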
  switch (getABIKind()) {
  case APCS:
    if (DefaultCC != llvm::CallingConv::ARM_APCS)
      FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
    break;
  case AAPCS:
    if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
      FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
    break;
  case AAPCS_VFP:
    if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP)
      FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
    break;
  }
}

/// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous
/// aggregate. If HAMembers is non-null, the number of base elements
/// contained in the type is returned through it; this is used for the
/// recursive calls that check aggregate component types.
static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
                                   ASTContext &Context,
                                   uint64_t *HAMembers = 0) {
  uint64_t Members = 0;
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
      return false;
    Members *= AT->getSize().getZExtValue();
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return false;

    Members = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
      const FieldDecl *FD = *i;
      uint64_t FldMembers;
      if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers))
        return false;

      Members = (RD->isUnion() ?
                 std::max(Members, FldMembers) : Members + FldMembers);
    }
  } else {
    Members = 1;
    if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
      Members = 2;
      Ty = CT->getElementType();
    }

    // Homogeneous aggregates for AAPCS-VFP must have base types of float,
    // double, or 64-bit or 128-bit vectors.
    if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
      if (BT->getKind() != BuiltinType::Float &&
          BT->getKind() != BuiltinType::Double &&
          BT->getKind() != BuiltinType::LongDouble)
        return false;
    } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
      unsigned VecSize = Context.getTypeSize(VT);
      if (VecSize != 64 && VecSize != 128)
        return false;
    } else {
      return false;
    }

    // The base type must be the same for all members. Vector types of the
    // same total size are treated as being equivalent here.
    const Type *TyPtr = Ty.getTypePtr();
    if (!Base)
      Base = TyPtr;
    if (Base != TyPtr &&
        (!Base->isVectorType() || !TyPtr->isVectorType() ||
         Context.getTypeSize(Base) != Context.getTypeSize(TyPtr)))
      return false;
  }

  // Homogeneous Aggregates can have at most 4 members of the base type.
  if (HAMembers)
    *HAMembers = Members;

  return (Members > 0 && Members <= 4);
}

/// markAllocatedVFPs - Update VFPRegs according to the alignment and
/// number of VFP registers (in units of S registers) requested.
static void markAllocatedVFPs(int *VFPRegs, unsigned &AllocatedVFP,
                              unsigned Alignment,
                              unsigned NumRequired) {
  // Early Exit.
  // Early exit.
  if (AllocatedVFP >= 16)
    return;
  // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
  // VFP registers of the appropriate type unallocated then the argument is
  // allocated to the lowest-numbered sequence of such registers.
  for (unsigned I = 0; I < 16; I += Alignment) {
    bool FoundSlot = true;
    for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
      if (J >= 16 || VFPRegs[J]) {
        FoundSlot = false;
        break;
      }
    if (FoundSlot) {
      for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
        VFPRegs[J] = 1;
      AllocatedVFP += NumRequired;
      return;
    }
  }
  // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
  // unallocated are marked as unavailable.
  for (unsigned I = 0; I < 16; I++)
    VFPRegs[I] = 1;
  AllocatedVFP = 17; // We do not have enough VFP registers.
}

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, int *VFPRegs,
                                            unsigned &AllocatedVFP,
                                            bool &IsHA) const {
  // We update the number of allocated VFP registers according to
  // 6.1.2.1 The following argument types are VFP CPRCs:
  // A single-precision floating-point type (including promoted
  // half-precision types); A double-precision floating-point type;
  // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
  // with a Base Type of a single- or double-precision floating-point type,
  // 64-bit containerized vectors or 128-bit containerized vectors with one
  // to four Elements.

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 32) {
      llvm::Type *ResType =
          llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 64) {
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 2);
      markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2);
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 128) {
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);
      markAllocatedVFPs(VFPRegs, AllocatedVFP, 4, 4);
      return ABIArgInfo::getDirect(ResType);
    }
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }
  // Update VFPRegs for legal vector types.
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    // The size of a legal vector is a power of two and at least 64 bits.
    markAllocatedVFPs(VFPRegs, AllocatedVFP, Size >= 128 ? 4 : 2, Size / 32);
  }
  // Update VFPRegs for floating point types.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Half ||
        BT->getKind() == BuiltinType::Float)
      markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, 1);
    if (BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2);
  }

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Ignore empty records.
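  // For example, 'struct Empty {};' or a struct whose only members are
  // unnamed bit-fields has nothing to pass, so it is simply dropped here.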
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  if (getABIKind() == ARMABIInfo::AAPCS_VFP) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = 0;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Base can be a floating-point type or a vector type.
      if (Base->isVectorType()) {
        // ElementSize is the size of an element in S registers.
        unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4;
        markAllocatedVFPs(VFPRegs, AllocatedVFP, ElementSize,
                          Members * ElementSize);
      } else if (Base->isSpecificBuiltinType(BuiltinType::Float))
        markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, Members);
      else {
        assert(Base->isSpecificBuiltinType(BuiltinType::Double) ||
               Base->isSpecificBuiltinType(BuiltinType::LongDouble));
        markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, Members * 2);
      }
      IsHA = true;
      return ABIArgInfo::getExpand();
    }
  }

  // Support byval for ARM.
  // The ABI alignment for APCS is 4 bytes; for AAPCS it is at least 4 bytes
  // and at most 8 bytes. We realign the indirect argument if the type
  // alignment is bigger than the ABI alignment.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    return ABIArgInfo::getIndirect(0, /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type* ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
  if (getContext().getTypeAlign(Ty) <= 32) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  }

  llvm::Type *STy =
      llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL);
  return ABIArgInfo::getDirect(STy);
}

static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A
  // structure is called integer-like if its size is less than or equal to one
  // word, and the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable; we only need to verify they are
    // "integer like". We still have to disallow a subsequent non-bitfield,
    // for example:
    //   struct { int : 0; int x; }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}

ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return ABIArgInfo::getIndirect(0);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                       getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
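      // For example, 'struct { short s; }' is integer-like and comes back as
      // an i16, while 'struct { char a, b, c; }' has fields at non-zero
      // offsets, fails the integer-like test above, and is returned in
      // memory instead.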
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return ABIArgInfo::getIndirect(0);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (getABIKind() == AAPCS_VFP) {
    const Type *Base = 0;
    if (isHomogeneousAggregate(RetTy, Base, getContext())) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Homogeneous Aggregates are returned directly.
      return ABIArgInfo::getDirect();
    }
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  }

  return ABIArgInfo::getIndirect(0);
}

/// isIllegalVectorType - Check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // The element count of a legal vector must be a power of 2.
    if ((NumElements & (NumElements - 1)) != 0)
      return true;
    // The size of a legal vector must be greater than 32 bits.
    return Size <= 32;
  }
  return false;
}

llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
  uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
  bool IsIndirect = false;

  // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
  // APCS. For AAPCS, the ABI alignment is at least 4 bytes and at most
  // 8 bytes.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  else
    TyAlign = 4;
  // Use indirect if the size of the illegal vector is bigger than 16 bytes.
  if (isIllegalVectorType(Ty) && Size > 16) {
    IsIndirect = true;
    Size = 4;
    TyAlign = 4;
  }

  // Handle address alignment for ABI alignment > 4 bytes.
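  // The computation below is the usual power-of-two round-up,
  //   aligned = (addr + align - 1) & ~(align - 1),
  // e.g. an ap.cur of 0x1004 rounds up to 0x1008 for an 8-byte alignment.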
  if (TyAlign > 4) {
    assert((TyAlign & (TyAlign - 1)) == 0 &&
           "Alignment is not power of 2!");
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
  }

  uint64_t Offset =
      llvm::RoundUpToAlignment(Size, 4);
  llvm::Value *NextAddr =
      Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                        "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  if (IsIndirect)
    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
  else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
    // We can't directly cast ap.cur to a pointer to a vector type, since
    // ap.cur may not be correctly aligned for the vector type. We create an
    // aligned temporary space and copy the content over from ap.cur to the
    // temporary space. This is necessary if the natural alignment of the
    // type is greater than the ABI alignment.
    llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
    CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
    llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
                                                    "var.align");
    llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
    llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
    Builder.CreateMemCpy(Dst, Src,
        llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
        TyAlign, false);
    Addr = AlignedTemp; // The contents are now in an aligned location.
  }
  llvm::Type *PTy =
      llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  return AddrTyped;
}

namespace {

class NaClARMABIInfo : public ABIInfo {
public:
  NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
      : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
private:
  PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
  ARMABIInfo NInfo;   // Used for everything else.
};

class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                           ARMABIInfo::ABIKind Kind)
      : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
};

}

void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (FI.getASTCallingConvention() == CC_PnaclCall)
    PInfo.computeInfo(FI);
  else
    static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
}

llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // Always use the native convention; calling pnacl-style varargs functions
  // is unsupported.
  return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
}

//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class NVPTXABIInfo : public ABIInfo {
public:
  NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}

  virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                                   CodeGen::CodeGenModule &M) const;
};

ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();
  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);
  return ABIArgInfo::getDirect();
}

ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty))
    return ABIArgInfo::getIndirect(0);

  return ABIArgInfo::getDirect();
}

void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type);

  // Always honor the user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  // Determine the default calling convention for this ABI.
  // We're still using the PTX_Kernel/PTX_Device calling conventions here,
  // but we should switch to NVVM metadata later on.
  llvm::CallingConv::ID DefaultCC;
  const LangOptions &LangOpts = getContext().getLangOpts();
  if (LangOpts.OpenCL || LangOpts.CUDA) {
    // If we are in OpenCL or CUDA mode, then default to device functions.
    DefaultCC = llvm::CallingConv::PTX_Device;
  } else {
    // If we are in standard C/C++ mode, use the triple to decide on the
    // default.
    StringRef Env =
        getContext().getTargetInfo().getTriple().getEnvironmentName();
    if (Env == "device")
      DefaultCC = llvm::CallingConv::PTX_Device;
    else
      DefaultCC = llvm::CallingConv::PTX_Kernel;
  }
  FI.setEffectiveCallingConvention(DefaultCC);
}

llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  llvm_unreachable("NVPTX does not support varargs");
}

void NVPTXTargetCodeGenInfo::
SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  // Perform special handling in OpenCL mode.
  if (M.getLangOpts().OpenCL) {
    // Use OpenCL function attributes to set proper calling conventions.
    // By default, all functions are device functions.
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL __kernel functions get a kernel calling convention.
      F->setCallingConv(llvm::CallingConv::PTX_Kernel);
      // And kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);
    }
  }

  // Perform special handling in CUDA mode.
  if (M.getLangOpts().CUDA) {
    // CUDA __global__ functions get a kernel calling convention. Since
    // __global__ functions cannot be called from the device, we do not
    // need to set the noinline attribute.
    if (FD->getAttr<CUDAGlobalAttr>())
      F->setCallingConv(llvm::CallingConv::PTX_Kernel);
  }
}

}

//===----------------------------------------------------------------------===//
// MBlaze ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class MBlazeABIInfo : public ABIInfo {
public:
  MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  bool isPromotableIntegerType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MBlazeTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {}
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const;
};

}

bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const {
  // The MBlaze ABI requires all 8- and 16-bit quantities to be extended.
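  // For example, a 'short' parameter or result is marked 'signext' in the
  // generated IR, and an 'unsigned char' is marked 'zeroext'.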
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Bool:
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
      return true;
    default:
      return false;
    }
  return false;
}

llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // FIXME: Implement
  return 0;
}

ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();
  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  return (isPromotableIntegerType(RetTy) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty))
    return ABIArgInfo::getIndirect(0);

  return (isPromotableIntegerType(Ty) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                                  CodeGen::CodeGenModule &M)
                                                  const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD) return;

  llvm::CallingConv::ID CC = llvm::CallingConv::C;
  if (FD->hasAttr<MBlazeInterruptHandlerAttr>())
    CC = llvm::CallingConv::MBLAZE_INTR;
  else if (FD->hasAttr<MBlazeSaveVolatilesAttr>())
    CC = llvm::CallingConv::MBLAZE_SVOL;

  if (CC != llvm::CallingConv::C) {
    // Handle the 'interrupt_handler' and 'save_volatiles' attributes:
    llvm::Function *F = cast<llvm::Function>(GV);

    // Step 1: Set the ISR calling convention.
    F->setCallingConv(CC);

    // Step 2: Add the NoInline attribute.
    F->addFnAttr(llvm::Attribute::NoInline);
  }

  // Step 3: Emit the _interrupt_handler alias.
  if (CC == llvm::CallingConv::MBLAZE_INTR)
    new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
                          "_interrupt_handler", GV, &M.getModule());
}

//===----------------------------------------------------------------------===//
// MSP430 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const;
};

}

void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                                  CodeGen::CodeGenModule &M)
                                                  const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
      // Handle the 'interrupt' attribute:
      llvm::Function *F = cast<llvm::Function>(GV);

      // Step 1: Set the ISR calling convention.
      F->setCallingConv(llvm::CallingConv::MSP430_INTR);

      // Step 2: Add the NoInline attribute.
      F->addFnAttr(llvm::Attribute::NoInline);

      // Step 3: Emit the ISR vector alias.
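      // For example, a handler declared (hypothetically) as
      //   __attribute__((interrupt(4))) void isr(void);
      // gets the alias __isr_2: vector table entries are presumably two
      // bytes wide, so the attribute value is halved below to obtain the
      // vector index.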
      unsigned Num = attr->getNumber() / 2;
      new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
                            "__isr_" + Twine(Num),
                            GV, &M.getModule());
    }
  }
}

//===----------------------------------------------------------------------===//
// MIPS ABI Implementation. This works for both little-endian and
// big-endian variants.
//===----------------------------------------------------------------------===//

namespace {
class MipsABIInfo : public ABIInfo {
  bool IsO32;
  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
  void CoerceToIntArgs(uint64_t TySize,
                       SmallVector<llvm::Type*, 8> &ArgList) const;
  llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
  llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
  llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
public:
  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
      ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
      StackAlignInBytes(IsO32 ? 8 : 16) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty, uint64_t &Offset) const;
  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
  unsigned SizeOfUnwindException;
public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
      : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
        SizeOfUnwindException(IsO32 ? 24 : 32) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 29;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;

  unsigned getSizeOfUnwindException() const {
    return SizeOfUnwindException;
  }
};
}

void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
                                  SmallVector<llvm::Type*, 8> &ArgList) const {
  llvm::IntegerType *IntTy =
      llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);

  // Add (TySize / (MinABIStackAlignInBytes * 8)) args of IntTy.
  for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
    ArgList.push_back(IntTy);

  // If necessary, add one more integer type to ArgList.
  unsigned R = TySize % (MinABIStackAlignInBytes * 8);

  if (R)
    ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
}

// In N32/64, an aligned double-precision floating-point field is passed in
// a register.
llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
  SmallVector<llvm::Type*, 8> ArgList, IntArgList;

  if (IsO32) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  if (Ty->isComplexType())
    return CGT.ConvertType(Ty);

  const RecordType *RT = Ty->getAs<RecordType>();

  // Unions/vectors are passed in integer registers.
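  // Worked example for the N32/64 path below:
  //   struct S { int i; double d; };  // 128 bits, 'd' aligned at offset 64
  // is coerced to { i64, double }, so the double travels in a floating-point
  // register while the leading i64 carries the int and its padding.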
  if (!RT || !RT->isStructureOrClassType()) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  const RecordDecl *RD = RT->getDecl();
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  assert(!(TySize % 8) && "Size of structure must be multiple of 8.");

  uint64_t LastOffset = 0;
  unsigned idx = 0;
  llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);

  // Iterate over fields in the struct/class and check if there are any
  // aligned double fields.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const QualType Ty = i->getType();
    const BuiltinType *BT = Ty->getAs<BuiltinType>();

    if (!BT || BT->getKind() != BuiltinType::Double)
      continue;

    uint64_t Offset = Layout.getFieldOffset(idx);
    if (Offset % 64) // Ignore doubles that are not aligned.
      continue;

    // Add ((Offset - LastOffset) / 64) args of type i64.
    for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
      ArgList.push_back(I64);

    // Add the double type.
    ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
    LastOffset = Offset + 64;
  }

  CoerceToIntArgs(TySize - LastOffset, IntArgList);
  ArgList.append(IntArgList.begin(), IntArgList.end());

  return llvm::StructType::get(getVMContext(), ArgList);
}

llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const {
  assert((Offset % MinABIStackAlignInBytes) == 0);

  if ((Align - 1) & Offset)
    return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);

  return 0;
}

ABIArgInfo
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
  uint64_t OrigOffset = Offset;
  uint64_t TySize = getContext().getTypeSize(Ty);
  uint64_t Align = getContext().getTypeAlign(Ty) / 8;

  Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
                   (uint64_t)StackAlignInBytes);
  Offset = llvm::RoundUpToAlignment(Offset, Align);
  Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8;

  if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
    // Ignore empty aggregates.
    if (TySize == 0)
      return ABIArgInfo::getIgnore();

    // Records with non-trivial destructors or copy constructors should not be
    // passed by value.
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) {
      Offset = OrigOffset + MinABIStackAlignInBytes;
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
    }

    // If we have reached here, aggregates are passed directly by coercing to
    // another structure type. Padding is inserted if the offset of the
    // aggregate is unaligned.
    return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
                                 getPaddingType(Align, OrigOffset));
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  if (Ty->isPromotableIntegerType())
    return ABIArgInfo::getExtend();

  return ABIArgInfo::getDirect(0, 0, getPaddingType(Align, OrigOffset));
}

llvm::Type*
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
  const RecordType *RT = RetTy->getAs<RecordType>();
  SmallVector<llvm::Type*, 8> RTList;

  if (RT && RT->isStructureOrClassType()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    unsigned FieldCnt = Layout.getFieldCount();

    // N32/64 returns structs/classes in floating-point registers if the
    // following conditions are met:
    // 1. The size of the struct/class is no larger than 128 bits.
    // 2. The struct/class has one or two fields all of which are floating
    //    point types.
    // 3. The offset of the first field is zero (this follows what gcc does).
    //
    // Any other composite results are returned in integer registers.
    if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
      RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
      for (; b != e; ++b) {
        const BuiltinType *BT = b->getType()->getAs<BuiltinType>();

        if (!BT || !BT->isFloatingPoint())
          break;

        RTList.push_back(CGT.ConvertType(b->getType()));
      }

      if (b == e)
        return llvm::StructType::get(getVMContext(), RTList,
                                     RD->hasAttr<PackedAttr>());

      RTList.clear();
    }
  }

  CoerceToIntArgs(Size, RTList);
  return llvm::StructType::get(getVMContext(), RTList);
}

ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
  uint64_t Size = getContext().getTypeSize(RetTy);

  if (RetTy->isVoidType() || Size == 0)
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
    if (Size <= 128) {
      if (RetTy->isAnyComplexType())
        return ABIArgInfo::getDirect();

      // O32 returns integer vectors in registers.
      if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())
        return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));

      if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
        return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
  ABIArgInfo &RetInfo = FI.getReturnInfo();
  RetInfo = classifyReturnType(FI.getReturnType());

  // Check if a pointer to an aggregate is passed as a hidden argument.
  uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
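  // The indirect case reserves one register-sized slot because the caller
  // passes the sret pointer as an implicit leading argument.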

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type, Offset);
}

llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                    CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped;
  unsigned PtrWidth = getContext().getTargetInfo().getPointerWidth(0);
  llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;

  if (TypeAlign > MinABIStackAlignInBytes) {
    llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
    llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
    llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
    llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
    llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
    AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
  }
  else
    AddrTyped = Builder.CreateBitCast(Addr, PTy);

  llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
  TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
  uint64_t Offset =
      llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
  llvm::Value *NextAddr =
      Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
                        "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be as
  // canonical as it gets.

  // Everything on MIPS is 4 bytes. Double-precision FP registers
  // are aliased to pairs of single-precision FP registers.
  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-31 are the general purpose registers, $0 - $31.
  // 32-63 are the floating-point registers, $f0 - $f31.
  // 64 and 65 are the multiply/divide registers, $hi and $lo.
  // 66 is the (notional, I think) register for signal-handler return.
  AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);

  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
  // They are one bit wide and ignored here.

  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
  // (coprocessor 1 is the FP unit)
  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
  // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
  return false;
}

//===----------------------------------------------------------------------===//
// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
//===----------------------------------------------------------------------===//

namespace {

class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
      : DefaultTargetCodeGenInfo(CGT) {}

  virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                                   CodeGen::CodeGenModule &M) const;
};

void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                               llvm::GlobalValue *GV,
                                               CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  if (M.getLangOpts().OpenCL) {
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL C kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);

      if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
        // Convert the reqd_work_group_size() attributes to metadata.
        llvm::LLVMContext &Context = F->getContext();
        llvm::NamedMDNode *OpenCLMetadata =
            M.getModule().getOrInsertNamedMetadata(
                "opencl.kernel_wg_size_info");

        SmallVector<llvm::Value*, 5> Operands;
        Operands.push_back(F);

        Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
            llvm::APInt(32,
                        FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
        Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
            llvm::APInt(32,
                        FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim())));
        Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
            llvm::APInt(32,
                        FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim())));

        // Add a boolean constant operand for "required" (true) or "hint"
        // (false) for implementing the work_group_size_hint attr later.
        // Currently always true as the hint is not yet implemented.
        Operands.push_back(llvm::ConstantInt::getTrue(Context));
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}

}

//===----------------------------------------------------------------------===//
// Hexagon ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class HexagonABIInfo : public ABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    return 29;
  }
};

}

void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type);
}

ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size > 64)
    return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
  // Pass in the smallest viable integer type.
  else if (Size > 32)
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  else if (Size > 16)
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  else if (Size > 8)
    return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
  else
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}

ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
    return ABIArgInfo::getIndirect(0);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Aggregates up to 8 bytes are returned in registers (r0 or r1:r0);
  // larger aggregates are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 64) {
    // Return in the smallest viable integer type.
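    // For example, 'struct { int a; int b; }' (64 bits) comes back as an i64
    // in a register pair, while 'struct { char c[3]; }' (24 bits) widens to
    // an i32.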
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    if (Size <= 32)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  }

  return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
}

llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // FIXME: Need to handle alignment.
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
      llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
      llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
      Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                        "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  const llvm::Triple &Triple = getContext().getTargetInfo().getTriple();
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::arm:
  case llvm::Triple::thumb:
    {
      ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
      if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0)
        Kind = ARMABIInfo::APCS;
      else if (CodeGenOpts.FloatABI == "hard" ||
               (CodeGenOpts.FloatABI != "soft" &&
                Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
        Kind = ARMABIInfo::AAPCS_VFP;

      switch (Triple.getOS()) {
      case llvm::Triple::NaCl:
        return *(TheTargetCodeGenInfo =
                 new NaClARMTargetCodeGenInfo(Types, Kind));
      default:
        return *(TheTargetCodeGenInfo =
                 new ARMTargetCodeGenInfo(Types, Kind));
      }
    }

  case llvm::Triple::ppc:
    return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF())
      return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
    else
      return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::mblaze:
    return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::tce:
    return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool DisableMMX =
        strcmp(getContext().getTargetInfo().getABI(), "no-mmx") == 0;

    if (Triple.isOSDarwin())
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX,
                                           false,
                                           CodeGenOpts.NumRegisterParameters));

    switch (Triple.getOS()) {
    case llvm::Triple::Cygwin:
    case llvm::Triple::MinGW32:
    case llvm::Triple::AuroraUX:
    case llvm::Triple::DragonFly:
    case llvm::Triple::FreeBSD:
    case llvm::Triple::OpenBSD:
    case llvm::Triple::Bitrig:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX,
                                           false,
                                           CodeGenOpts.NumRegisterParameters));

    case llvm::Triple::Win32:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX,
                                           true,
                                           CodeGenOpts.NumRegisterParameters));

    default:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX,
                                           false,
                                           CodeGenOpts.NumRegisterParameters));
    }
  }

  case llvm::Triple::x86_64: {
    bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0;

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
    case llvm::Triple::MinGW32:
    case llvm::Triple::Cygwin:
      return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
    case llvm::Triple::NaCl:
      return *(TheTargetCodeGenInfo =
               new NaClX86_64TargetCodeGenInfo(Types, HasAVX));
    default:
      return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
                                                                  HasAVX));
    }
  }
  case llvm::Triple::hexagon:
    return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
  }
}