1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // These classes wrap the information about a call or function 11 // definition used to handle ABI compliancy. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "TargetInfo.h" 16 #include "ABIInfo.h" 17 #include "CGCXXABI.h" 18 #include "CodeGenFunction.h" 19 #include "clang/AST/RecordLayout.h" 20 #include "clang/CodeGen/CGFunctionInfo.h" 21 #include "clang/Frontend/CodeGenOptions.h" 22 #include "llvm/ADT/Triple.h" 23 #include "llvm/IR/DataLayout.h" 24 #include "llvm/IR/Type.h" 25 #include "llvm/Support/raw_ostream.h" 26 using namespace clang; 27 using namespace CodeGen; 28 29 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, 30 llvm::Value *Array, 31 llvm::Value *Value, 32 unsigned FirstIndex, 33 unsigned LastIndex) { 34 // Alternatively, we could emit this as a loop in the source. 35 for (unsigned I = FirstIndex; I <= LastIndex; ++I) { 36 llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I); 37 Builder.CreateStore(Value, Cell); 38 } 39 } 40 41 static bool isAggregateTypeForABI(QualType T) { 42 return !CodeGenFunction::hasScalarEvaluationKind(T) || 43 T->isMemberFunctionPointerType(); 44 } 45 46 ABIInfo::~ABIInfo() {} 47 48 static bool isRecordReturnIndirect(const RecordType *RT, 49 CGCXXABI &CXXABI) { 50 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 51 if (!RD) 52 return false; 53 return CXXABI.isReturnTypeIndirect(RD); 54 } 55 56 57 static bool isRecordReturnIndirect(QualType T, CGCXXABI &CXXABI) { 58 const RecordType *RT = T->getAs<RecordType>(); 59 if (!RT) 60 return false; 61 return isRecordReturnIndirect(RT, CXXABI); 62 } 63 64 static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, 65 CGCXXABI &CXXABI) { 66 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 67 if (!RD) 68 return CGCXXABI::RAA_Default; 69 return CXXABI.getRecordArgABI(RD); 70 } 71 72 static CGCXXABI::RecordArgABI getRecordArgABI(QualType T, 73 CGCXXABI &CXXABI) { 74 const RecordType *RT = T->getAs<RecordType>(); 75 if (!RT) 76 return CGCXXABI::RAA_Default; 77 return getRecordArgABI(RT, CXXABI); 78 } 79 80 CGCXXABI &ABIInfo::getCXXABI() const { 81 return CGT.getCXXABI(); 82 } 83 84 ASTContext &ABIInfo::getContext() const { 85 return CGT.getContext(); 86 } 87 88 llvm::LLVMContext &ABIInfo::getVMContext() const { 89 return CGT.getLLVMContext(); 90 } 91 92 const llvm::DataLayout &ABIInfo::getDataLayout() const { 93 return CGT.getDataLayout(); 94 } 95 96 const TargetInfo &ABIInfo::getTarget() const { 97 return CGT.getTarget(); 98 } 99 100 void ABIArgInfo::dump() const { 101 raw_ostream &OS = llvm::errs(); 102 OS << "(ABIArgInfo Kind="; 103 switch (TheKind) { 104 case Direct: 105 OS << "Direct Type="; 106 if (llvm::Type *Ty = getCoerceToType()) 107 Ty->print(OS); 108 else 109 OS << "null"; 110 break; 111 case Extend: 112 OS << "Extend"; 113 break; 114 case Ignore: 115 OS << "Ignore"; 116 break; 117 case InAlloca: 118 OS << "InAlloca Offset=" << getInAllocaFieldIndex(); 119 break; 120 case Indirect: 121 OS << "Indirect Align=" << getIndirectAlign() 122 << " ByVal=" << getIndirectByVal() 123 << " Realign=" << getIndirectRealign(); 124 break; 125 case Expand: 126 
OS << "Expand"; 127 break; 128 } 129 OS << ")\n"; 130 } 131 132 TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; } 133 134 // If someone can figure out a general rule for this, that would be great. 135 // It's probably just doomed to be platform-dependent, though. 136 unsigned TargetCodeGenInfo::getSizeOfUnwindException() const { 137 // Verified for: 138 // x86-64 FreeBSD, Linux, Darwin 139 // x86-32 FreeBSD, Linux, Darwin 140 // PowerPC Linux, Darwin 141 // ARM Darwin (*not* EABI) 142 // AArch64 Linux 143 return 32; 144 } 145 146 bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args, 147 const FunctionNoProtoType *fnType) const { 148 // The following conventions are known to require this to be false: 149 // x86_stdcall 150 // MIPS 151 // For everything else, we just prefer false unless we opt out. 152 return false; 153 } 154 155 void 156 TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib, 157 llvm::SmallString<24> &Opt) const { 158 // This assumes the user is passing a library name like "rt" instead of a 159 // filename like "librt.a/so", and that they don't care whether it's static or 160 // dynamic. 161 Opt = "-l"; 162 Opt += Lib; 163 } 164 165 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays); 166 167 /// isEmptyField - Return true iff a the field is "empty", that is it 168 /// is an unnamed bit-field or an (array of) empty record(s). 169 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, 170 bool AllowArrays) { 171 if (FD->isUnnamedBitfield()) 172 return true; 173 174 QualType FT = FD->getType(); 175 176 // Constant arrays of empty records count as empty, strip them off. 177 // Constant arrays of zero length always count as empty. 178 if (AllowArrays) 179 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { 180 if (AT->getSize() == 0) 181 return true; 182 FT = AT->getElementType(); 183 } 184 185 const RecordType *RT = FT->getAs<RecordType>(); 186 if (!RT) 187 return false; 188 189 // C++ record fields are never empty, at least in the Itanium ABI. 190 // 191 // FIXME: We should use a predicate for whether this behavior is true in the 192 // current ABI. 193 if (isa<CXXRecordDecl>(RT->getDecl())) 194 return false; 195 196 return isEmptyRecord(Context, FT, AllowArrays); 197 } 198 199 /// isEmptyRecord - Return true iff a structure contains only empty 200 /// fields. Note that a structure with a flexible array member is not 201 /// considered empty. 202 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) { 203 const RecordType *RT = T->getAs<RecordType>(); 204 if (!RT) 205 return 0; 206 const RecordDecl *RD = RT->getDecl(); 207 if (RD->hasFlexibleArrayMember()) 208 return false; 209 210 // If this is a C++ record, check the bases first. 211 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 212 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 213 e = CXXRD->bases_end(); i != e; ++i) 214 if (!isEmptyRecord(Context, i->getType(), true)) 215 return false; 216 217 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 218 i != e; ++i) 219 if (!isEmptyField(Context, *i, AllowArrays)) 220 return false; 221 return true; 222 } 223 224 /// isSingleElementStruct - Determine if a structure is a "single 225 /// element struct", i.e. it has exactly one non-empty field or 226 /// exactly one field which is itself a single element 227 /// struct. 
Structures with flexible array members are never 228 /// considered single element structs. 229 /// 230 /// \return The field declaration for the single non-empty field, if 231 /// it exists. 232 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) { 233 const RecordType *RT = T->getAsStructureType(); 234 if (!RT) 235 return 0; 236 237 const RecordDecl *RD = RT->getDecl(); 238 if (RD->hasFlexibleArrayMember()) 239 return 0; 240 241 const Type *Found = 0; 242 243 // If this is a C++ record, check the bases first. 244 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 245 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 246 e = CXXRD->bases_end(); i != e; ++i) { 247 // Ignore empty records. 248 if (isEmptyRecord(Context, i->getType(), true)) 249 continue; 250 251 // If we already found an element then this isn't a single-element struct. 252 if (Found) 253 return 0; 254 255 // If this is non-empty and not a single element struct, the composite 256 // cannot be a single element struct. 257 Found = isSingleElementStruct(i->getType(), Context); 258 if (!Found) 259 return 0; 260 } 261 } 262 263 // Check for single element. 264 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 265 i != e; ++i) { 266 const FieldDecl *FD = *i; 267 QualType FT = FD->getType(); 268 269 // Ignore empty fields. 270 if (isEmptyField(Context, FD, true)) 271 continue; 272 273 // If we already found an element then this isn't a single-element 274 // struct. 275 if (Found) 276 return 0; 277 278 // Treat single element arrays as the element. 279 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { 280 if (AT->getSize().getZExtValue() != 1) 281 break; 282 FT = AT->getElementType(); 283 } 284 285 if (!isAggregateTypeForABI(FT)) { 286 Found = FT.getTypePtr(); 287 } else { 288 Found = isSingleElementStruct(FT, Context); 289 if (!Found) 290 return 0; 291 } 292 } 293 294 // We don't consider a struct a single-element struct if it has 295 // padding beyond the element type. 296 if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T)) 297 return 0; 298 299 return Found; 300 } 301 302 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) { 303 // Treat complex types as the element type. 304 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 305 Ty = CTy->getElementType(); 306 307 // Check for a type which we know has a simple scalar argument-passing 308 // convention without any padding. (We're specifically looking for 32 309 // and 64-bit integer and integer-equivalents, float, and double.) 310 if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() && 311 !Ty->isEnumeralType() && !Ty->isBlockPointerType()) 312 return false; 313 314 uint64_t Size = Context.getTypeSize(Ty); 315 return Size == 32 || Size == 64; 316 } 317 318 /// canExpandIndirectArgument - Test whether an argument type which is to be 319 /// passed indirectly (on the stack) would have the equivalent layout if it was 320 /// expanded into separate arguments. If so, we prefer to do the latter to avoid 321 /// inhibiting optimizations. 322 /// 323 // FIXME: This predicate is missing many cases, currently it just follows 324 // llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We 325 // should probably make this smarter, or better yet make the LLVM backend 326 // capable of handling it. 327 static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) { 328 // We can only expand structure types. 
329 const RecordType *RT = Ty->getAs<RecordType>(); 330 if (!RT) 331 return false; 332 333 // We can only expand (C) structures. 334 // 335 // FIXME: This needs to be generalized to handle classes as well. 336 const RecordDecl *RD = RT->getDecl(); 337 if (!RD->isStruct() || isa<CXXRecordDecl>(RD)) 338 return false; 339 340 uint64_t Size = 0; 341 342 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 343 i != e; ++i) { 344 const FieldDecl *FD = *i; 345 346 if (!is32Or64BitBasicType(FD->getType(), Context)) 347 return false; 348 349 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know 350 // how to expand them yet, and the predicate for telling if a bitfield still 351 // counts as "basic" is more complicated than what we were doing previously. 352 if (FD->isBitField()) 353 return false; 354 355 Size += Context.getTypeSize(FD->getType()); 356 } 357 358 // Make sure there are not any holes in the struct. 359 if (Size != Context.getTypeSize(Ty)) 360 return false; 361 362 return true; 363 } 364 365 namespace { 366 /// DefaultABIInfo - The default implementation for ABI specific 367 /// details. This implementation provides information which results in 368 /// self-consistent and sensible LLVM IR generation, but does not 369 /// conform to any particular ABI. 370 class DefaultABIInfo : public ABIInfo { 371 public: 372 DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 373 374 ABIArgInfo classifyReturnType(QualType RetTy) const; 375 ABIArgInfo classifyArgumentType(QualType RetTy) const; 376 377 virtual void computeInfo(CGFunctionInfo &FI) const { 378 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 379 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 380 it != ie; ++it) 381 it->info = classifyArgumentType(it->type); 382 } 383 384 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 385 CodeGenFunction &CGF) const; 386 }; 387 388 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo { 389 public: 390 DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 391 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 392 }; 393 394 llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 395 CodeGenFunction &CGF) const { 396 return 0; 397 } 398 399 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const { 400 if (isAggregateTypeForABI(Ty)) { 401 // Records with non-trivial destructors/constructors should not be passed 402 // by value. 403 if (isRecordReturnIndirect(Ty, getCXXABI())) 404 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 405 406 return ABIArgInfo::getIndirect(0); 407 } 408 409 // Treat an enum type as its underlying type. 410 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 411 Ty = EnumTy->getDecl()->getIntegerType(); 412 413 return (Ty->isPromotableIntegerType() ? 414 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 415 } 416 417 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const { 418 if (RetTy->isVoidType()) 419 return ABIArgInfo::getIgnore(); 420 421 if (isAggregateTypeForABI(RetTy)) 422 return ABIArgInfo::getIndirect(0); 423 424 // Treat an enum type as its underlying type. 425 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 426 RetTy = EnumTy->getDecl()->getIntegerType(); 427 428 return (RetTy->isPromotableIntegerType() ? 
429 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 430 } 431 432 //===----------------------------------------------------------------------===// 433 // le32/PNaCl bitcode ABI Implementation 434 // 435 // This is a simplified version of the x86_32 ABI. Arguments and return values 436 // are always passed on the stack. 437 //===----------------------------------------------------------------------===// 438 439 class PNaClABIInfo : public ABIInfo { 440 public: 441 PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 442 443 ABIArgInfo classifyReturnType(QualType RetTy) const; 444 ABIArgInfo classifyArgumentType(QualType RetTy) const; 445 446 virtual void computeInfo(CGFunctionInfo &FI) const; 447 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 448 CodeGenFunction &CGF) const; 449 }; 450 451 class PNaClTargetCodeGenInfo : public TargetCodeGenInfo { 452 public: 453 PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 454 : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {} 455 }; 456 457 void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const { 458 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 459 460 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 461 it != ie; ++it) 462 it->info = classifyArgumentType(it->type); 463 } 464 465 llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 466 CodeGenFunction &CGF) const { 467 return 0; 468 } 469 470 /// \brief Classify argument of given type \p Ty. 471 ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const { 472 if (isAggregateTypeForABI(Ty)) { 473 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 474 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 475 return ABIArgInfo::getIndirect(0); 476 } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { 477 // Treat an enum type as its underlying type. 478 Ty = EnumTy->getDecl()->getIntegerType(); 479 } else if (Ty->isFloatingType()) { 480 // Floating-point types don't go inreg. 481 return ABIArgInfo::getDirect(); 482 } 483 484 return (Ty->isPromotableIntegerType() ? 485 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 486 } 487 488 ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const { 489 if (RetTy->isVoidType()) 490 return ABIArgInfo::getIgnore(); 491 492 // In the PNaCl ABI we always return records/structures on the stack. 493 if (isAggregateTypeForABI(RetTy)) 494 return ABIArgInfo::getIndirect(0); 495 496 // Treat an enum type as its underlying type. 497 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 498 RetTy = EnumTy->getDecl()->getIntegerType(); 499 500 return (RetTy->isPromotableIntegerType() ? 501 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 502 } 503 504 /// IsX86_MMXType - Return true if this is an MMX type. 505 bool IsX86_MMXType(llvm::Type *IRType) { 506 // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>. 
507 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 && 508 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() && 509 IRType->getScalarSizeInBits() != 64; 510 } 511 512 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 513 StringRef Constraint, 514 llvm::Type* Ty) { 515 if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) { 516 if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) { 517 // Invalid MMX constraint 518 return 0; 519 } 520 521 return llvm::Type::getX86_MMXTy(CGF.getLLVMContext()); 522 } 523 524 // No operation needed 525 return Ty; 526 } 527 528 //===----------------------------------------------------------------------===// 529 // X86-32 ABI Implementation 530 //===----------------------------------------------------------------------===// 531 532 /// \brief Similar to llvm::CCState, but for Clang. 533 struct CCState { 534 CCState(unsigned CC) : CC(CC), FreeRegs(0) {} 535 536 unsigned CC; 537 unsigned FreeRegs; 538 unsigned StackOffset; 539 bool UseInAlloca; 540 }; 541 542 /// X86_32ABIInfo - The X86-32 ABI information. 543 class X86_32ABIInfo : public ABIInfo { 544 enum Class { 545 Integer, 546 Float 547 }; 548 549 static const unsigned MinABIStackAlignInBytes = 4; 550 551 bool IsDarwinVectorABI; 552 bool IsSmallStructInRegABI; 553 bool IsWin32StructABI; 554 unsigned DefaultNumRegisterParameters; 555 556 static bool isRegisterSize(unsigned Size) { 557 return (Size == 8 || Size == 16 || Size == 32 || Size == 64); 558 } 559 560 bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context, 561 bool IsInstanceMethod) const; 562 563 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 564 /// such that the argument will be passed in memory. 565 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const; 566 567 ABIArgInfo getIndirectReturnResult(CCState &State) const; 568 569 /// \brief Return the alignment to use for the given type on the stack. 570 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const; 571 572 Class classify(QualType Ty) const; 573 ABIArgInfo classifyReturnType(QualType RetTy, CCState &State, 574 bool IsInstanceMethod) const; 575 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const; 576 bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const; 577 578 /// \brief Rewrite the function info so that all memory arguments use 579 /// inalloca. 
580 void rewriteWithInAlloca(CGFunctionInfo &FI) const; 581 582 void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, 583 unsigned &StackOffset, ABIArgInfo &Info, 584 QualType Type) const; 585 586 public: 587 588 virtual void computeInfo(CGFunctionInfo &FI) const; 589 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 590 CodeGenFunction &CGF) const; 591 592 X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w, 593 unsigned r) 594 : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p), 595 IsWin32StructABI(w), DefaultNumRegisterParameters(r) {} 596 }; 597 598 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo { 599 public: 600 X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 601 bool d, bool p, bool w, unsigned r) 602 :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {} 603 604 static bool isStructReturnInRegABI( 605 const llvm::Triple &Triple, const CodeGenOptions &Opts); 606 607 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 608 CodeGen::CodeGenModule &CGM) const; 609 610 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 611 // Darwin uses different dwarf register numbers for EH. 612 if (CGM.getTarget().getTriple().isOSDarwin()) return 5; 613 return 4; 614 } 615 616 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 617 llvm::Value *Address) const; 618 619 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 620 StringRef Constraint, 621 llvm::Type* Ty) const { 622 return X86AdjustInlineAsmType(CGF, Constraint, Ty); 623 } 624 625 llvm::Constant *getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const { 626 unsigned Sig = (0xeb << 0) | // jmp rel8 627 (0x06 << 8) | // .+0x08 628 ('F' << 16) | 629 ('T' << 24); 630 return llvm::ConstantInt::get(CGM.Int32Ty, Sig); 631 } 632 633 }; 634 635 } 636 637 /// shouldReturnTypeInRegister - Determine if the given type should be 638 /// passed in a register (for the Darwin ABI). 639 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, ASTContext &Context, 640 bool IsInstanceMethod) const { 641 uint64_t Size = Context.getTypeSize(Ty); 642 643 // Type must be register sized. 644 if (!isRegisterSize(Size)) 645 return false; 646 647 if (Ty->isVectorType()) { 648 // 64- and 128- bit vectors inside structures are not returned in 649 // registers. 650 if (Size == 64 || Size == 128) 651 return false; 652 653 return true; 654 } 655 656 // If this is a builtin, pointer, enum, complex type, member pointer, or 657 // member function pointer it is ok. 658 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() || 659 Ty->isAnyComplexType() || Ty->isEnumeralType() || 660 Ty->isBlockPointerType() || Ty->isMemberPointerType()) 661 return true; 662 663 // Arrays are treated like records. 664 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) 665 return shouldReturnTypeInRegister(AT->getElementType(), Context, 666 IsInstanceMethod); 667 668 // Otherwise, it must be a record type. 669 const RecordType *RT = Ty->getAs<RecordType>(); 670 if (!RT) return false; 671 672 // FIXME: Traverse bases here too. 673 674 // For thiscall conventions, structures will never be returned in 675 // a register. This is for compatibility with the MSVC ABI 676 if (IsWin32StructABI && IsInstanceMethod && RT->isStructureType()) 677 return false; 678 679 // Structure types are passed in register if all fields would be 680 // passed in a register. 
681 for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(), 682 e = RT->getDecl()->field_end(); i != e; ++i) { 683 const FieldDecl *FD = *i; 684 685 // Empty fields are ignored. 686 if (isEmptyField(Context, FD, true)) 687 continue; 688 689 // Check fields recursively. 690 if (!shouldReturnTypeInRegister(FD->getType(), Context, IsInstanceMethod)) 691 return false; 692 } 693 return true; 694 } 695 696 ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const { 697 // If the return value is indirect, then the hidden argument is consuming one 698 // integer register. 699 if (State.FreeRegs) { 700 --State.FreeRegs; 701 return ABIArgInfo::getIndirectInReg(/*Align=*/0, /*ByVal=*/false); 702 } 703 return ABIArgInfo::getIndirect(/*Align=*/0, /*ByVal=*/false); 704 } 705 706 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, CCState &State, 707 bool IsInstanceMethod) const { 708 if (RetTy->isVoidType()) 709 return ABIArgInfo::getIgnore(); 710 711 if (const VectorType *VT = RetTy->getAs<VectorType>()) { 712 // On Darwin, some vectors are returned in registers. 713 if (IsDarwinVectorABI) { 714 uint64_t Size = getContext().getTypeSize(RetTy); 715 716 // 128-bit vectors are a special case; they are returned in 717 // registers and we need to make sure to pick a type the LLVM 718 // backend will like. 719 if (Size == 128) 720 return ABIArgInfo::getDirect(llvm::VectorType::get( 721 llvm::Type::getInt64Ty(getVMContext()), 2)); 722 723 // Always return in register if it fits in a general purpose 724 // register, or if it is 64 bits and has a single element. 725 if ((Size == 8 || Size == 16 || Size == 32) || 726 (Size == 64 && VT->getNumElements() == 1)) 727 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 728 Size)); 729 730 return getIndirectReturnResult(State); 731 } 732 733 return ABIArgInfo::getDirect(); 734 } 735 736 if (isAggregateTypeForABI(RetTy)) { 737 if (const RecordType *RT = RetTy->getAs<RecordType>()) { 738 if (isRecordReturnIndirect(RT, getCXXABI())) 739 return getIndirectReturnResult(State); 740 741 // Structures with flexible arrays are always indirect. 742 if (RT->getDecl()->hasFlexibleArrayMember()) 743 return getIndirectReturnResult(State); 744 } 745 746 // If specified, structs and unions are always indirect. 747 if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType()) 748 return getIndirectReturnResult(State); 749 750 // Small structures which are register sized are generally returned 751 // in a register. 752 if (shouldReturnTypeInRegister(RetTy, getContext(), IsInstanceMethod)) { 753 uint64_t Size = getContext().getTypeSize(RetTy); 754 755 // As a special-case, if the struct is a "single-element" struct, and 756 // the field is of type "float" or "double", return it in a 757 // floating-point register. (MSVC does not apply this special case.) 758 // We apply a similar transformation for pointer types to improve the 759 // quality of the generated IR. 760 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) 761 if ((!IsWin32StructABI && SeltTy->isRealFloatingType()) 762 || SeltTy->hasPointerRepresentation()) 763 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 764 765 // FIXME: We should be able to narrow this integer in cases with dead 766 // padding. 767 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size)); 768 } 769 770 return getIndirectReturnResult(State); 771 } 772 773 // Treat an enum type as its underlying type. 
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first. A base containing an SSE
  // vector type makes the whole record count as containing one.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (isRecordWithSSEVectorType(Context, i->getType()))
        return true;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return ABIArgInfo::getIndirectInReg(0, false);
    }
    return ABIArgInfo::getIndirect(0, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4, /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
851 bool Realign = TypeAlign > StackAlign; 852 return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, Realign); 853 } 854 855 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const { 856 const Type *T = isSingleElementStruct(Ty, getContext()); 857 if (!T) 858 T = Ty.getTypePtr(); 859 860 if (const BuiltinType *BT = T->getAs<BuiltinType>()) { 861 BuiltinType::Kind K = BT->getKind(); 862 if (K == BuiltinType::Float || K == BuiltinType::Double) 863 return Float; 864 } 865 return Integer; 866 } 867 868 bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State, 869 bool &NeedsPadding) const { 870 NeedsPadding = false; 871 Class C = classify(Ty); 872 if (C == Float) 873 return false; 874 875 unsigned Size = getContext().getTypeSize(Ty); 876 unsigned SizeInRegs = (Size + 31) / 32; 877 878 if (SizeInRegs == 0) 879 return false; 880 881 if (SizeInRegs > State.FreeRegs) { 882 State.FreeRegs = 0; 883 return false; 884 } 885 886 State.FreeRegs -= SizeInRegs; 887 888 if (State.CC == llvm::CallingConv::X86_FastCall) { 889 if (Size > 32) 890 return false; 891 892 if (Ty->isIntegralOrEnumerationType()) 893 return true; 894 895 if (Ty->isPointerType()) 896 return true; 897 898 if (Ty->isReferenceType()) 899 return true; 900 901 if (State.FreeRegs) 902 NeedsPadding = true; 903 904 return false; 905 } 906 907 return true; 908 } 909 910 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, 911 CCState &State) const { 912 // FIXME: Set alignment on indirect arguments. 913 if (isAggregateTypeForABI(Ty)) { 914 if (const RecordType *RT = Ty->getAs<RecordType>()) { 915 // Check with the C++ ABI first. 916 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); 917 if (RAA == CGCXXABI::RAA_Indirect) { 918 return getIndirectResult(Ty, false, State); 919 } else if (RAA == CGCXXABI::RAA_DirectInMemory) { 920 // The field index doesn't matter, we'll fix it up later. 921 return ABIArgInfo::getInAlloca(/*FieldIndex=*/0); 922 } 923 924 // Structs are always byval on win32, regardless of what they contain. 925 if (IsWin32StructABI) 926 return getIndirectResult(Ty, true, State); 927 928 // Structures with flexible arrays are always indirect. 929 if (RT->getDecl()->hasFlexibleArrayMember()) 930 return getIndirectResult(Ty, true, State); 931 } 932 933 // Ignore empty structs/unions. 934 if (isEmptyRecord(getContext(), Ty, true)) 935 return ABIArgInfo::getIgnore(); 936 937 llvm::LLVMContext &LLVMContext = getVMContext(); 938 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 939 bool NeedsPadding; 940 if (shouldUseInReg(Ty, State, NeedsPadding)) { 941 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; 942 SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32); 943 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 944 return ABIArgInfo::getDirectInReg(Result); 945 } 946 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : 0; 947 948 // Expand small (<= 128-bit) record types when we know that the stack layout 949 // of those arguments will match the struct. This is important because the 950 // LLVM backend isn't smart enough to remove byval, which inhibits many 951 // optimizations. 
952 if (getContext().getTypeSize(Ty) <= 4*32 && 953 canExpandIndirectArgument(Ty, getContext())) 954 return ABIArgInfo::getExpandWithPadding( 955 State.CC == llvm::CallingConv::X86_FastCall, PaddingType); 956 957 return getIndirectResult(Ty, true, State); 958 } 959 960 if (const VectorType *VT = Ty->getAs<VectorType>()) { 961 // On Darwin, some vectors are passed in memory, we handle this by passing 962 // it as an i8/i16/i32/i64. 963 if (IsDarwinVectorABI) { 964 uint64_t Size = getContext().getTypeSize(Ty); 965 if ((Size == 8 || Size == 16 || Size == 32) || 966 (Size == 64 && VT->getNumElements() == 1)) 967 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 968 Size)); 969 } 970 971 if (IsX86_MMXType(CGT.ConvertType(Ty))) 972 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64)); 973 974 return ABIArgInfo::getDirect(); 975 } 976 977 978 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 979 Ty = EnumTy->getDecl()->getIntegerType(); 980 981 bool NeedsPadding; 982 bool InReg = shouldUseInReg(Ty, State, NeedsPadding); 983 984 if (Ty->isPromotableIntegerType()) { 985 if (InReg) 986 return ABIArgInfo::getExtendInReg(); 987 return ABIArgInfo::getExtend(); 988 } 989 if (InReg) 990 return ABIArgInfo::getDirectInReg(); 991 return ABIArgInfo::getDirect(); 992 } 993 994 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const { 995 CCState State(FI.getCallingConvention()); 996 if (State.CC == llvm::CallingConv::X86_FastCall) 997 State.FreeRegs = 2; 998 else if (FI.getHasRegParm()) 999 State.FreeRegs = FI.getRegParm(); 1000 else 1001 State.FreeRegs = DefaultNumRegisterParameters; 1002 1003 FI.getReturnInfo() = 1004 classifyReturnType(FI.getReturnType(), State, FI.isInstanceMethod()); 1005 1006 // On win32, use the x86_cdeclmethodcc convention for cdecl methods that use 1007 // sret. This convention swaps the order of the first two parameters behind 1008 // the scenes to match MSVC. 1009 if (IsWin32StructABI && FI.isInstanceMethod() && 1010 FI.getCallingConvention() == llvm::CallingConv::C && 1011 FI.getReturnInfo().isIndirect()) 1012 FI.setEffectiveCallingConvention(llvm::CallingConv::X86_CDeclMethod); 1013 1014 bool UsedInAlloca = false; 1015 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 1016 it != ie; ++it) { 1017 it->info = classifyArgumentType(it->type, State); 1018 UsedInAlloca |= (it->info.getKind() == ABIArgInfo::InAlloca); 1019 } 1020 1021 // If we needed to use inalloca for any argument, do a second pass and rewrite 1022 // all the memory arguments to use inalloca. 1023 if (UsedInAlloca) 1024 rewriteWithInAlloca(FI); 1025 } 1026 1027 void 1028 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, 1029 unsigned &StackOffset, 1030 ABIArgInfo &Info, QualType Type) const { 1031 // Insert padding bytes to respect alignment. For x86_32, each argument is 4 1032 // byte aligned. 
1033 unsigned Align = 4U; 1034 if (Info.getKind() == ABIArgInfo::Indirect && Info.getIndirectByVal()) 1035 Align = std::max(Align, Info.getIndirectAlign()); 1036 if (StackOffset & (Align - 1)) { 1037 unsigned OldOffset = StackOffset; 1038 StackOffset = llvm::RoundUpToAlignment(StackOffset, Align); 1039 unsigned NumBytes = StackOffset - OldOffset; 1040 assert(NumBytes); 1041 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext()); 1042 Ty = llvm::ArrayType::get(Ty, NumBytes); 1043 FrameFields.push_back(Ty); 1044 } 1045 1046 Info = ABIArgInfo::getInAlloca(FrameFields.size()); 1047 FrameFields.push_back(CGT.ConvertTypeForMem(Type)); 1048 StackOffset += getContext().getTypeSizeInChars(Type).getQuantity(); 1049 } 1050 1051 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const { 1052 assert(IsWin32StructABI && "inalloca only supported on win32"); 1053 1054 // Build a packed struct type for all of the arguments in memory. 1055 SmallVector<llvm::Type *, 6> FrameFields; 1056 1057 unsigned StackOffset = 0; 1058 1059 // Put the sret parameter into the inalloca struct if it's in memory. 1060 ABIArgInfo &Ret = FI.getReturnInfo(); 1061 if (Ret.isIndirect() && !Ret.getInReg()) { 1062 CanQualType PtrTy = getContext().getPointerType(FI.getReturnType()); 1063 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy); 1064 } 1065 1066 // Skip the 'this' parameter in ecx. 1067 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end(); 1068 if (FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall) 1069 ++I; 1070 1071 // Put arguments passed in memory into the struct. 1072 for (; I != E; ++I) { 1073 1074 // Leave ignored and inreg arguments alone. 1075 switch (I->info.getKind()) { 1076 case ABIArgInfo::Indirect: 1077 assert(I->info.getIndirectByVal()); 1078 break; 1079 case ABIArgInfo::Ignore: 1080 continue; 1081 case ABIArgInfo::Direct: 1082 case ABIArgInfo::Extend: 1083 if (I->info.getInReg()) 1084 continue; 1085 break; 1086 default: 1087 break; 1088 } 1089 1090 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); 1091 } 1092 1093 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields, 1094 /*isPacked=*/true)); 1095 } 1096 1097 llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 1098 CodeGenFunction &CGF) const { 1099 llvm::Type *BPP = CGF.Int8PtrPtrTy; 1100 1101 CGBuilderTy &Builder = CGF.Builder; 1102 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 1103 "ap"); 1104 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 1105 1106 // Compute if the address needs to be aligned 1107 unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity(); 1108 Align = getTypeStackAlignInBytes(Ty, Align); 1109 Align = std::max(Align, 4U); 1110 if (Align > 4) { 1111 // addr = (addr + align - 1) & -align; 1112 llvm::Value *Offset = 1113 llvm::ConstantInt::get(CGF.Int32Ty, Align - 1); 1114 Addr = CGF.Builder.CreateGEP(Addr, Offset); 1115 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr, 1116 CGF.Int32Ty); 1117 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align); 1118 Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 1119 Addr->getType(), 1120 "ap.cur.aligned"); 1121 } 1122 1123 llvm::Type *PTy = 1124 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 1125 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 1126 1127 uint64_t Offset = 1128 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align); 1129 llvm::Value *NextAddr = 1130 Builder.CreateGEP(Addr, 
llvm::ConstantInt::get(CGF.Int32Ty, Offset), 1131 "ap.next"); 1132 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 1133 1134 return AddrTyped; 1135 } 1136 1137 void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 1138 llvm::GlobalValue *GV, 1139 CodeGen::CodeGenModule &CGM) const { 1140 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 1141 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 1142 // Get the LLVM function. 1143 llvm::Function *Fn = cast<llvm::Function>(GV); 1144 1145 // Now add the 'alignstack' attribute with a value of 16. 1146 llvm::AttrBuilder B; 1147 B.addStackAlignmentAttr(16); 1148 Fn->addAttributes(llvm::AttributeSet::FunctionIndex, 1149 llvm::AttributeSet::get(CGM.getLLVMContext(), 1150 llvm::AttributeSet::FunctionIndex, 1151 B)); 1152 } 1153 } 1154 } 1155 1156 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( 1157 CodeGen::CodeGenFunction &CGF, 1158 llvm::Value *Address) const { 1159 CodeGen::CGBuilderTy &Builder = CGF.Builder; 1160 1161 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 1162 1163 // 0-7 are the eight integer registers; the order is different 1164 // on Darwin (for EH), but the range is the same. 1165 // 8 is %eip. 1166 AssignToArrayRange(Builder, Address, Four8, 0, 8); 1167 1168 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) { 1169 // 12-16 are st(0..4). Not sure why we stop at 4. 1170 // These have size 16, which is sizeof(long double) on 1171 // platforms with 8-byte alignment for that type. 1172 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16); 1173 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16); 1174 1175 } else { 1176 // 9 is %eflags, which doesn't get a size on Darwin for some 1177 // reason. 1178 Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9)); 1179 1180 // 11-16 are st(0..5). Not sure why we stop at 5. 1181 // These have size 12, which is sizeof(long double) on 1182 // platforms with 4-byte alignment for that type. 1183 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12); 1184 AssignToArrayRange(Builder, Address, Twelve8, 11, 16); 1185 } 1186 1187 return false; 1188 } 1189 1190 //===----------------------------------------------------------------------===// 1191 // X86-64 ABI Implementation 1192 //===----------------------------------------------------------------------===// 1193 1194 1195 namespace { 1196 /// X86_64ABIInfo - The X86_64 ABI information. 1197 class X86_64ABIInfo : public ABIInfo { 1198 enum Class { 1199 Integer = 0, 1200 SSE, 1201 SSEUp, 1202 X87, 1203 X87Up, 1204 ComplexX87, 1205 NoClass, 1206 Memory 1207 }; 1208 1209 /// merge - Implement the X86_64 ABI merging algorithm. 1210 /// 1211 /// Merge an accumulating classification \arg Accum with a field 1212 /// classification \arg Field. 1213 /// 1214 /// \param Accum - The accumulating classification. This should 1215 /// always be either NoClass or the result of a previous merge 1216 /// call. In addition, this should never be Memory (the caller 1217 /// should just return Memory for the aggregate). 1218 static Class merge(Class Accum, Class Field); 1219 1220 /// postMerge - Implement the X86_64 ABI post merging algorithm. 1221 /// 1222 /// Post merger cleanup, reduces a malformed Hi and Lo pair to 1223 /// final MEMORY or SSE classes when necessary. 1224 /// 1225 /// \param AggregateSize - The size of the current aggregate in 1226 /// the classification process. 
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// \param isNamedArg - Whether the argument in question is a "named"
  /// argument, as used in AMD64-ABI 3.5.7.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE,
                                  bool isNamedArg) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  bool HasAVX;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32-bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
                                           /*isNamedArg*/true);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  ABIArgInfo classify(QualType Ty, bool IsReturnType) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
1382 if (fnType->getCallConv() == CC_C) { 1383 bool HasAVXType = false; 1384 for (CallArgList::const_iterator 1385 it = args.begin(), ie = args.end(); it != ie; ++it) { 1386 if (getABIInfo().isPassedUsingAVXType(it->Ty)) { 1387 HasAVXType = true; 1388 break; 1389 } 1390 } 1391 1392 if (!HasAVXType) 1393 return true; 1394 } 1395 1396 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType); 1397 } 1398 1399 llvm::Constant *getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const { 1400 unsigned Sig = (0xeb << 0) | // jmp rel8 1401 (0x0a << 8) | // .+0x0c 1402 ('F' << 16) | 1403 ('T' << 24); 1404 return llvm::ConstantInt::get(CGM.Int32Ty, Sig); 1405 } 1406 1407 }; 1408 1409 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) { 1410 // If the argument does not end in .lib, automatically add the suffix. This 1411 // matches the behavior of MSVC. 1412 std::string ArgStr = Lib; 1413 if (!Lib.endswith_lower(".lib")) 1414 ArgStr += ".lib"; 1415 return ArgStr; 1416 } 1417 1418 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo { 1419 public: 1420 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 1421 bool d, bool p, bool w, unsigned RegParms) 1422 : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {} 1423 1424 void getDependentLibraryOption(llvm::StringRef Lib, 1425 llvm::SmallString<24> &Opt) const { 1426 Opt = "/DEFAULTLIB:"; 1427 Opt += qualifyWindowsLibrary(Lib); 1428 } 1429 1430 void getDetectMismatchOption(llvm::StringRef Name, 1431 llvm::StringRef Value, 1432 llvm::SmallString<32> &Opt) const { 1433 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 1434 } 1435 }; 1436 1437 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 1438 public: 1439 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 1440 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {} 1441 1442 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 1443 return 7; 1444 } 1445 1446 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 1447 llvm::Value *Address) const { 1448 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 1449 1450 // 0-15 are the 16 integer registers. 1451 // 16 is %rip. 1452 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 1453 return false; 1454 } 1455 1456 void getDependentLibraryOption(llvm::StringRef Lib, 1457 llvm::SmallString<24> &Opt) const { 1458 Opt = "/DEFAULTLIB:"; 1459 Opt += qualifyWindowsLibrary(Lib); 1460 } 1461 1462 void getDetectMismatchOption(llvm::StringRef Name, 1463 llvm::StringRef Value, 1464 llvm::SmallString<32> &Opt) const { 1465 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 1466 } 1467 }; 1468 1469 } 1470 1471 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, 1472 Class &Hi) const { 1473 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: 1474 // 1475 // (a) If one of the classes is Memory, the whole argument is passed in 1476 // memory. 1477 // 1478 // (b) If X87UP is not preceded by X87, the whole argument is passed in 1479 // memory. 1480 // 1481 // (c) If the size of the aggregate exceeds two eightbytes and the first 1482 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole 1483 // argument is passed in memory. NOTE: This is necessary to keep the 1484 // ABI working for processors that don't support the __m256 type. 1485 // 1486 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. 1487 // 1488 // Some of these are enforced by the merging logic. 
Others can arise 1489 // only with unions; for example: 1490 // union { _Complex double; unsigned; } 1491 // 1492 // Note that clauses (b) and (c) were added in 0.98. 1493 // 1494 if (Hi == Memory) 1495 Lo = Memory; 1496 if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) 1497 Lo = Memory; 1498 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) 1499 Lo = Memory; 1500 if (Hi == SSEUp && Lo != SSE) 1501 Hi = SSE; 1502 } 1503 1504 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { 1505 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 1506 // classified recursively so that always two fields are 1507 // considered. The resulting class is calculated according to 1508 // the classes of the fields in the eightbyte: 1509 // 1510 // (a) If both classes are equal, this is the resulting class. 1511 // 1512 // (b) If one of the classes is NO_CLASS, the resulting class is 1513 // the other class. 1514 // 1515 // (c) If one of the classes is MEMORY, the result is the MEMORY 1516 // class. 1517 // 1518 // (d) If one of the classes is INTEGER, the result is the 1519 // INTEGER. 1520 // 1521 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 1522 // MEMORY is used as class. 1523 // 1524 // (f) Otherwise class SSE is used. 1525 1526 // Accum should never be memory (we should have returned) or 1527 // ComplexX87 (because this cannot be passed in a structure). 1528 assert((Accum != Memory && Accum != ComplexX87) && 1529 "Invalid accumulated classification during merge."); 1530 if (Accum == Field || Field == NoClass) 1531 return Accum; 1532 if (Field == Memory) 1533 return Memory; 1534 if (Accum == NoClass) 1535 return Field; 1536 if (Accum == Integer || Field == Integer) 1537 return Integer; 1538 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 1539 Accum == X87 || Accum == X87Up) 1540 return Memory; 1541 return SSE; 1542 } 1543 1544 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, 1545 Class &Lo, Class &Hi, bool isNamedArg) const { 1546 // FIXME: This code can be simplified by introducing a simple value class for 1547 // Class pairs with appropriate constructor methods for the various 1548 // situations. 1549 1550 // FIXME: Some of the split computations are wrong; unaligned vectors 1551 // shouldn't be passed in registers for example, so there is no chance they 1552 // can straddle an eightbyte. Verify & simplify. 1553 1554 Lo = Hi = NoClass; 1555 1556 Class &Current = OffsetBase < 64 ? Lo : Hi; 1557 Current = Memory; 1558 1559 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 1560 BuiltinType::Kind k = BT->getKind(); 1561 1562 if (k == BuiltinType::Void) { 1563 Current = NoClass; 1564 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 1565 Lo = Integer; 1566 Hi = Integer; 1567 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 1568 Current = Integer; 1569 } else if ((k == BuiltinType::Float || k == BuiltinType::Double) || 1570 (k == BuiltinType::LongDouble && 1571 getTarget().getTriple().isOSNaCl())) { 1572 Current = SSE; 1573 } else if (k == BuiltinType::LongDouble) { 1574 Lo = X87; 1575 Hi = X87Up; 1576 } 1577 // FIXME: _Decimal32 and _Decimal64 are SSE. 1578 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 1579 return; 1580 } 1581 1582 if (const EnumType *ET = Ty->getAs<EnumType>()) { 1583 // Classify the underlying integer type. 
1584 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg); 1585 return; 1586 } 1587 1588 if (Ty->hasPointerRepresentation()) { 1589 Current = Integer; 1590 return; 1591 } 1592 1593 if (Ty->isMemberPointerType()) { 1594 if (Ty->isMemberFunctionPointerType() && Has64BitPointers) 1595 Lo = Hi = Integer; 1596 else 1597 Current = Integer; 1598 return; 1599 } 1600 1601 if (const VectorType *VT = Ty->getAs<VectorType>()) { 1602 uint64_t Size = getContext().getTypeSize(VT); 1603 if (Size == 32) { 1604 // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x 1605 // float> as integer. 1606 Current = Integer; 1607 1608 // If this type crosses an eightbyte boundary, it should be 1609 // split. 1610 uint64_t EB_Real = (OffsetBase) / 64; 1611 uint64_t EB_Imag = (OffsetBase + Size - 1) / 64; 1612 if (EB_Real != EB_Imag) 1613 Hi = Lo; 1614 } else if (Size == 64) { 1615 // gcc passes <1 x double> in memory. :( 1616 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) 1617 return; 1618 1619 // gcc passes <1 x long long> as INTEGER. 1620 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) || 1621 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) || 1622 VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) || 1623 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong)) 1624 Current = Integer; 1625 else 1626 Current = SSE; 1627 1628 // If this type crosses an eightbyte boundary, it should be 1629 // split. 1630 if (OffsetBase && OffsetBase != 64) 1631 Hi = Lo; 1632 } else if (Size == 128 || (HasAVX && isNamedArg && Size == 256)) { 1633 // Arguments of 256-bits are split into four eightbyte chunks. The 1634 // least significant one belongs to class SSE and all the others to class 1635 // SSEUP. The original Lo and Hi design considers that types can't be 1636 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. 1637 // This design isn't correct for 256-bits, but since there're no cases 1638 // where the upper parts would need to be inspected, avoid adding 1639 // complexity and just consider Hi to match the 64-256 part. 1640 // 1641 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in 1642 // registers if they are "named", i.e. not part of the "..." of a 1643 // variadic function. 1644 Lo = SSE; 1645 Hi = SSEUp; 1646 } 1647 return; 1648 } 1649 1650 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 1651 QualType ET = getContext().getCanonicalType(CT->getElementType()); 1652 1653 uint64_t Size = getContext().getTypeSize(Ty); 1654 if (ET->isIntegralOrEnumerationType()) { 1655 if (Size <= 64) 1656 Current = Integer; 1657 else if (Size <= 128) 1658 Lo = Hi = Integer; 1659 } else if (ET == getContext().FloatTy) 1660 Current = SSE; 1661 else if (ET == getContext().DoubleTy || 1662 (ET == getContext().LongDoubleTy && 1663 getTarget().getTriple().isOSNaCl())) 1664 Lo = Hi = SSE; 1665 else if (ET == getContext().LongDoubleTy) 1666 Current = ComplexX87; 1667 1668 // If this complex type crosses an eightbyte boundary then it 1669 // should be split. 1670 uint64_t EB_Real = (OffsetBase) / 64; 1671 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; 1672 if (Hi == NoClass && EB_Real != EB_Imag) 1673 Hi = Lo; 1674 1675 return; 1676 } 1677 1678 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 1679 // Arrays are treated like structures. 1680 1681 uint64_t Size = getContext().getTypeSize(Ty); 1682 1683 // AMD64-ABI 3.2.3p2: Rule 1. 
If the size of an object is larger 1684 // than four eightbytes, ..., it has class MEMORY. 1685 if (Size > 256) 1686 return; 1687 1688 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 1689 // fields, it has class MEMORY. 1690 // 1691 // Only need to check alignment of array base. 1692 if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) 1693 return; 1694 1695 // Otherwise implement simplified merge. We could be smarter about 1696 // this, but it isn't worth it and would be harder to verify. 1697 Current = NoClass; 1698 uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); 1699 uint64_t ArraySize = AT->getSize().getZExtValue(); 1700 1701 // The only case a 256-bit wide vector could be used is when the array 1702 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 1703 // to work for sizes wider than 128, early check and fallback to memory. 1704 if (Size > 128 && EltSize != 256) 1705 return; 1706 1707 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 1708 Class FieldLo, FieldHi; 1709 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg); 1710 Lo = merge(Lo, FieldLo); 1711 Hi = merge(Hi, FieldHi); 1712 if (Lo == Memory || Hi == Memory) 1713 break; 1714 } 1715 1716 postMerge(Size, Lo, Hi); 1717 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 1718 return; 1719 } 1720 1721 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1722 uint64_t Size = getContext().getTypeSize(Ty); 1723 1724 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 1725 // than four eightbytes, ..., it has class MEMORY. 1726 if (Size > 256) 1727 return; 1728 1729 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial 1730 // copy constructor or a non-trivial destructor, it is passed by invisible 1731 // reference. 1732 if (getRecordArgABI(RT, getCXXABI())) 1733 return; 1734 1735 const RecordDecl *RD = RT->getDecl(); 1736 1737 // Assume variable sized types are passed in memory. 1738 if (RD->hasFlexibleArrayMember()) 1739 return; 1740 1741 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 1742 1743 // Reset Lo class, this will be recomputed. 1744 Current = NoClass; 1745 1746 // If this is a C++ record, classify the bases first. 1747 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1748 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 1749 e = CXXRD->bases_end(); i != e; ++i) { 1750 assert(!i->isVirtual() && !i->getType()->isDependentType() && 1751 "Unexpected base class!"); 1752 const CXXRecordDecl *Base = 1753 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 1754 1755 // Classify this field. 1756 // 1757 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 1758 // single eightbyte, each is classified separately. Each eightbyte gets 1759 // initialized to class NO_CLASS. 1760 Class FieldLo, FieldHi; 1761 uint64_t Offset = 1762 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); 1763 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg); 1764 Lo = merge(Lo, FieldLo); 1765 Hi = merge(Hi, FieldHi); 1766 if (Lo == Memory || Hi == Memory) 1767 break; 1768 } 1769 } 1770 1771 // Classify the fields one at a time, merging the results. 
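    // Illustrative sketch, not part of the algorithm itself: for a type like
    //   struct S { double d; long l; };   // 16 bytes
    // the double at offset 0 classifies as FieldLo = SSE and the long at
    // offset 64 as FieldHi = Integer, so after merging the struct ends up
    // with Lo = SSE, Hi = Integer, i.e. one XMM register plus one GPR.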
1772 unsigned idx = 0; 1773 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1774 i != e; ++i, ++idx) { 1775 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 1776 bool BitField = i->isBitField(); 1777 1778 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than 1779 // four eightbytes, or it contains unaligned fields, it has class MEMORY. 1780 // 1781 // The only case a 256-bit wide vector could be used is when the struct 1782 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 1783 // to work for sizes wider than 128, early check and fallback to memory. 1784 // 1785 if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) { 1786 Lo = Memory; 1787 return; 1788 } 1789 // Note, skip this test for bit-fields, see below. 1790 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { 1791 Lo = Memory; 1792 return; 1793 } 1794 1795 // Classify this field. 1796 // 1797 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate 1798 // exceeds a single eightbyte, each is classified 1799 // separately. Each eightbyte gets initialized to class 1800 // NO_CLASS. 1801 Class FieldLo, FieldHi; 1802 1803 // Bit-fields require special handling, they do not force the 1804 // structure to be passed in memory even if unaligned, and 1805 // therefore they can straddle an eightbyte. 1806 if (BitField) { 1807 // Ignore padding bit-fields. 1808 if (i->isUnnamedBitfield()) 1809 continue; 1810 1811 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 1812 uint64_t Size = i->getBitWidthValue(getContext()); 1813 1814 uint64_t EB_Lo = Offset / 64; 1815 uint64_t EB_Hi = (Offset + Size - 1) / 64; 1816 1817 if (EB_Lo) { 1818 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 1819 FieldLo = NoClass; 1820 FieldHi = Integer; 1821 } else { 1822 FieldLo = Integer; 1823 FieldHi = EB_Hi ? Integer : NoClass; 1824 } 1825 } else 1826 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg); 1827 Lo = merge(Lo, FieldLo); 1828 Hi = merge(Hi, FieldHi); 1829 if (Lo == Memory || Hi == Memory) 1830 break; 1831 } 1832 1833 postMerge(Size, Lo, Hi); 1834 } 1835 } 1836 1837 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { 1838 // If this is a scalar LLVM value then assume LLVM will pass it in the right 1839 // place naturally. 1840 if (!isAggregateTypeForABI(Ty)) { 1841 // Treat an enum type as its underlying type. 1842 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1843 Ty = EnumTy->getDecl()->getIntegerType(); 1844 1845 return (Ty->isPromotableIntegerType() ? 1846 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1847 } 1848 1849 return ABIArgInfo::getIndirect(0); 1850 } 1851 1852 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { 1853 if (const VectorType *VecTy = Ty->getAs<VectorType>()) { 1854 uint64_t Size = getContext().getTypeSize(VecTy); 1855 unsigned LargestVector = HasAVX ? 256 : 128; 1856 if (Size <= 64 || Size > LargestVector) 1857 return true; 1858 } 1859 1860 return false; 1861 } 1862 1863 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, 1864 unsigned freeIntRegs) const { 1865 // If this is a scalar LLVM value then assume LLVM will pass it in the right 1866 // place naturally. 1867 // 1868 // This assumption is optimistic, as there could be free registers available 1869 // when we need to pass this argument in memory, and LLVM could try to pass 1870 // the argument in the free register. 
  // This does not seem to happen currently, but this code would be much
  // safer if we could mark the argument with 'onstack'. See PR12193.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

  // Compute the byval alignment. We specify the alignment of the byval in all
  // cases so that the mid-level optimizer knows the alignment of the byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  // Attempt to avoid passing indirect results using byval when possible. This
  // is important for good codegen.
  //
  // We do this by coercing the value into a scalar type which the backend can
  // handle naturally (i.e., without using byval).
  //
  // For simplicity, we currently only do this when we have exhausted all of
  // the free integer registers. Doing this when there are free integer
  // registers would require more care, as we would have to ensure that the
  // coerced value did not claim the unused register. That would require
  // either reordering the arguments to the function (so that any subsequent
  // inreg values came first), or only doing this optimization when there were
  // no following arguments that might be inreg.
  //
  // We currently expect it to be rare (particularly in well written code) for
  // arguments to be passed on the stack when there are still free integer
  // registers available (this would typically imply large structs being
  // passed by value), so this seems like a fair tradeoff for now.
  //
  // We can revisit this if the backend grows support for 'onstack' parameter
  // attributes. See PR12193.
  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }

  return ABIArgInfo::getIndirect(Align);
}

/// GetByteVectorType - The ABI specifies that a value should be passed in a
/// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as
/// a vector register.
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  llvm::Type *IRType = CGT.ConvertType(Ty);

  // Wrapper structs that just contain vectors are passed just like vectors,
  // strip them off if present.
  llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
  while (STy && STy->getNumElements() == 1) {
    IRType = STy->getElementType(0);
    STy = dyn_cast<llvm::StructType>(IRType);
  }

  // If the preferred type is a 16-byte vector, prefer to pass it.
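  // For example (illustrative only), a wrapper such as
  //   typedef float v4f __attribute__((vector_size(16)));
  //   struct Wrap { v4f v; };
  // converts to the IR struct { <4 x float> }; the loop above strips the
  // wrapper so the <4 x float> is passed directly in a vector register below.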
1938 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){ 1939 llvm::Type *EltTy = VT->getElementType(); 1940 unsigned BitWidth = VT->getBitWidth(); 1941 if ((BitWidth >= 128 && BitWidth <= 256) && 1942 (EltTy->isFloatTy() || EltTy->isDoubleTy() || 1943 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) || 1944 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) || 1945 EltTy->isIntegerTy(128))) 1946 return VT; 1947 } 1948 1949 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2); 1950 } 1951 1952 /// BitsContainNoUserData - Return true if the specified [start,end) bit range 1953 /// is known to either be off the end of the specified type or being in 1954 /// alignment padding. The user type specified is known to be at most 128 bits 1955 /// in size, and have passed through X86_64ABIInfo::classify with a successful 1956 /// classification that put one of the two halves in the INTEGER class. 1957 /// 1958 /// It is conservatively correct to return false. 1959 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, 1960 unsigned EndBit, ASTContext &Context) { 1961 // If the bytes being queried are off the end of the type, there is no user 1962 // data hiding here. This handles analysis of builtins, vectors and other 1963 // types that don't contain interesting padding. 1964 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 1965 if (TySize <= StartBit) 1966 return true; 1967 1968 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 1969 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 1970 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 1971 1972 // Check each element to see if the element overlaps with the queried range. 1973 for (unsigned i = 0; i != NumElts; ++i) { 1974 // If the element is after the span we care about, then we're done.. 1975 unsigned EltOffset = i*EltSize; 1976 if (EltOffset >= EndBit) break; 1977 1978 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0; 1979 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 1980 EndBit-EltOffset, Context)) 1981 return false; 1982 } 1983 // If it overlaps no elements, then it is safe to process as padding. 1984 return true; 1985 } 1986 1987 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1988 const RecordDecl *RD = RT->getDecl(); 1989 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 1990 1991 // If this is a C++ record, check the bases first. 1992 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1993 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 1994 e = CXXRD->bases_end(); i != e; ++i) { 1995 assert(!i->isVirtual() && !i->getType()->isDependentType() && 1996 "Unexpected base class!"); 1997 const CXXRecordDecl *Base = 1998 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 1999 2000 // If the base is after the span we care about, ignore it. 2001 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 2002 if (BaseOffset >= EndBit) continue; 2003 2004 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 2005 if (!BitsContainNoUserData(i->getType(), BaseStart, 2006 EndBit-BaseOffset, Context)) 2007 return false; 2008 } 2009 } 2010 2011 // Verify that no field has data that overlaps the region of interest. Yes 2012 // this could be sped up a lot by being smarter about queried fields, 2013 // however we're only looking at structs up to 16 bytes, so we don't care 2014 // much. 
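    // Worked example (illustrative, matching the struct {double,int} case
    // discussed later): querying bits [96,128) of
    //   struct S { double d; int i; };   // the int occupies bits [64,96)
    // finds that both fields end at or before bit 96, so only tail padding
    // overlaps the queried range and this function returns true.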
2015 unsigned idx = 0; 2016 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2017 i != e; ++i, ++idx) { 2018 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 2019 2020 // If we found a field after the region we care about, then we're done. 2021 if (FieldOffset >= EndBit) break; 2022 2023 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 2024 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 2025 Context)) 2026 return false; 2027 } 2028 2029 // If nothing in this record overlapped the area of interest, then we're 2030 // clean. 2031 return true; 2032 } 2033 2034 return false; 2035 } 2036 2037 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 2038 /// float member at the specified offset. For example, {int,{float}} has a 2039 /// float at offset 4. It is conservatively correct for this routine to return 2040 /// false. 2041 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 2042 const llvm::DataLayout &TD) { 2043 // Base case if we find a float. 2044 if (IROffset == 0 && IRType->isFloatTy()) 2045 return true; 2046 2047 // If this is a struct, recurse into the field at the specified offset. 2048 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 2049 const llvm::StructLayout *SL = TD.getStructLayout(STy); 2050 unsigned Elt = SL->getElementContainingOffset(IROffset); 2051 IROffset -= SL->getElementOffset(Elt); 2052 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 2053 } 2054 2055 // If this is an array, recurse into the field at the specified offset. 2056 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 2057 llvm::Type *EltTy = ATy->getElementType(); 2058 unsigned EltSize = TD.getTypeAllocSize(EltTy); 2059 IROffset -= IROffset/EltSize*EltSize; 2060 return ContainsFloatAtOffset(EltTy, IROffset, TD); 2061 } 2062 2063 return false; 2064 } 2065 2066 2067 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 2068 /// low 8 bytes of an XMM register, corresponding to the SSE class. 2069 llvm::Type *X86_64ABIInfo:: 2070 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 2071 QualType SourceTy, unsigned SourceOffset) const { 2072 // The only three choices we have are either double, <2 x float>, or float. We 2073 // pass as float if the last 4 bytes is just padding. This happens for 2074 // structs that contain 3 floats. 2075 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 2076 SourceOffset*8+64, getContext())) 2077 return llvm::Type::getFloatTy(getVMContext()); 2078 2079 // We want to pass as <2 x float> if the LLVM IR type contains a float at 2080 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 2081 // case. 2082 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && 2083 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) 2084 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 2085 2086 return llvm::Type::getDoubleTy(getVMContext()); 2087 } 2088 2089 2090 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 2091 /// an 8-byte GPR. This means that we either have a scalar or we are talking 2092 /// about the high or low part of an up-to-16-byte struct. This routine picks 2093 /// the best LLVM IR type to represent this, which may be i64 or may be anything 2094 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 2095 /// etc). 
2096 /// 2097 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 2098 /// the source type. IROffset is an offset in bytes into the LLVM IR type that 2099 /// the 8-byte value references. PrefType may be null. 2100 /// 2101 /// SourceTy is the source level type for the entire argument. SourceOffset is 2102 /// an offset into this that we're processing (which is always either 0 or 8). 2103 /// 2104 llvm::Type *X86_64ABIInfo:: 2105 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 2106 QualType SourceTy, unsigned SourceOffset) const { 2107 // If we're dealing with an un-offset LLVM IR type, then it means that we're 2108 // returning an 8-byte unit starting with it. See if we can safely use it. 2109 if (IROffset == 0) { 2110 // Pointers and int64's always fill the 8-byte unit. 2111 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || 2112 IRType->isIntegerTy(64)) 2113 return IRType; 2114 2115 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 2116 // goodness in the source type is just tail padding. This is allowed to 2117 // kick in for struct {double,int} on the int, but not on 2118 // struct{double,int,int} because we wouldn't return the second int. We 2119 // have to do this analysis on the source type because we can't depend on 2120 // unions being lowered a specific way etc. 2121 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 2122 IRType->isIntegerTy(32) || 2123 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) { 2124 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 : 2125 cast<llvm::IntegerType>(IRType)->getBitWidth(); 2126 2127 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 2128 SourceOffset*8+64, getContext())) 2129 return IRType; 2130 } 2131 } 2132 2133 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 2134 // If this is a struct, recurse into the field at the specified offset. 2135 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy); 2136 if (IROffset < SL->getSizeInBytes()) { 2137 unsigned FieldIdx = SL->getElementContainingOffset(IROffset); 2138 IROffset -= SL->getElementOffset(FieldIdx); 2139 2140 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, 2141 SourceTy, SourceOffset); 2142 } 2143 } 2144 2145 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 2146 llvm::Type *EltTy = ATy->getElementType(); 2147 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy); 2148 unsigned EltOffset = IROffset/EltSize*EltSize; 2149 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, 2150 SourceOffset); 2151 } 2152 2153 // Okay, we don't have any better idea of what to pass, so we pass this in an 2154 // integer register that isn't too big to fit the rest of the struct. 2155 unsigned TySizeInBytes = 2156 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); 2157 2158 assert(TySizeInBytes != SourceOffset && "Empty field?"); 2159 2160 // It is always safe to classify this as an integer type up to i64 that 2161 // isn't larger than the structure. 2162 return llvm::IntegerType::get(getVMContext(), 2163 std::min(TySizeInBytes-SourceOffset, 8U)*8); 2164 } 2165 2166 2167 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally 2168 /// be used as elements of a two register pair to pass or return, return a 2169 /// first class aggregate to represent them. 
/// For example, if the low part of a by-value argument should be passed as
/// i32* and the high part as float, return {i32*, float}.
static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                           const llvm::DataLayout &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
  // at offset 8. If the high and low parts we inferred are both 4-byte types
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
  // the second element at offset 8. Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // To handle this, we have to increase the size of the low part so that the
  // second element will start at an 8 byte offset. We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
    // There are only two sorts of types the ABI generation code can produce
    // for the low part of a pair that aren't 8 bytes in size: float or
    // i8/i16/i32. Promote these to a larger type.
    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);

  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}

ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
  // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

  // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
  // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
2249 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 2250 RetTy = EnumTy->getDecl()->getIntegerType(); 2251 2252 if (RetTy->isIntegralOrEnumerationType() && 2253 RetTy->isPromotableIntegerType()) 2254 return ABIArgInfo::getExtend(); 2255 } 2256 break; 2257 2258 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 2259 // available SSE register of the sequence %xmm0, %xmm1 is used. 2260 case SSE: 2261 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 2262 break; 2263 2264 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 2265 // returned on the X87 stack in %st0 as 80-bit x87 number. 2266 case X87: 2267 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 2268 break; 2269 2270 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 2271 // part of the value is returned in %st0 and the imaginary part in 2272 // %st1. 2273 case ComplexX87: 2274 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 2275 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), 2276 llvm::Type::getX86_FP80Ty(getVMContext()), 2277 NULL); 2278 break; 2279 } 2280 2281 llvm::Type *HighPart = 0; 2282 switch (Hi) { 2283 // Memory was handled previously and X87 should 2284 // never occur as a hi class. 2285 case Memory: 2286 case X87: 2287 llvm_unreachable("Invalid classification for hi word."); 2288 2289 case ComplexX87: // Previously handled. 2290 case NoClass: 2291 break; 2292 2293 case Integer: 2294 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2295 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2296 return ABIArgInfo::getDirect(HighPart, 8); 2297 break; 2298 case SSE: 2299 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2300 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2301 return ABIArgInfo::getDirect(HighPart, 8); 2302 break; 2303 2304 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 2305 // is passed in the next available eightbyte chunk if the last used 2306 // vector register. 2307 // 2308 // SSEUP should always be preceded by SSE, just widen. 2309 case SSEUp: 2310 assert(Lo == SSE && "Unexpected SSEUp classification."); 2311 ResType = GetByteVectorType(RetTy); 2312 break; 2313 2314 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 2315 // returned together with the previous X87 value in %st0. 2316 case X87Up: 2317 // If X87Up is preceded by X87, we don't need to do 2318 // anything. However, in some cases with unions it may not be 2319 // preceded by X87. In such situations we follow gcc and pass the 2320 // extra bits in an SSE reg. 2321 if (Lo != X87) { 2322 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2323 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2324 return ABIArgInfo::getDirect(HighPart, 8); 2325 } 2326 break; 2327 } 2328 2329 // If a high part was specified, merge it together with the low part. It is 2330 // known to pass in the high eightbyte of the result. 
We do this by forming a 2331 // first class struct aggregate with the high and low part: {low, high} 2332 if (HighPart) 2333 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2334 2335 return ABIArgInfo::getDirect(ResType); 2336 } 2337 2338 ABIArgInfo X86_64ABIInfo::classifyArgumentType( 2339 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE, 2340 bool isNamedArg) 2341 const 2342 { 2343 X86_64ABIInfo::Class Lo, Hi; 2344 classify(Ty, 0, Lo, Hi, isNamedArg); 2345 2346 // Check some invariants. 2347 // FIXME: Enforce these by construction. 2348 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2349 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2350 2351 neededInt = 0; 2352 neededSSE = 0; 2353 llvm::Type *ResType = 0; 2354 switch (Lo) { 2355 case NoClass: 2356 if (Hi == NoClass) 2357 return ABIArgInfo::getIgnore(); 2358 // If the low part is just padding, it takes no register, leave ResType 2359 // null. 2360 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2361 "Unknown missing lo part"); 2362 break; 2363 2364 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 2365 // on the stack. 2366 case Memory: 2367 2368 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 2369 // COMPLEX_X87, it is passed in memory. 2370 case X87: 2371 case ComplexX87: 2372 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect) 2373 ++neededInt; 2374 return getIndirectResult(Ty, freeIntRegs); 2375 2376 case SSEUp: 2377 case X87Up: 2378 llvm_unreachable("Invalid classification for lo word."); 2379 2380 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 2381 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 2382 // and %r9 is used. 2383 case Integer: 2384 ++neededInt; 2385 2386 // Pick an 8-byte type based on the preferred type. 2387 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 2388 2389 // If we have a sign or zero extended integer, make sure to return Extend 2390 // so that the parameter gets the right LLVM IR attributes. 2391 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2392 // Treat an enum type as its underlying type. 2393 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2394 Ty = EnumTy->getDecl()->getIntegerType(); 2395 2396 if (Ty->isIntegralOrEnumerationType() && 2397 Ty->isPromotableIntegerType()) 2398 return ABIArgInfo::getExtend(); 2399 } 2400 2401 break; 2402 2403 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 2404 // available SSE register is used, the registers are taken in the 2405 // order from %xmm0 to %xmm7. 2406 case SSE: { 2407 llvm::Type *IRType = CGT.ConvertType(Ty); 2408 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 2409 ++neededSSE; 2410 break; 2411 } 2412 } 2413 2414 llvm::Type *HighPart = 0; 2415 switch (Hi) { 2416 // Memory was handled previously, ComplexX87 and X87 should 2417 // never occur as hi classes, and X87Up must be preceded by X87, 2418 // which is passed in memory. 2419 case Memory: 2420 case X87: 2421 case ComplexX87: 2422 llvm_unreachable("Invalid classification for hi word."); 2423 2424 case NoClass: break; 2425 2426 case Integer: 2427 ++neededInt; 2428 // Pick an 8-byte type based on the preferred type. 2429 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2430 2431 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 
2432 return ABIArgInfo::getDirect(HighPart, 8); 2433 break; 2434 2435 // X87Up generally doesn't occur here (long double is passed in 2436 // memory), except in situations involving unions. 2437 case X87Up: 2438 case SSE: 2439 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2440 2441 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2442 return ABIArgInfo::getDirect(HighPart, 8); 2443 2444 ++neededSSE; 2445 break; 2446 2447 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 2448 // eightbyte is passed in the upper half of the last used SSE 2449 // register. This only happens when 128-bit vectors are passed. 2450 case SSEUp: 2451 assert(Lo == SSE && "Unexpected SSEUp classification"); 2452 ResType = GetByteVectorType(Ty); 2453 break; 2454 } 2455 2456 // If a high part was specified, merge it together with the low part. It is 2457 // known to pass in the high eightbyte of the result. We do this by forming a 2458 // first class struct aggregate with the high and low part: {low, high} 2459 if (HighPart) 2460 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2461 2462 return ABIArgInfo::getDirect(ResType); 2463 } 2464 2465 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2466 2467 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2468 2469 // Keep track of the number of assigned registers. 2470 unsigned freeIntRegs = 6, freeSSERegs = 8; 2471 2472 // If the return value is indirect, then the hidden argument is consuming one 2473 // integer register. 2474 if (FI.getReturnInfo().isIndirect()) 2475 --freeIntRegs; 2476 2477 bool isVariadic = FI.isVariadic(); 2478 unsigned numRequiredArgs = 0; 2479 if (isVariadic) 2480 numRequiredArgs = FI.getRequiredArgs().getNumRequiredArgs(); 2481 2482 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 2483 // get assigned (in left-to-right order) for passing as follows... 2484 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2485 it != ie; ++it) { 2486 bool isNamedArg = true; 2487 if (isVariadic) 2488 isNamedArg = (it - FI.arg_begin()) < 2489 static_cast<signed>(numRequiredArgs); 2490 2491 unsigned neededInt, neededSSE; 2492 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt, 2493 neededSSE, isNamedArg); 2494 2495 // AMD64-ABI 3.2.3p3: If there are no registers available for any 2496 // eightbyte of an argument, the whole argument is passed on the 2497 // stack. If registers have already been assigned for some 2498 // eightbytes of such an argument, the assignments get reverted. 2499 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 2500 freeIntRegs -= neededInt; 2501 freeSSERegs -= neededSSE; 2502 } else { 2503 it->info = getIndirectResult(it->type, freeIntRegs); 2504 } 2505 } 2506 } 2507 2508 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 2509 QualType Ty, 2510 CodeGenFunction &CGF) { 2511 llvm::Value *overflow_arg_area_p = 2512 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 2513 llvm::Value *overflow_arg_area = 2514 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 2515 2516 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 2517 // byte boundary if alignment needed by type exceeds 8 byte boundary. 2518 // It isn't stated explicitly in the standard, but in practice we use 2519 // alignment greater than 16 where necessary. 
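  // Worked example (illustrative): for a 32-byte-aligned type with
  // overflow_arg_area currently at 0x1008, the code below computes
  // (0x1008 + 31) & -32 == 0x1020, i.e. the next 32-byte boundary.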
2520 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 2521 if (Align > 8) { 2522 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 2523 llvm::Value *Offset = 2524 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 2525 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 2526 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 2527 CGF.Int64Ty); 2528 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); 2529 overflow_arg_area = 2530 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 2531 overflow_arg_area->getType(), 2532 "overflow_arg_area.align"); 2533 } 2534 2535 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 2536 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2537 llvm::Value *Res = 2538 CGF.Builder.CreateBitCast(overflow_arg_area, 2539 llvm::PointerType::getUnqual(LTy)); 2540 2541 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 2542 // l->overflow_arg_area + sizeof(type). 2543 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 2544 // an 8 byte boundary. 2545 2546 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 2547 llvm::Value *Offset = 2548 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 2549 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 2550 "overflow_arg_area.next"); 2551 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 2552 2553 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 2554 return Res; 2555 } 2556 2557 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2558 CodeGenFunction &CGF) const { 2559 // Assume that va_list type is correct; should be pointer to LLVM type: 2560 // struct { 2561 // i32 gp_offset; 2562 // i32 fp_offset; 2563 // i8* overflow_arg_area; 2564 // i8* reg_save_area; 2565 // }; 2566 unsigned neededInt, neededSSE; 2567 2568 Ty = CGF.getContext().getCanonicalType(Ty); 2569 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE, 2570 /*isNamedArg*/false); 2571 2572 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 2573 // in the registers. If not go to step 7. 2574 if (!neededInt && !neededSSE) 2575 return EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2576 2577 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 2578 // general purpose registers needed to pass type and num_fp to hold 2579 // the number of floating point registers needed. 2580 2581 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 2582 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 2583 // l->fp_offset > 304 - num_fp * 16 go to step 7. 2584 // 2585 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of 2586 // register save space). 
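  // Register save area layout, for reference (ABI fact, no new logic):
  //   bytes   0..47 : %rdi, %rsi, %rdx, %rcx, %r8, %r9  (indexed by gp_offset)
  //   bytes  48..175: %xmm0..%xmm7, 16 bytes apart       (indexed by fp_offset)
  // which is where the "48 - num_gp * 8" and "176 - num_fp * 16" bounds below
  // come from.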
2587 2588 llvm::Value *InRegs = 0; 2589 llvm::Value *gp_offset_p = 0, *gp_offset = 0; 2590 llvm::Value *fp_offset_p = 0, *fp_offset = 0; 2591 if (neededInt) { 2592 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); 2593 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); 2594 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); 2595 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); 2596 } 2597 2598 if (neededSSE) { 2599 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); 2600 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); 2601 llvm::Value *FitsInFP = 2602 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); 2603 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); 2604 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; 2605 } 2606 2607 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 2608 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 2609 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 2610 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 2611 2612 // Emit code to load the value if it was passed in registers. 2613 2614 CGF.EmitBlock(InRegBlock); 2615 2616 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with 2617 // an offset of l->gp_offset and/or l->fp_offset. This may require 2618 // copying to a temporary location in case the parameter is passed 2619 // in different register classes or requires an alignment greater 2620 // than 8 for general purpose registers and 16 for XMM registers. 2621 // 2622 // FIXME: This really results in shameful code when we end up needing to 2623 // collect arguments from different places; often what should result in a 2624 // simple assembling of a structure from scattered addresses has many more 2625 // loads than necessary. Can we clean this up? 2626 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2627 llvm::Value *RegAddr = 2628 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3), 2629 "reg_save_area"); 2630 if (neededInt && neededSSE) { 2631 // FIXME: Cleanup. 2632 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 2633 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 2634 llvm::Value *Tmp = CGF.CreateMemTemp(Ty); 2635 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo()); 2636 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 2637 llvm::Type *TyLo = ST->getElementType(0); 2638 llvm::Type *TyHi = ST->getElementType(1); 2639 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 2640 "Unexpected ABI info for mixed regs"); 2641 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 2642 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 2643 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2644 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2645 llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr; 2646 llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? 
GPAddr : FPAddr; 2647 llvm::Value *V = 2648 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 2649 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2650 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 2651 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2652 2653 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2654 llvm::PointerType::getUnqual(LTy)); 2655 } else if (neededInt) { 2656 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2657 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2658 llvm::PointerType::getUnqual(LTy)); 2659 2660 // Copy to a temporary if necessary to ensure the appropriate alignment. 2661 std::pair<CharUnits, CharUnits> SizeAlign = 2662 CGF.getContext().getTypeInfoInChars(Ty); 2663 uint64_t TySize = SizeAlign.first.getQuantity(); 2664 unsigned TyAlign = SizeAlign.second.getQuantity(); 2665 if (TyAlign > 8) { 2666 llvm::Value *Tmp = CGF.CreateMemTemp(Ty); 2667 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false); 2668 RegAddr = Tmp; 2669 } 2670 } else if (neededSSE == 1) { 2671 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2672 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2673 llvm::PointerType::getUnqual(LTy)); 2674 } else { 2675 assert(neededSSE == 2 && "Invalid number of needed registers!"); 2676 // SSE registers are spaced 16 bytes apart in the register save 2677 // area, we need to collect the two eightbytes together. 2678 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2679 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); 2680 llvm::Type *DoubleTy = CGF.DoubleTy; 2681 llvm::Type *DblPtrTy = 2682 llvm::PointerType::getUnqual(DoubleTy); 2683 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, NULL); 2684 llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty); 2685 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo()); 2686 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 2687 DblPtrTy)); 2688 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2689 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 2690 DblPtrTy)); 2691 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2692 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2693 llvm::PointerType::getUnqual(LTy)); 2694 } 2695 2696 // AMD64-ABI 3.5.7p5: Step 5. Set: 2697 // l->gp_offset = l->gp_offset + num_gp * 8 2698 // l->fp_offset = l->fp_offset + num_fp * 16. 2699 if (neededInt) { 2700 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2701 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2702 gp_offset_p); 2703 } 2704 if (neededSSE) { 2705 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2706 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2707 fp_offset_p); 2708 } 2709 CGF.EmitBranch(ContBlock); 2710 2711 // Emit code to load the value if it was passed in memory. 2712 2713 CGF.EmitBlock(InMemBlock); 2714 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2715 2716 // Return the appropriate result. 
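  // (Illustrative note: both vaarg.in_reg and vaarg.in_mem branch to
  // vaarg.end, where the phi below selects between RegAddr and MemAddr, so
  // callers always receive a single address for the fetched argument.)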
2717 2718 CGF.EmitBlock(ContBlock); 2719 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2, 2720 "vaarg.addr"); 2721 ResAddr->addIncoming(RegAddr, InRegBlock); 2722 ResAddr->addIncoming(MemAddr, InMemBlock); 2723 return ResAddr; 2724 } 2725 2726 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, bool IsReturnType) const { 2727 2728 if (Ty->isVoidType()) 2729 return ABIArgInfo::getIgnore(); 2730 2731 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2732 Ty = EnumTy->getDecl()->getIntegerType(); 2733 2734 uint64_t Size = getContext().getTypeSize(Ty); 2735 2736 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2737 if (IsReturnType) { 2738 if (isRecordReturnIndirect(RT, getCXXABI())) 2739 return ABIArgInfo::getIndirect(0, false); 2740 } else { 2741 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI())) 2742 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 2743 } 2744 2745 if (RT->getDecl()->hasFlexibleArrayMember()) 2746 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2747 2748 // FIXME: mingw-w64-gcc emits 128-bit struct as i128 2749 if (Size == 128 && getTarget().getTriple().getOS() == llvm::Triple::MinGW32) 2750 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2751 Size)); 2752 2753 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 2754 // not 1, 2, 4, or 8 bytes, must be passed by reference." 2755 if (Size <= 64 && 2756 (Size & (Size - 1)) == 0) 2757 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2758 Size)); 2759 2760 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2761 } 2762 2763 if (Ty->isPromotableIntegerType()) 2764 return ABIArgInfo::getExtend(); 2765 2766 return ABIArgInfo::getDirect(); 2767 } 2768 2769 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2770 2771 QualType RetTy = FI.getReturnType(); 2772 FI.getReturnInfo() = classify(RetTy, true); 2773 2774 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2775 it != ie; ++it) 2776 it->info = classify(it->type, false); 2777 } 2778 2779 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2780 CodeGenFunction &CGF) const { 2781 llvm::Type *BPP = CGF.Int8PtrPtrTy; 2782 2783 CGBuilderTy &Builder = CGF.Builder; 2784 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 2785 "ap"); 2786 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 2787 llvm::Type *PTy = 2788 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 2789 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 2790 2791 uint64_t Offset = 2792 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8); 2793 llvm::Value *NextAddr = 2794 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 2795 "ap.next"); 2796 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 2797 2798 return AddrTyped; 2799 } 2800 2801 namespace { 2802 2803 class NaClX86_64ABIInfo : public ABIInfo { 2804 public: 2805 NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) 2806 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {} 2807 virtual void computeInfo(CGFunctionInfo &FI) const; 2808 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2809 CodeGenFunction &CGF) const; 2810 private: 2811 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv. 2812 X86_64ABIInfo NInfo; // Used for everything else. 
2813 }; 2814 2815 class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 2816 public: 2817 NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) 2818 : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {} 2819 }; 2820 2821 } 2822 2823 void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2824 if (FI.getASTCallingConvention() == CC_PnaclCall) 2825 PInfo.computeInfo(FI); 2826 else 2827 NInfo.computeInfo(FI); 2828 } 2829 2830 llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2831 CodeGenFunction &CGF) const { 2832 // Always use the native convention; calling pnacl-style varargs functions 2833 // is unuspported. 2834 return NInfo.EmitVAArg(VAListAddr, Ty, CGF); 2835 } 2836 2837 2838 // PowerPC-32 2839 2840 namespace { 2841 class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2842 public: 2843 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2844 2845 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2846 // This is recovered from gcc output. 2847 return 1; // r1 is the dedicated stack pointer 2848 } 2849 2850 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2851 llvm::Value *Address) const; 2852 }; 2853 2854 } 2855 2856 bool 2857 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2858 llvm::Value *Address) const { 2859 // This is calculated from the LLVM and GCC tables and verified 2860 // against gcc output. AFAIK all ABIs use the same encoding. 2861 2862 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2863 2864 llvm::IntegerType *i8 = CGF.Int8Ty; 2865 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 2866 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 2867 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 2868 2869 // 0-31: r0-31, the 4-byte general-purpose registers 2870 AssignToArrayRange(Builder, Address, Four8, 0, 31); 2871 2872 // 32-63: fp0-31, the 8-byte floating-point registers 2873 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 2874 2875 // 64-76 are various 4-byte special-purpose registers: 2876 // 64: mq 2877 // 65: lr 2878 // 66: ctr 2879 // 67: ap 2880 // 68-75 cr0-7 2881 // 76: xer 2882 AssignToArrayRange(Builder, Address, Four8, 64, 76); 2883 2884 // 77-108: v0-31, the 16-byte vector registers 2885 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 2886 2887 // 109: vrsave 2888 // 110: vscr 2889 // 111: spe_acc 2890 // 112: spefscr 2891 // 113: sfp 2892 AssignToArrayRange(Builder, Address, Four8, 109, 113); 2893 2894 return false; 2895 } 2896 2897 // PowerPC-64 2898 2899 namespace { 2900 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. 2901 class PPC64_SVR4_ABIInfo : public DefaultABIInfo { 2902 2903 public: 2904 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 2905 2906 bool isPromotableTypeForABI(QualType Ty) const; 2907 2908 ABIArgInfo classifyReturnType(QualType RetTy) const; 2909 ABIArgInfo classifyArgumentType(QualType Ty) const; 2910 2911 // TODO: We can add more logic to computeInfo to improve performance. 2912 // Example: For aggregate arguments that fit in a register, we could 2913 // use getDirectInReg (as is done below for structs containing a single 2914 // floating-point value) to avoid pushing them to memory on function 2915 // entry. This would require changing the logic in PPCISelLowering 2916 // when lowering the parameters in the caller and args in the callee. 
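  // For illustration (an example of the single-element case handled in
  // computeInfo below, not new behaviour): with
  //   struct F { float f; };          // isSingleElementStruct yields 'float'
  //   struct V { vector int v; };     // yields the AltiVec vector type
  // both are given ABIArgInfo::getDirectInReg so they travel in a register
  // when one is free, while other aggregates use classifyArgumentType.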
2917 virtual void computeInfo(CGFunctionInfo &FI) const { 2918 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2919 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2920 it != ie; ++it) { 2921 // We rely on the default argument classification for the most part. 2922 // One exception: An aggregate containing a single floating-point 2923 // or vector item must be passed in a register if one is available. 2924 const Type *T = isSingleElementStruct(it->type, getContext()); 2925 if (T) { 2926 const BuiltinType *BT = T->getAs<BuiltinType>(); 2927 if (T->isVectorType() || (BT && BT->isFloatingPoint())) { 2928 QualType QT(T, 0); 2929 it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); 2930 continue; 2931 } 2932 } 2933 it->info = classifyArgumentType(it->type); 2934 } 2935 } 2936 2937 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, 2938 QualType Ty, 2939 CodeGenFunction &CGF) const; 2940 }; 2941 2942 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { 2943 public: 2944 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT) 2945 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {} 2946 2947 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2948 // This is recovered from gcc output. 2949 return 1; // r1 is the dedicated stack pointer 2950 } 2951 2952 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2953 llvm::Value *Address) const; 2954 }; 2955 2956 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 2957 public: 2958 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 2959 2960 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 2961 // This is recovered from gcc output. 2962 return 1; // r1 is the dedicated stack pointer 2963 } 2964 2965 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2966 llvm::Value *Address) const; 2967 }; 2968 2969 } 2970 2971 // Return true if the ABI requires Ty to be passed sign- or zero- 2972 // extended to 64 bits. 2973 bool 2974 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const { 2975 // Treat an enum type as its underlying type. 2976 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2977 Ty = EnumTy->getDecl()->getIntegerType(); 2978 2979 // Promotable integer types are required to be promoted by the ABI. 2980 if (Ty->isPromotableIntegerType()) 2981 return true; 2982 2983 // In addition to the usual promotable integer types, we also need to 2984 // extend all 32-bit types, since the ABI requires promotion to 64 bits. 2985 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 2986 switch (BT->getKind()) { 2987 case BuiltinType::Int: 2988 case BuiltinType::UInt: 2989 return true; 2990 default: 2991 break; 2992 } 2993 2994 return false; 2995 } 2996 2997 ABIArgInfo 2998 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { 2999 if (Ty->isAnyComplexType()) 3000 return ABIArgInfo::getDirect(); 3001 3002 if (isAggregateTypeForABI(Ty)) { 3003 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 3004 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 3005 3006 return ABIArgInfo::getIndirect(0); 3007 } 3008 3009 return (isPromotableTypeForABI(Ty) ? 
3010 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3011 } 3012 3013 ABIArgInfo 3014 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { 3015 if (RetTy->isVoidType()) 3016 return ABIArgInfo::getIgnore(); 3017 3018 if (RetTy->isAnyComplexType()) 3019 return ABIArgInfo::getDirect(); 3020 3021 if (isAggregateTypeForABI(RetTy)) 3022 return ABIArgInfo::getIndirect(0); 3023 3024 return (isPromotableTypeForABI(RetTy) ? 3025 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3026 } 3027 3028 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 3029 llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr, 3030 QualType Ty, 3031 CodeGenFunction &CGF) const { 3032 llvm::Type *BP = CGF.Int8PtrTy; 3033 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3034 3035 CGBuilderTy &Builder = CGF.Builder; 3036 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 3037 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3038 3039 // Update the va_list pointer. The pointer should be bumped by the 3040 // size of the object. We can trust getTypeSize() except for a complex 3041 // type whose base type is smaller than a doubleword. For these, the 3042 // size of the object is 16 bytes; see below for further explanation. 3043 unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8; 3044 QualType BaseTy; 3045 unsigned CplxBaseSize = 0; 3046 3047 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 3048 BaseTy = CTy->getElementType(); 3049 CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8; 3050 if (CplxBaseSize < 8) 3051 SizeInBytes = 16; 3052 } 3053 3054 unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8); 3055 llvm::Value *NextAddr = 3056 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), 3057 "ap.next"); 3058 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3059 3060 // If we have a complex type and the base type is smaller than 8 bytes, 3061 // the ABI calls for the real and imaginary parts to be right-adjusted 3062 // in separate doublewords. However, Clang expects us to produce a 3063 // pointer to a structure with the two parts packed tightly. So generate 3064 // loads of the real and imaginary parts relative to the va_list pointer, 3065 // and store them to a temporary structure. 3066 if (CplxBaseSize && CplxBaseSize < 8) { 3067 llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 3068 llvm::Value *ImagAddr = RealAddr; 3069 RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize)); 3070 ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize)); 3071 llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy)); 3072 RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy); 3073 ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy); 3074 llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal"); 3075 llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag"); 3076 llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty), 3077 "vacplx"); 3078 llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real"); 3079 llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag"); 3080 Builder.CreateStore(Real, RealPtr, false); 3081 Builder.CreateStore(Imag, ImagPtr, false); 3082 return Ptr; 3083 } 3084 3085 // If the argument is smaller than 8 bytes, it is right-adjusted in 3086 // its doubleword slot. Adjust the pointer to pick it up from the 3087 // correct offset. 
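  // Worked example (illustrative): a 4-byte int occupies the high-address
  // half of its 8-byte slot on this big-endian target, so with SizeInBytes
  // == 4 the code below advances Addr by 8 - 4 = 4 bytes to reach the value.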
3088 if (SizeInBytes < 8) { 3089 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 3090 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes)); 3091 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 3092 } 3093 3094 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3095 return Builder.CreateBitCast(Addr, PTy); 3096 } 3097 3098 static bool 3099 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3100 llvm::Value *Address) { 3101 // This is calculated from the LLVM and GCC tables and verified 3102 // against gcc output. AFAIK all ABIs use the same encoding. 3103 3104 CodeGen::CGBuilderTy &Builder = CGF.Builder; 3105 3106 llvm::IntegerType *i8 = CGF.Int8Ty; 3107 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 3108 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 3109 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 3110 3111 // 0-31: r0-31, the 8-byte general-purpose registers 3112 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 3113 3114 // 32-63: fp0-31, the 8-byte floating-point registers 3115 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 3116 3117 // 64-76 are various 4-byte special-purpose registers: 3118 // 64: mq 3119 // 65: lr 3120 // 66: ctr 3121 // 67: ap 3122 // 68-75 cr0-7 3123 // 76: xer 3124 AssignToArrayRange(Builder, Address, Four8, 64, 76); 3125 3126 // 77-108: v0-31, the 16-byte vector registers 3127 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 3128 3129 // 109: vrsave 3130 // 110: vscr 3131 // 111: spe_acc 3132 // 112: spefscr 3133 // 113: sfp 3134 AssignToArrayRange(Builder, Address, Four8, 109, 113); 3135 3136 return false; 3137 } 3138 3139 bool 3140 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( 3141 CodeGen::CodeGenFunction &CGF, 3142 llvm::Value *Address) const { 3143 3144 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 3145 } 3146 3147 bool 3148 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3149 llvm::Value *Address) const { 3150 3151 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 3152 } 3153 3154 //===----------------------------------------------------------------------===// 3155 // ARM ABI Implementation 3156 //===----------------------------------------------------------------------===// 3157 3158 namespace { 3159 3160 class ARMABIInfo : public ABIInfo { 3161 public: 3162 enum ABIKind { 3163 APCS = 0, 3164 AAPCS = 1, 3165 AAPCS_VFP 3166 }; 3167 3168 private: 3169 ABIKind Kind; 3170 3171 public: 3172 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) { 3173 setRuntimeCC(); 3174 } 3175 3176 bool isEABI() const { 3177 switch (getTarget().getTriple().getEnvironment()) { 3178 case llvm::Triple::Android: 3179 case llvm::Triple::EABI: 3180 case llvm::Triple::EABIHF: 3181 case llvm::Triple::GNUEABI: 3182 case llvm::Triple::GNUEABIHF: 3183 return true; 3184 default: 3185 return false; 3186 } 3187 } 3188 3189 bool isEABIHF() const { 3190 switch (getTarget().getTriple().getEnvironment()) { 3191 case llvm::Triple::EABIHF: 3192 case llvm::Triple::GNUEABIHF: 3193 return true; 3194 default: 3195 return false; 3196 } 3197 } 3198 3199 ABIKind getABIKind() const { return Kind; } 3200 3201 private: 3202 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const; 3203 ABIArgInfo classifyArgumentType(QualType RetTy, int *VFPRegs, 3204 unsigned &AllocatedVFP, 3205 bool &IsHA, bool isVariadic) const; 3206 bool isIllegalVectorType(QualType Ty) const; 3207 3208 virtual void computeInfo(CGFunctionInfo &FI) 
const; 3209 3210 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3211 CodeGenFunction &CGF) const; 3212 3213 llvm::CallingConv::ID getLLVMDefaultCC() const; 3214 llvm::CallingConv::ID getABIDefaultCC() const; 3215 void setRuntimeCC(); 3216 }; 3217 3218 class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 3219 public: 3220 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 3221 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 3222 3223 const ARMABIInfo &getABIInfo() const { 3224 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 3225 } 3226 3227 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 3228 return 13; 3229 } 3230 3231 StringRef getARCRetainAutoreleasedReturnValueMarker() const { 3232 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; 3233 } 3234 3235 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3236 llvm::Value *Address) const { 3237 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 3238 3239 // 0-15 are the 16 integer registers. 3240 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); 3241 return false; 3242 } 3243 3244 unsigned getSizeOfUnwindException() const { 3245 if (getABIInfo().isEABI()) return 88; 3246 return TargetCodeGenInfo::getSizeOfUnwindException(); 3247 } 3248 3249 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 3250 CodeGen::CodeGenModule &CGM) const { 3251 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 3252 if (!FD) 3253 return; 3254 3255 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>(); 3256 if (!Attr) 3257 return; 3258 3259 const char *Kind; 3260 switch (Attr->getInterrupt()) { 3261 case ARMInterruptAttr::Generic: Kind = ""; break; 3262 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break; 3263 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break; 3264 case ARMInterruptAttr::SWI: Kind = "SWI"; break; 3265 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break; 3266 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break; 3267 } 3268 3269 llvm::Function *Fn = cast<llvm::Function>(GV); 3270 3271 Fn->addFnAttr("interrupt", Kind); 3272 3273 if (cast<ARMABIInfo>(getABIInfo()).getABIKind() == ARMABIInfo::APCS) 3274 return; 3275 3276 // AAPCS guarantees that sp will be 8-byte aligned on any public interface, 3277 // however this is not necessarily true on taking any interrupt. Instruct 3278 // the backend to perform a realignment as part of the function prologue. 3279 llvm::AttrBuilder B; 3280 B.addStackAlignmentAttr(8); 3281 Fn->addAttributes(llvm::AttributeSet::FunctionIndex, 3282 llvm::AttributeSet::get(CGM.getLLVMContext(), 3283 llvm::AttributeSet::FunctionIndex, 3284 B)); 3285 } 3286 3287 }; 3288 3289 } 3290 3291 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 3292 // To correctly handle Homogeneous Aggregate, we need to keep track of the 3293 // VFP registers allocated so far. 3294 // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive 3295 // VFP registers of the appropriate type unallocated then the argument is 3296 // allocated to the lowest-numbered sequence of such registers. 3297 // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are 3298 // unallocated are marked as unavailable. 
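// Illustrative walk-through (hypothetical signature, assuming AAPCS-VFP and a
// non-variadic callee): for f(float, struct { double d[2]; }) the float is
// allocated to s0 and the homogeneous aggregate to d1-d2 (s2-s5); VFPRegs[]
// tracks this at single-precision (S register) granularity.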
3299 unsigned AllocatedVFP = 0; 3300 int VFPRegs[16] = { 0 }; 3301 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic()); 3302 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3303 it != ie; ++it) { 3304 unsigned PreAllocation = AllocatedVFP; 3305 bool IsHA = false; 3306 // 6.1.2.3 There is one VFP co-processor register class using registers 3307 // s0-s15 (d0-d7) for passing arguments. 3308 const unsigned NumVFPs = 16; 3309 it->info = classifyArgumentType(it->type, VFPRegs, AllocatedVFP, IsHA, FI.isVariadic()); 3310 // If we do not have enough VFP registers for the HA, any VFP registers 3311 // that are unallocated are marked as unavailable. To achieve this, we add 3312 // padding of (NumVFPs - PreAllocation) floats. 3313 // Note that IsHA will only be set when using the AAPCS-VFP calling convention, 3314 // and the callee is not variadic. 3315 if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) { 3316 llvm::Type *PaddingTy = llvm::ArrayType::get( 3317 llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation); 3318 it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy); 3319 } 3320 } 3321 3322 // Always honor user-specified calling convention. 3323 if (FI.getCallingConvention() != llvm::CallingConv::C) 3324 return; 3325 3326 llvm::CallingConv::ID cc = getRuntimeCC(); 3327 if (cc != llvm::CallingConv::C) 3328 FI.setEffectiveCallingConvention(cc); 3329 } 3330 3331 /// Return the default calling convention that LLVM will use. 3332 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const { 3333 // The default calling convention that LLVM will infer. 3334 if (isEABIHF()) 3335 return llvm::CallingConv::ARM_AAPCS_VFP; 3336 else if (isEABI()) 3337 return llvm::CallingConv::ARM_AAPCS; 3338 else 3339 return llvm::CallingConv::ARM_APCS; 3340 } 3341 3342 /// Return the calling convention that our ABI would like us to use 3343 /// as the C calling convention. 3344 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const { 3345 switch (getABIKind()) { 3346 case APCS: return llvm::CallingConv::ARM_APCS; 3347 case AAPCS: return llvm::CallingConv::ARM_AAPCS; 3348 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 3349 } 3350 llvm_unreachable("bad ABI kind"); 3351 } 3352 3353 void ARMABIInfo::setRuntimeCC() { 3354 assert(getRuntimeCC() == llvm::CallingConv::C); 3355 3356 // Don't muddy up the IR with a ton of explicit annotations if 3357 // they'd just match what LLVM will infer from the triple. 3358 llvm::CallingConv::ID abiCC = getABIDefaultCC(); 3359 if (abiCC != getLLVMDefaultCC()) 3360 RuntimeCC = abiCC; 3361 } 3362 3363 /// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous 3364 /// aggregate. If HAMembers is non-null, the number of base elements 3365 /// contained in the type is returned through it; this is used for the 3366 /// recursive calls that check aggregate component types. 
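/// For example, "struct S { float x, y, z; };" is a homogeneous aggregate with
/// Base == float and three members, whereas "struct T { float f; double d; };"
/// is not, because its member base types differ.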
3367 static bool isHomogeneousAggregate(QualType Ty, const Type *&Base, 3368 ASTContext &Context, 3369 uint64_t *HAMembers = 0) { 3370 uint64_t Members = 0; 3371 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 3372 if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members)) 3373 return false; 3374 Members *= AT->getSize().getZExtValue(); 3375 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 3376 const RecordDecl *RD = RT->getDecl(); 3377 if (RD->hasFlexibleArrayMember()) 3378 return false; 3379 3380 Members = 0; 3381 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3382 i != e; ++i) { 3383 const FieldDecl *FD = *i; 3384 uint64_t FldMembers; 3385 if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers)) 3386 return false; 3387 3388 Members = (RD->isUnion() ? 3389 std::max(Members, FldMembers) : Members + FldMembers); 3390 } 3391 } else { 3392 Members = 1; 3393 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 3394 Members = 2; 3395 Ty = CT->getElementType(); 3396 } 3397 3398 // Homogeneous aggregates for AAPCS-VFP must have base types of float, 3399 // double, or 64-bit or 128-bit vectors. 3400 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 3401 if (BT->getKind() != BuiltinType::Float && 3402 BT->getKind() != BuiltinType::Double && 3403 BT->getKind() != BuiltinType::LongDouble) 3404 return false; 3405 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 3406 unsigned VecSize = Context.getTypeSize(VT); 3407 if (VecSize != 64 && VecSize != 128) 3408 return false; 3409 } else { 3410 return false; 3411 } 3412 3413 // The base type must be the same for all members. Vector types of the 3414 // same total size are treated as being equivalent here. 3415 const Type *TyPtr = Ty.getTypePtr(); 3416 if (!Base) 3417 Base = TyPtr; 3418 if (Base != TyPtr && 3419 (!Base->isVectorType() || !TyPtr->isVectorType() || 3420 Context.getTypeSize(Base) != Context.getTypeSize(TyPtr))) 3421 return false; 3422 } 3423 3424 // Homogeneous Aggregates can have at most 4 members of the base type. 3425 if (HAMembers) 3426 *HAMembers = Members; 3427 3428 return (Members > 0 && Members <= 4); 3429 } 3430 3431 /// markAllocatedVFPs - update VFPRegs according to the alignment and 3432 /// number of VFP registers (unit is S register) requested. 3433 static void markAllocatedVFPs(int *VFPRegs, unsigned &AllocatedVFP, 3434 unsigned Alignment, 3435 unsigned NumRequired) { 3436 // Early Exit. 3437 if (AllocatedVFP >= 16) 3438 return; 3439 // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive 3440 // VFP registers of the appropriate type unallocated then the argument is 3441 // allocated to the lowest-numbered sequence of such registers. 3442 for (unsigned I = 0; I < 16; I += Alignment) { 3443 bool FoundSlot = true; 3444 for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++) 3445 if (J >= 16 || VFPRegs[J]) { 3446 FoundSlot = false; 3447 break; 3448 } 3449 if (FoundSlot) { 3450 for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++) 3451 VFPRegs[J] = 1; 3452 AllocatedVFP += NumRequired; 3453 return; 3454 } 3455 } 3456 // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are 3457 // unallocated are marked as unavailable. 3458 for (unsigned I = 0; I < 16; I++) 3459 VFPRegs[I] = 1; 3460 AllocatedVFP = 17; // We do not have enough VFP registers. 
3461 } 3462 3463 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, int *VFPRegs, 3464 unsigned &AllocatedVFP, 3465 bool &IsHA, bool isVariadic) const { 3466 // We update number of allocated VFPs according to 3467 // 6.1.2.1 The following argument types are VFP CPRCs: 3468 // A single-precision floating-point type (including promoted 3469 // half-precision types); A double-precision floating-point type; 3470 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate 3471 // with a Base Type of a single- or double-precision floating-point type, 3472 // 64-bit containerized vectors or 128-bit containerized vectors with one 3473 // to four Elements. 3474 3475 // Handle illegal vector types here. 3476 if (isIllegalVectorType(Ty)) { 3477 uint64_t Size = getContext().getTypeSize(Ty); 3478 if (Size <= 32) { 3479 llvm::Type *ResType = 3480 llvm::Type::getInt32Ty(getVMContext()); 3481 return ABIArgInfo::getDirect(ResType); 3482 } 3483 if (Size == 64) { 3484 llvm::Type *ResType = llvm::VectorType::get( 3485 llvm::Type::getInt32Ty(getVMContext()), 2); 3486 markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2); 3487 return ABIArgInfo::getDirect(ResType); 3488 } 3489 if (Size == 128) { 3490 llvm::Type *ResType = llvm::VectorType::get( 3491 llvm::Type::getInt32Ty(getVMContext()), 4); 3492 markAllocatedVFPs(VFPRegs, AllocatedVFP, 4, 4); 3493 return ABIArgInfo::getDirect(ResType); 3494 } 3495 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3496 } 3497 // Update VFPRegs for legal vector types. 3498 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3499 uint64_t Size = getContext().getTypeSize(VT); 3500 // Size of a legal vector should be power of 2 and above 64. 3501 markAllocatedVFPs(VFPRegs, AllocatedVFP, Size >= 128 ? 4 : 2, Size / 32); 3502 } 3503 // Update VFPRegs for floating point types. 3504 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 3505 if (BT->getKind() == BuiltinType::Half || 3506 BT->getKind() == BuiltinType::Float) 3507 markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, 1); 3508 if (BT->getKind() == BuiltinType::Double || 3509 BT->getKind() == BuiltinType::LongDouble) 3510 markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2); 3511 } 3512 3513 if (!isAggregateTypeForABI(Ty)) { 3514 // Treat an enum type as its underlying type. 3515 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3516 Ty = EnumTy->getDecl()->getIntegerType(); 3517 3518 return (Ty->isPromotableIntegerType() ? 3519 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3520 } 3521 3522 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 3523 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 3524 3525 // Ignore empty records. 3526 if (isEmptyRecord(getContext(), Ty, true)) 3527 return ABIArgInfo::getIgnore(); 3528 3529 if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) { 3530 // Homogeneous Aggregates need to be expanded when we can fit the aggregate 3531 // into VFP registers. 3532 const Type *Base = 0; 3533 uint64_t Members = 0; 3534 if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) { 3535 assert(Base && "Base class should be set for homogeneous aggregate"); 3536 // Base can be a floating-point or a vector. 3537 if (Base->isVectorType()) { 3538 // ElementSize is in number of floats. 3539 unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 
2 : 4; 3540 markAllocatedVFPs(VFPRegs, AllocatedVFP, ElementSize, 3541 Members * ElementSize); 3542 } else if (Base->isSpecificBuiltinType(BuiltinType::Float)) 3543 markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, Members); 3544 else { 3545 assert(Base->isSpecificBuiltinType(BuiltinType::Double) || 3546 Base->isSpecificBuiltinType(BuiltinType::LongDouble)); 3547 markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, Members * 2); 3548 } 3549 IsHA = true; 3550 return ABIArgInfo::getExpand(); 3551 } 3552 } 3553 3554 // Support byval for ARM. 3555 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at 3556 // most 8-byte. We realign the indirect argument if type alignment is bigger 3557 // than ABI alignment. 3558 uint64_t ABIAlign = 4; 3559 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8; 3560 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 3561 getABIKind() == ARMABIInfo::AAPCS) 3562 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 3563 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) { 3564 return ABIArgInfo::getIndirect(0, /*ByVal=*/true, 3565 /*Realign=*/TyAlign > ABIAlign); 3566 } 3567 3568 // Otherwise, pass by coercing to a structure of the appropriate size. 3569 llvm::Type* ElemTy; 3570 unsigned SizeRegs; 3571 // FIXME: Try to match the types of the arguments more accurately where 3572 // we can. 3573 if (getContext().getTypeAlign(Ty) <= 32) { 3574 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 3575 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 3576 } else { 3577 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 3578 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 3579 } 3580 3581 llvm::Type *STy = 3582 llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL); 3583 return ABIArgInfo::getDirect(STy); 3584 } 3585 3586 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 3587 llvm::LLVMContext &VMContext) { 3588 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 3589 // is called integer-like if its size is less than or equal to one word, and 3590 // the offset of each of its addressable sub-fields is zero. 3591 3592 uint64_t Size = Context.getTypeSize(Ty); 3593 3594 // Check that the type fits in a word. 3595 if (Size > 32) 3596 return false; 3597 3598 // FIXME: Handle vector types! 3599 if (Ty->isVectorType()) 3600 return false; 3601 3602 // Float types are never treated as "integer like". 3603 if (Ty->isRealFloatingType()) 3604 return false; 3605 3606 // If this is a builtin or pointer type then it is ok. 3607 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 3608 return true; 3609 3610 // Small complex integer types are "integer like". 3611 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 3612 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 3613 3614 // Single element and zero sized arrays should be allowed, by the definition 3615 // above, but they are not. 3616 3617 // Otherwise, it must be a record type. 3618 const RecordType *RT = Ty->getAs<RecordType>(); 3619 if (!RT) return false; 3620 3621 // Ignore records with flexible arrays. 3622 const RecordDecl *RD = RT->getDecl(); 3623 if (RD->hasFlexibleArrayMember()) 3624 return false; 3625 3626 // Check that all sub-fields are at offset 0, and are themselves "integer 3627 // like". 
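// For example, "union { char c; short s; }" is integer-like (every field is at
// offset 0 and is itself integer-like), whereas "struct { char a, b; }" is not,
// because 'b' sits at offset 8.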
3628 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 3629 3630 bool HadField = false; 3631 unsigned idx = 0; 3632 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3633 i != e; ++i, ++idx) { 3634 const FieldDecl *FD = *i; 3635 3636 // Bit-fields are not addressable, we only need to verify they are "integer 3637 // like". We still have to disallow a subsequent non-bitfield, for example: 3638 // struct { int : 0; int x } 3639 // is non-integer like according to gcc. 3640 if (FD->isBitField()) { 3641 if (!RD->isUnion()) 3642 HadField = true; 3643 3644 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 3645 return false; 3646 3647 continue; 3648 } 3649 3650 // Check if this field is at offset 0. 3651 if (Layout.getFieldOffset(idx) != 0) 3652 return false; 3653 3654 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 3655 return false; 3656 3657 // Only allow at most one field in a structure. This doesn't match the 3658 // wording above, but follows gcc in situations with a field following an 3659 // empty structure. 3660 if (!RD->isUnion()) { 3661 if (HadField) 3662 return false; 3663 3664 HadField = true; 3665 } 3666 } 3667 3668 return true; 3669 } 3670 3671 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic) const { 3672 if (RetTy->isVoidType()) 3673 return ABIArgInfo::getIgnore(); 3674 3675 // Large vector types should be returned via memory. 3676 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 3677 return ABIArgInfo::getIndirect(0); 3678 3679 if (!isAggregateTypeForABI(RetTy)) { 3680 // Treat an enum type as its underlying type. 3681 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3682 RetTy = EnumTy->getDecl()->getIntegerType(); 3683 3684 return (RetTy->isPromotableIntegerType() ? 3685 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3686 } 3687 3688 // Structures with either a non-trivial destructor or a non-trivial 3689 // copy constructor are always indirect. 3690 if (isRecordReturnIndirect(RetTy, getCXXABI())) 3691 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3692 3693 // Are we following APCS? 3694 if (getABIKind() == APCS) { 3695 if (isEmptyRecord(getContext(), RetTy, false)) 3696 return ABIArgInfo::getIgnore(); 3697 3698 // Complex types are all returned as packed integers. 3699 // 3700 // FIXME: Consider using 2 x vector types if the back end handles them 3701 // correctly. 3702 if (RetTy->isAnyComplexType()) 3703 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 3704 getContext().getTypeSize(RetTy))); 3705 3706 // Integer like structures are returned in r0. 3707 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 3708 // Return in the smallest viable integer type. 3709 uint64_t Size = getContext().getTypeSize(RetTy); 3710 if (Size <= 8) 3711 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3712 if (Size <= 16) 3713 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3714 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3715 } 3716 3717 // Otherwise return in memory. 3718 return ABIArgInfo::getIndirect(0); 3719 } 3720 3721 // Otherwise this is an AAPCS variant. 3722 3723 if (isEmptyRecord(getContext(), RetTy, true)) 3724 return ABIArgInfo::getIgnore(); 3725 3726 // Check for homogeneous aggregates with AAPCS-VFP. 
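// For instance, "struct { double x, y; }" returned from a non-variadic
// function under AAPCS-VFP is classified getDirect() here, so the backend can
// return it in VFP registers instead of via memory.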
3727 if (getABIKind() == AAPCS_VFP && !isVariadic) { 3728 const Type *Base = 0; 3729 if (isHomogeneousAggregate(RetTy, Base, getContext())) { 3730 assert(Base && "Base class should be set for homogeneous aggregate"); 3731 // Homogeneous Aggregates are returned directly. 3732 return ABIArgInfo::getDirect(); 3733 } 3734 } 3735 3736 // Aggregates <= 4 bytes are returned in r0; other aggregates 3737 // are returned indirectly. 3738 uint64_t Size = getContext().getTypeSize(RetTy); 3739 if (Size <= 32) { 3740 // Return in the smallest viable integer type. 3741 if (Size <= 8) 3742 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3743 if (Size <= 16) 3744 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 3745 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 3746 } 3747 3748 return ABIArgInfo::getIndirect(0); 3749 } 3750 3751 /// isIllegalVector - check whether Ty is an illegal vector type. 3752 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const { 3753 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3754 // Check whether VT is legal. 3755 unsigned NumElements = VT->getNumElements(); 3756 uint64_t Size = getContext().getTypeSize(VT); 3757 // NumElements should be power of 2. 3758 if ((NumElements & (NumElements - 1)) != 0) 3759 return true; 3760 // Size should be greater than 32 bits. 3761 return Size <= 32; 3762 } 3763 return false; 3764 } 3765 3766 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3767 CodeGenFunction &CGF) const { 3768 llvm::Type *BP = CGF.Int8PtrTy; 3769 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3770 3771 CGBuilderTy &Builder = CGF.Builder; 3772 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 3773 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3774 3775 if (isEmptyRecord(getContext(), Ty, true)) { 3776 // These are ignored for parameter passing purposes. 3777 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3778 return Builder.CreateBitCast(Addr, PTy); 3779 } 3780 3781 uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8; 3782 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; 3783 bool IsIndirect = false; 3784 3785 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for 3786 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte. 3787 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 3788 getABIKind() == ARMABIInfo::AAPCS) 3789 TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 3790 else 3791 TyAlign = 4; 3792 // Use indirect if size of the illegal vector is bigger than 16 bytes. 3793 if (isIllegalVectorType(Ty) && Size > 16) { 3794 IsIndirect = true; 3795 Size = 4; 3796 TyAlign = 4; 3797 } 3798 3799 // Handle address alignment for ABI alignment > 4 bytes. 
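// Illustration: with an 8-byte ABI alignment, an ap.cur of 0x1004 is rounded
// up to 0x1008 by the add/and sequence below ((addr + 7) & ~7).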
3800 if (TyAlign > 4) { 3801 assert((TyAlign & (TyAlign - 1)) == 0 && 3802 "Alignment is not power of 2!"); 3803 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty); 3804 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1)); 3805 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1))); 3806 Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align"); 3807 } 3808 3809 uint64_t Offset = 3810 llvm::RoundUpToAlignment(Size, 4); 3811 llvm::Value *NextAddr = 3812 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 3813 "ap.next"); 3814 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3815 3816 if (IsIndirect) 3817 Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP)); 3818 else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) { 3819 // We can't directly cast ap.cur to pointer to a vector type, since ap.cur 3820 // may not be correctly aligned for the vector type. We create an aligned 3821 // temporary space and copy the content over from ap.cur to the temporary 3822 // space. This is necessary if the natural alignment of the type is greater 3823 // than the ABI alignment. 3824 llvm::Type *I8PtrTy = Builder.getInt8PtrTy(); 3825 CharUnits CharSize = getContext().getTypeSizeInChars(Ty); 3826 llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty), 3827 "var.align"); 3828 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy); 3829 llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy); 3830 Builder.CreateMemCpy(Dst, Src, 3831 llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()), 3832 TyAlign, false); 3833 Addr = AlignedTemp; //The content is in aligned location. 3834 } 3835 llvm::Type *PTy = 3836 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3837 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 3838 3839 return AddrTyped; 3840 } 3841 3842 namespace { 3843 3844 class NaClARMABIInfo : public ABIInfo { 3845 public: 3846 NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind) 3847 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {} 3848 virtual void computeInfo(CGFunctionInfo &FI) const; 3849 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3850 CodeGenFunction &CGF) const; 3851 private: 3852 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv. 3853 ARMABIInfo NInfo; // Used for everything else. 3854 }; 3855 3856 class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo { 3857 public: 3858 NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind) 3859 : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {} 3860 }; 3861 3862 } 3863 3864 void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 3865 if (FI.getASTCallingConvention() == CC_PnaclCall) 3866 PInfo.computeInfo(FI); 3867 else 3868 static_cast<const ABIInfo&>(NInfo).computeInfo(FI); 3869 } 3870 3871 llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3872 CodeGenFunction &CGF) const { 3873 // Always use the native convention; calling pnacl-style varargs functions 3874 // is unsupported. 
3875 return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF); 3876 } 3877 3878 //===----------------------------------------------------------------------===// 3879 // AArch64 ABI Implementation 3880 //===----------------------------------------------------------------------===// 3881 3882 namespace { 3883 3884 class AArch64ABIInfo : public ABIInfo { 3885 public: 3886 AArch64ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 3887 3888 private: 3889 // The AArch64 PCS is explicit about return types and argument types being 3890 // handled identically, so we don't need to draw a distinction between 3891 // Argument and Return classification. 3892 ABIArgInfo classifyGenericType(QualType Ty, int &FreeIntRegs, 3893 int &FreeVFPRegs) const; 3894 3895 ABIArgInfo tryUseRegs(QualType Ty, int &FreeRegs, int RegsNeeded, bool IsInt, 3896 llvm::Type *DirectTy = 0) const; 3897 3898 virtual void computeInfo(CGFunctionInfo &FI) const; 3899 3900 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3901 CodeGenFunction &CGF) const; 3902 }; 3903 3904 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo { 3905 public: 3906 AArch64TargetCodeGenInfo(CodeGenTypes &CGT) 3907 :TargetCodeGenInfo(new AArch64ABIInfo(CGT)) {} 3908 3909 const AArch64ABIInfo &getABIInfo() const { 3910 return static_cast<const AArch64ABIInfo&>(TargetCodeGenInfo::getABIInfo()); 3911 } 3912 3913 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { 3914 return 31; 3915 } 3916 3917 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3918 llvm::Value *Address) const { 3919 // 0-31 are x0-x30 and sp: 8 bytes each 3920 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 3921 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 31); 3922 3923 // 64-95 are v0-v31: 16 bytes each 3924 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16); 3925 AssignToArrayRange(CGF.Builder, Address, Sixteen8, 64, 95); 3926 3927 return false; 3928 } 3929 3930 }; 3931 3932 } 3933 3934 void AArch64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 3935 int FreeIntRegs = 8, FreeVFPRegs = 8; 3936 3937 FI.getReturnInfo() = classifyGenericType(FI.getReturnType(), 3938 FreeIntRegs, FreeVFPRegs); 3939 3940 FreeIntRegs = FreeVFPRegs = 8; 3941 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3942 it != ie; ++it) { 3943 it->info = classifyGenericType(it->type, FreeIntRegs, FreeVFPRegs); 3944 3945 } 3946 } 3947 3948 ABIArgInfo 3949 AArch64ABIInfo::tryUseRegs(QualType Ty, int &FreeRegs, int RegsNeeded, 3950 bool IsInt, llvm::Type *DirectTy) const { 3951 if (FreeRegs >= RegsNeeded) { 3952 FreeRegs -= RegsNeeded; 3953 return ABIArgInfo::getDirect(DirectTy); 3954 } 3955 3956 llvm::Type *Padding = 0; 3957 3958 // We need padding so that later arguments don't get filled in anyway. That 3959 // wouldn't happen if only ByVal arguments followed in the same category, but 3960 // a large structure will simply seem to be a pointer as far as LLVM is 3961 // concerned. 3962 if (FreeRegs > 0) { 3963 if (IsInt) 3964 Padding = llvm::Type::getInt64Ty(getVMContext()); 3965 else 3966 Padding = llvm::Type::getFloatTy(getVMContext()); 3967 3968 // Either [N x i64] or [N x float]. 
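// Illustration (sketch): if three integer registers remain but the value needs
// four, we emit [3 x i64] padding so those registers are consumed and later
// arguments cannot be allocated to them; the value itself is passed byval.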
3969 Padding = llvm::ArrayType::get(Padding, FreeRegs);
3970 FreeRegs = 0;
3971 }
3972
3973 return ABIArgInfo::getIndirect(getContext().getTypeAlign(Ty) / 8,
3974 /*IsByVal=*/ true, /*Realign=*/ false,
3975 Padding);
3976 }
3977
3978
3979 ABIArgInfo AArch64ABIInfo::classifyGenericType(QualType Ty,
3980 int &FreeIntRegs,
3981 int &FreeVFPRegs) const {
3982 // Can only occur for a return type, but harmless otherwise.
3983 if (Ty->isVoidType())
3984 return ABIArgInfo::getIgnore();
3985
3986 // Large vector types should be returned via memory. There's no such concept
3987 // in the ABI, but they'd be over 16 bytes anyway so no matter how they're
3988 // classified they'd go into memory (see B.3).
3989 if (Ty->isVectorType() && getContext().getTypeSize(Ty) > 128) {
3990 if (FreeIntRegs > 0)
3991 --FreeIntRegs;
3992 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3993 }
3994
3995 // All non-aggregate LLVM types have a concrete ABI representation so they can
3996 // be passed directly. After this block we're guaranteed to be in a
3997 // complicated case.
3998 if (!isAggregateTypeForABI(Ty)) {
3999 // Treat an enum type as its underlying type.
4000 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4001 Ty = EnumTy->getDecl()->getIntegerType();
4002
4003 if (Ty->isFloatingType() || Ty->isVectorType())
4004 return tryUseRegs(Ty, FreeVFPRegs, /*RegsNeeded=*/ 1, /*IsInt=*/ false);
4005
4006 assert(getContext().getTypeSize(Ty) <= 128 &&
4007 "unexpectedly large scalar type");
4008
4009 int RegsNeeded = getContext().getTypeSize(Ty) > 64 ? 2 : 1;
4010
4011 // If the type may need padding registers to ensure "alignment", we must be
4012 // careful when this is accounted for. Increasing the effective size covers
4013 // all cases.
4014 if (getContext().getTypeAlign(Ty) == 128)
4015 RegsNeeded += FreeIntRegs % 2 != 0;
4016
4017 return tryUseRegs(Ty, FreeIntRegs, RegsNeeded, /*IsInt=*/ true);
4018 }
4019
4020 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
4021 if (FreeIntRegs > 0 && RAA == CGCXXABI::RAA_Indirect)
4022 --FreeIntRegs;
4023 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
4024 }
4025
4026 if (isEmptyRecord(getContext(), Ty, true)) {
4027 if (!getContext().getLangOpts().CPlusPlus) {
4028 // Empty structs outside C++ mode are a GNU extension, so no ABI can
4029 // possibly tell us what to do. It turns out (I believe) that GCC ignores
4030 // the object for parameter-passing purposes.
4031 return ABIArgInfo::getIgnore();
4032 }
4033
4034 // The combination of C++98 9p5 (sizeof(struct) != 0) and the pseudocode
4035 // description of va_arg in the PCS requires that an empty struct does
4036 // actually occupy space for parameter-passing. I'm hoping for a
4037 // clarification giving an explicit paragraph to point to in the future.
4038 return tryUseRegs(Ty, FreeIntRegs, /*RegsNeeded=*/ 1, /*IsInt=*/ true,
4039 llvm::Type::getInt8Ty(getVMContext()));
4040 }
4041
4042 // Homogeneous vector aggregates get passed in registers or on the stack.
4043 const Type *Base = 0;
4044 uint64_t NumMembers = 0;
4045 if (isHomogeneousAggregate(Ty, Base, getContext(), &NumMembers)) {
4046 assert(Base && "Base class should be set for homogeneous aggregate");
4047 // Homogeneous aggregates are passed and returned directly.
4048 return tryUseRegs(Ty, FreeVFPRegs, /*RegsNeeded=*/ NumMembers, 4049 /*IsInt=*/ false); 4050 } 4051 4052 uint64_t Size = getContext().getTypeSize(Ty); 4053 if (Size <= 128) { 4054 // Small structs can use the same direct type whether they're in registers 4055 // or on the stack. 4056 llvm::Type *BaseTy; 4057 unsigned NumBases; 4058 int SizeInRegs = (Size + 63) / 64; 4059 4060 if (getContext().getTypeAlign(Ty) == 128) { 4061 BaseTy = llvm::Type::getIntNTy(getVMContext(), 128); 4062 NumBases = 1; 4063 4064 // If the type may need padding registers to ensure "alignment", we must 4065 // be careful when this is accounted for. Increasing the effective size 4066 // covers all cases. 4067 SizeInRegs += FreeIntRegs % 2 != 0; 4068 } else { 4069 BaseTy = llvm::Type::getInt64Ty(getVMContext()); 4070 NumBases = SizeInRegs; 4071 } 4072 llvm::Type *DirectTy = llvm::ArrayType::get(BaseTy, NumBases); 4073 4074 return tryUseRegs(Ty, FreeIntRegs, /*RegsNeeded=*/ SizeInRegs, 4075 /*IsInt=*/ true, DirectTy); 4076 } 4077 4078 // If the aggregate is > 16 bytes, it's passed and returned indirectly. In 4079 // LLVM terms the return uses an "sret" pointer, but that's handled elsewhere. 4080 --FreeIntRegs; 4081 return ABIArgInfo::getIndirect(0, /* byVal = */ false); 4082 } 4083 4084 llvm::Value *AArch64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4085 CodeGenFunction &CGF) const { 4086 // The AArch64 va_list type and handling is specified in the Procedure Call 4087 // Standard, section B.4: 4088 // 4089 // struct { 4090 // void *__stack; 4091 // void *__gr_top; 4092 // void *__vr_top; 4093 // int __gr_offs; 4094 // int __vr_offs; 4095 // }; 4096 4097 assert(!CGF.CGM.getDataLayout().isBigEndian() 4098 && "va_arg not implemented for big-endian AArch64"); 4099 4100 int FreeIntRegs = 8, FreeVFPRegs = 8; 4101 Ty = CGF.getContext().getCanonicalType(Ty); 4102 ABIArgInfo AI = classifyGenericType(Ty, FreeIntRegs, FreeVFPRegs); 4103 4104 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg"); 4105 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 4106 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack"); 4107 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 4108 4109 llvm::Value *reg_offs_p = 0, *reg_offs = 0; 4110 int reg_top_index; 4111 int RegSize; 4112 if (FreeIntRegs < 8) { 4113 assert(FreeVFPRegs == 8 && "Arguments never split between int & VFP regs"); 4114 // 3 is the field number of __gr_offs 4115 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p"); 4116 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs"); 4117 reg_top_index = 1; // field number for __gr_top 4118 RegSize = 8 * (8 - FreeIntRegs); 4119 } else { 4120 assert(FreeVFPRegs < 8 && "Argument must go in VFP or int regs"); 4121 // 4 is the field number of __vr_offs. 4122 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p"); 4123 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs"); 4124 reg_top_index = 2; // field number for __vr_top 4125 RegSize = 16 * (8 - FreeVFPRegs); 4126 } 4127 4128 //======================================= 4129 // Find out where argument was passed 4130 //======================================= 4131 4132 // If reg_offs >= 0 we're already using the stack for this type of 4133 // argument. We don't want to keep updating reg_offs (in case it overflows, 4134 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves 4135 // whatever they get). 
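// Sketch of the offset convention (illustrative; see B.4 of the PCS):
// __gr_offs/__vr_offs are negative while saved registers remain, e.g. a
// __gr_offs of -16 means two 8-byte GP slots are left, so the next i64 va_arg
// loads from __gr_top - 16 and leaves __gr_offs at -8.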
4136 llvm::Value *UsingStack = 0;
4137 UsingStack = CGF.Builder.CreateICmpSGE(reg_offs,
4138 llvm::ConstantInt::get(CGF.Int32Ty, 0));
4139
4140 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
4141
4142 // Otherwise, at least some kind of argument could go in these registers; the
4143 // question is whether this particular type is too big.
4144 CGF.EmitBlock(MaybeRegBlock);
4145
4146 // Integer arguments may need their register alignment corrected (for example
4147 // a "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
4148 // align __gr_offs to calculate the potential address.
4149 if (FreeIntRegs < 8 && AI.isDirect() && getContext().getTypeAlign(Ty) > 64) {
4150 int Align = getContext().getTypeAlign(Ty) / 8;
4151
4152 reg_offs = CGF.Builder.CreateAdd(reg_offs,
4153 llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
4154 "align_regoffs");
4155 reg_offs = CGF.Builder.CreateAnd(reg_offs,
4156 llvm::ConstantInt::get(CGF.Int32Ty, -Align),
4157 "aligned_regoffs");
4158 }
4159
4160 // Update the gr_offs/vr_offs pointer for the next call to va_arg on this va_list.
4161 llvm::Value *NewOffset = 0;
4162 NewOffset = CGF.Builder.CreateAdd(reg_offs,
4163 llvm::ConstantInt::get(CGF.Int32Ty, RegSize),
4164 "new_reg_offs");
4165 CGF.Builder.CreateStore(NewOffset, reg_offs_p);
4166
4167 // Now we're in a position to decide whether this argument really was in
4168 // registers or not.
4169 llvm::Value *InRegs = 0;
4170 InRegs = CGF.Builder.CreateICmpSLE(NewOffset,
4171 llvm::ConstantInt::get(CGF.Int32Ty, 0),
4172 "inreg");
4173
4174 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
4175
4176 //=======================================
4177 // Argument was in registers
4178 //=======================================
4179
4180 // Now we emit the code for the case where the argument was originally passed
4181 // in registers. First start the appropriate block:
4182 CGF.EmitBlock(InRegBlock);
4183
4184 llvm::Value *reg_top_p = 0, *reg_top = 0;
4185 reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
4186 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
4187 llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
4188 llvm::Value *RegAddr = 0;
4189 llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
4190
4191 if (!AI.isDirect()) {
4192 // If it's been passed indirectly (actually a struct), whatever we find from
4193 // stored registers or on the stack will actually be a struct **.
4194 MemTy = llvm::PointerType::getUnqual(MemTy);
4195 }
4196
4197 const Type *Base = 0;
4198 uint64_t NumMembers;
4199 if (isHomogeneousAggregate(Ty, Base, getContext(), &NumMembers)
4200 && NumMembers > 1) {
4201 // Homogeneous aggregates passed in registers will have their elements split
4202 // and stored 16 bytes apart regardless of size (they're notionally in qN,
4203 // qN+1, ...). We reload and store into a temporary local variable
4204 // contiguously.
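// Illustration (assuming Base == float and NumMembers == 3): the three floats
// live at reg_top + reg_offs + 0, +16 and +32, and the loop below packs them
// into a contiguous [3 x float] temporary.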
4205 assert(AI.isDirect() && "Homogeneous aggregates should be passed directly");
4206 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
4207 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
4208 llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
4209
4210 for (unsigned i = 0; i < NumMembers; ++i) {
4211 llvm::Value *BaseOffset = llvm::ConstantInt::get(CGF.Int32Ty, 16 * i);
4212 llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
4213 LoadAddr = CGF.Builder.CreateBitCast(LoadAddr,
4214 llvm::PointerType::getUnqual(BaseTy));
4215 llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);
4216
4217 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
4218 CGF.Builder.CreateStore(Elem, StoreAddr);
4219 }
4220
4221 RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
4222 } else {
4223 // Otherwise the object is contiguous in memory.
4224 RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
4225 }
4226
4227 CGF.EmitBranch(ContBlock);
4228
4229 //=======================================
4230 // Argument was on the stack
4231 //=======================================
4232 CGF.EmitBlock(OnStackBlock);
4233
4234 llvm::Value *stack_p = 0, *OnStackAddr = 0;
4235 stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
4236 OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
4237
4238 // Again, stack arguments may need realignment. In this case both integer and
4239 // floating-point ones might be affected.
4240 if (AI.isDirect() && getContext().getTypeAlign(Ty) > 64) {
4241 int Align = getContext().getTypeAlign(Ty) / 8;
4242
4243 OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
4244
4245 OnStackAddr = CGF.Builder.CreateAdd(OnStackAddr,
4246 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
4247 "align_stack");
4248 OnStackAddr = CGF.Builder.CreateAnd(OnStackAddr,
4249 llvm::ConstantInt::get(CGF.Int64Ty, -Align),
4250 "align_stack");
4251
4252 OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
4253 }
4254
4255 uint64_t StackSize;
4256 if (AI.isDirect())
4257 StackSize = getContext().getTypeSize(Ty) / 8;
4258 else
4259 StackSize = 8;
4260
4261 // All stack slots are 8 bytes.
4262 StackSize = llvm::RoundUpToAlignment(StackSize, 8);
4263
4264 llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
4265 llvm::Value *NewStack = CGF.Builder.CreateGEP(OnStackAddr, StackSizeC,
4266 "new_stack");
4267
4268 // Write the new value of __stack for the next call to va_arg.
4269 CGF.Builder.CreateStore(NewStack, stack_p);
4270
4271 OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);
4272
4273 CGF.EmitBranch(ContBlock);
4274
4275 //=======================================
4276 // Tidy up
4277 //=======================================
4278 CGF.EmitBlock(ContBlock);
4279
4280 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
4281 ResAddr->addIncoming(RegAddr, InRegBlock);
4282 ResAddr->addIncoming(OnStackAddr, OnStackBlock);
4283
4284 if (AI.isDirect())
4285 return ResAddr;
4286
4287 return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
4288 }
4289
4290 //===----------------------------------------------------------------------===//
4291 // NVPTX ABI Implementation
4292 //===----------------------------------------------------------------------===//
4293
4294 namespace {
4295
4296 class NVPTXABIInfo : public ABIInfo {
4297 public:
4298 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
4299
4300 ABIArgInfo classifyReturnType(QualType RetTy) const;
4301 ABIArgInfo classifyArgumentType(QualType
Ty) const; 4302 4303 virtual void computeInfo(CGFunctionInfo &FI) const; 4304 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4305 CodeGenFunction &CFG) const; 4306 }; 4307 4308 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { 4309 public: 4310 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT) 4311 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {} 4312 4313 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4314 CodeGen::CodeGenModule &M) const; 4315 private: 4316 static void addKernelMetadata(llvm::Function *F); 4317 }; 4318 4319 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const { 4320 if (RetTy->isVoidType()) 4321 return ABIArgInfo::getIgnore(); 4322 4323 // note: this is different from default ABI 4324 if (!RetTy->isScalarType()) 4325 return ABIArgInfo::getDirect(); 4326 4327 // Treat an enum type as its underlying type. 4328 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 4329 RetTy = EnumTy->getDecl()->getIntegerType(); 4330 4331 return (RetTy->isPromotableIntegerType() ? 4332 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4333 } 4334 4335 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const { 4336 // Treat an enum type as its underlying type. 4337 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4338 Ty = EnumTy->getDecl()->getIntegerType(); 4339 4340 return (Ty->isPromotableIntegerType() ? 4341 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4342 } 4343 4344 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const { 4345 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4346 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 4347 it != ie; ++it) 4348 it->info = classifyArgumentType(it->type); 4349 4350 // Always honor user-specified calling convention. 4351 if (FI.getCallingConvention() != llvm::CallingConv::C) 4352 return; 4353 4354 FI.setEffectiveCallingConvention(getRuntimeCC()); 4355 } 4356 4357 llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4358 CodeGenFunction &CFG) const { 4359 llvm_unreachable("NVPTX does not support varargs"); 4360 } 4361 4362 void NVPTXTargetCodeGenInfo:: 4363 SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4364 CodeGen::CodeGenModule &M) const{ 4365 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 4366 if (!FD) return; 4367 4368 llvm::Function *F = cast<llvm::Function>(GV); 4369 4370 // Perform special handling in OpenCL mode 4371 if (M.getLangOpts().OpenCL) { 4372 // Use OpenCL function attributes to check for kernel functions 4373 // By default, all functions are device functions 4374 if (FD->hasAttr<OpenCLKernelAttr>()) { 4375 // OpenCL __kernel functions get kernel metadata 4376 addKernelMetadata(F); 4377 // And kernel functions are not subject to inlining 4378 F->addFnAttr(llvm::Attribute::NoInline); 4379 } 4380 } 4381 4382 // Perform special handling in CUDA mode. 4383 if (M.getLangOpts().CUDA) { 4384 // CUDA __global__ functions get a kernel metadata entry. Since 4385 // __global__ functions cannot be called from the device, we do not 4386 // need to set the noinline attribute. 
4387 if (FD->hasAttr<CUDAGlobalAttr>()) 4388 addKernelMetadata(F); 4389 } 4390 } 4391 4392 void NVPTXTargetCodeGenInfo::addKernelMetadata(llvm::Function *F) { 4393 llvm::Module *M = F->getParent(); 4394 llvm::LLVMContext &Ctx = M->getContext(); 4395 4396 // Get "nvvm.annotations" metadata node 4397 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations"); 4398 4399 // Create !{<func-ref>, metadata !"kernel", i32 1} node 4400 llvm::SmallVector<llvm::Value *, 3> MDVals; 4401 MDVals.push_back(F); 4402 MDVals.push_back(llvm::MDString::get(Ctx, "kernel")); 4403 MDVals.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1)); 4404 4405 // Append metadata to nvvm.annotations 4406 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 4407 } 4408 4409 } 4410 4411 //===----------------------------------------------------------------------===// 4412 // SystemZ ABI Implementation 4413 //===----------------------------------------------------------------------===// 4414 4415 namespace { 4416 4417 class SystemZABIInfo : public ABIInfo { 4418 public: 4419 SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 4420 4421 bool isPromotableIntegerType(QualType Ty) const; 4422 bool isCompoundType(QualType Ty) const; 4423 bool isFPArgumentType(QualType Ty) const; 4424 4425 ABIArgInfo classifyReturnType(QualType RetTy) const; 4426 ABIArgInfo classifyArgumentType(QualType ArgTy) const; 4427 4428 virtual void computeInfo(CGFunctionInfo &FI) const { 4429 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4430 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 4431 it != ie; ++it) 4432 it->info = classifyArgumentType(it->type); 4433 } 4434 4435 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4436 CodeGenFunction &CGF) const; 4437 }; 4438 4439 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo { 4440 public: 4441 SystemZTargetCodeGenInfo(CodeGenTypes &CGT) 4442 : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {} 4443 }; 4444 4445 } 4446 4447 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const { 4448 // Treat an enum type as its underlying type. 4449 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4450 Ty = EnumTy->getDecl()->getIntegerType(); 4451 4452 // Promotable integer types are required to be promoted by the ABI. 4453 if (Ty->isPromotableIntegerType()) 4454 return true; 4455 4456 // 32-bit values must also be promoted. 4457 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 4458 switch (BT->getKind()) { 4459 case BuiltinType::Int: 4460 case BuiltinType::UInt: 4461 return true; 4462 default: 4463 return false; 4464 } 4465 return false; 4466 } 4467 4468 bool SystemZABIInfo::isCompoundType(QualType Ty) const { 4469 return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty); 4470 } 4471 4472 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const { 4473 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 4474 switch (BT->getKind()) { 4475 case BuiltinType::Float: 4476 case BuiltinType::Double: 4477 return true; 4478 default: 4479 return false; 4480 } 4481 4482 if (const RecordType *RT = Ty->getAsStructureType()) { 4483 const RecordDecl *RD = RT->getDecl(); 4484 bool Found = false; 4485 4486 // If this is a C++ record, check the bases first. 4487 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 4488 for (CXXRecordDecl::base_class_const_iterator I = CXXRD->bases_begin(), 4489 E = CXXRD->bases_end(); I != E; ++I) { 4490 QualType Base = I->getType(); 4491 4492 // Empty bases don't affect things either way. 
4493 if (isEmptyRecord(getContext(), Base, true)) 4494 continue; 4495 4496 if (Found) 4497 return false; 4498 Found = isFPArgumentType(Base); 4499 if (!Found) 4500 return false; 4501 } 4502 4503 // Check the fields. 4504 for (RecordDecl::field_iterator I = RD->field_begin(), 4505 E = RD->field_end(); I != E; ++I) { 4506 const FieldDecl *FD = *I; 4507 4508 // Empty bitfields don't affect things either way. 4509 // Unlike isSingleElementStruct(), empty structure and array fields 4510 // do count. So do anonymous bitfields that aren't zero-sized. 4511 if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0) 4512 return true; 4513 4514 // Unlike isSingleElementStruct(), arrays do not count. 4515 // Nested isFPArgumentType structures still do though. 4516 if (Found) 4517 return false; 4518 Found = isFPArgumentType(FD->getType()); 4519 if (!Found) 4520 return false; 4521 } 4522 4523 // Unlike isSingleElementStruct(), trailing padding is allowed. 4524 // An 8-byte aligned struct s { float f; } is passed as a double. 4525 return Found; 4526 } 4527 4528 return false; 4529 } 4530 4531 llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4532 CodeGenFunction &CGF) const { 4533 // Assume that va_list type is correct; should be pointer to LLVM type: 4534 // struct { 4535 // i64 __gpr; 4536 // i64 __fpr; 4537 // i8 *__overflow_arg_area; 4538 // i8 *__reg_save_area; 4539 // }; 4540 4541 // Every argument occupies 8 bytes and is passed by preference in either 4542 // GPRs or FPRs. 4543 Ty = CGF.getContext().getCanonicalType(Ty); 4544 ABIArgInfo AI = classifyArgumentType(Ty); 4545 bool InFPRs = isFPArgumentType(Ty); 4546 4547 llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty)); 4548 bool IsIndirect = AI.isIndirect(); 4549 unsigned UnpaddedBitSize; 4550 if (IsIndirect) { 4551 APTy = llvm::PointerType::getUnqual(APTy); 4552 UnpaddedBitSize = 64; 4553 } else 4554 UnpaddedBitSize = getContext().getTypeSize(Ty); 4555 unsigned PaddedBitSize = 64; 4556 assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size."); 4557 4558 unsigned PaddedSize = PaddedBitSize / 8; 4559 unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8; 4560 4561 unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding; 4562 if (InFPRs) { 4563 MaxRegs = 4; // Maximum of 4 FPR arguments 4564 RegCountField = 1; // __fpr 4565 RegSaveIndex = 16; // save offset for f0 4566 RegPadding = 0; // floats are passed in the high bits of an FPR 4567 } else { 4568 MaxRegs = 5; // Maximum of 5 GPR arguments 4569 RegCountField = 0; // __gpr 4570 RegSaveIndex = 2; // save offset for r2 4571 RegPadding = Padding; // values are passed in the low bits of a GPR 4572 } 4573 4574 llvm::Value *RegCountPtr = 4575 CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr"); 4576 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count"); 4577 llvm::Type *IndexTy = RegCount->getType(); 4578 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs); 4579 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV, 4580 "fits_in_regs"); 4581 4582 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 4583 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 4584 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 4585 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 4586 4587 // Emit code to load the value if it was passed in registers. 
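// Illustration for a full 64-bit GPR argument (no padding): the third such
// argument has reg_count == 2, so its slot is computed below as
// reg_save_area + 2*8 (r2's save offset) + 2*8 = reg_save_area + 32.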
4588 CGF.EmitBlock(InRegBlock); 4589 4590 // Work out the address of an argument register. 4591 llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize); 4592 llvm::Value *ScaledRegCount = 4593 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count"); 4594 llvm::Value *RegBase = 4595 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding); 4596 llvm::Value *RegOffset = 4597 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset"); 4598 llvm::Value *RegSaveAreaPtr = 4599 CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr"); 4600 llvm::Value *RegSaveArea = 4601 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area"); 4602 llvm::Value *RawRegAddr = 4603 CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr"); 4604 llvm::Value *RegAddr = 4605 CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr"); 4606 4607 // Update the register count 4608 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1); 4609 llvm::Value *NewRegCount = 4610 CGF.Builder.CreateAdd(RegCount, One, "reg_count"); 4611 CGF.Builder.CreateStore(NewRegCount, RegCountPtr); 4612 CGF.EmitBranch(ContBlock); 4613 4614 // Emit code to load the value if it was passed in memory. 4615 CGF.EmitBlock(InMemBlock); 4616 4617 // Work out the address of a stack argument. 4618 llvm::Value *OverflowArgAreaPtr = 4619 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr"); 4620 llvm::Value *OverflowArgArea = 4621 CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"); 4622 llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding); 4623 llvm::Value *RawMemAddr = 4624 CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr"); 4625 llvm::Value *MemAddr = 4626 CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr"); 4627 4628 // Update overflow_arg_area_ptr pointer 4629 llvm::Value *NewOverflowArgArea = 4630 CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area"); 4631 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 4632 CGF.EmitBranch(ContBlock); 4633 4634 // Return the appropriate result. 
4635 CGF.EmitBlock(ContBlock); 4636 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr"); 4637 ResAddr->addIncoming(RegAddr, InRegBlock); 4638 ResAddr->addIncoming(MemAddr, InMemBlock); 4639 4640 if (IsIndirect) 4641 return CGF.Builder.CreateLoad(ResAddr, "indirect_arg"); 4642 4643 return ResAddr; 4644 } 4645 4646 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI( 4647 const llvm::Triple &Triple, const CodeGenOptions &Opts) { 4648 assert(Triple.getArch() == llvm::Triple::x86); 4649 4650 switch (Opts.getStructReturnConvention()) { 4651 case CodeGenOptions::SRCK_Default: 4652 break; 4653 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return 4654 return false; 4655 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return 4656 return true; 4657 } 4658 4659 if (Triple.isOSDarwin()) 4660 return true; 4661 4662 switch (Triple.getOS()) { 4663 case llvm::Triple::Cygwin: 4664 case llvm::Triple::MinGW32: 4665 case llvm::Triple::AuroraUX: 4666 case llvm::Triple::DragonFly: 4667 case llvm::Triple::FreeBSD: 4668 case llvm::Triple::OpenBSD: 4669 case llvm::Triple::Bitrig: 4670 case llvm::Triple::Win32: 4671 return true; 4672 default: 4673 return false; 4674 } 4675 } 4676 4677 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const { 4678 if (RetTy->isVoidType()) 4679 return ABIArgInfo::getIgnore(); 4680 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64) 4681 return ABIArgInfo::getIndirect(0); 4682 return (isPromotableIntegerType(RetTy) ? 4683 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4684 } 4685 4686 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const { 4687 // Handle the generic C++ ABI. 4688 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 4689 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 4690 4691 // Integers and enums are extended to full register width. 4692 if (isPromotableIntegerType(Ty)) 4693 return ABIArgInfo::getExtend(); 4694 4695 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly. 4696 uint64_t Size = getContext().getTypeSize(Ty); 4697 if (Size != 8 && Size != 16 && Size != 32 && Size != 64) 4698 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 4699 4700 // Handle small structures. 4701 if (const RecordType *RT = Ty->getAs<RecordType>()) { 4702 // Structures with flexible arrays have variable length, so really 4703 // fail the size test above. 4704 const RecordDecl *RD = RT->getDecl(); 4705 if (RD->hasFlexibleArrayMember()) 4706 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 4707 4708 // The structure is passed as an unextended integer, a float, or a double. 4709 llvm::Type *PassTy; 4710 if (isFPArgumentType(Ty)) { 4711 assert(Size == 32 || Size == 64); 4712 if (Size == 32) 4713 PassTy = llvm::Type::getFloatTy(getVMContext()); 4714 else 4715 PassTy = llvm::Type::getDoubleTy(getVMContext()); 4716 } else 4717 PassTy = llvm::IntegerType::get(getVMContext(), Size); 4718 return ABIArgInfo::getDirect(PassTy); 4719 } 4720 4721 // Non-structure compounds are passed indirectly. 
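// (For example a _Complex float, which is 64 bits but not a RecordType,
// reaches this point and is passed indirectly.)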

//===----------------------------------------------------------------------===//
// MSP430 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const;
};

}

void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                             CodeGen::CodeGenModule &M) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
      // Handle 'interrupt' attribute:
      llvm::Function *F = cast<llvm::Function>(GV);

      // Step 1: Set ISR calling convention.
      F->setCallingConv(llvm::CallingConv::MSP430_INTR);

      // Step 2: Add the required function attributes.
      F->addFnAttr(llvm::Attribute::NoInline);

      // Step 3: Emit ISR vector alias.
      unsigned Num = attr->getNumber() / 2;
      new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
                            "__isr_" + Twine(Num),
                            GV, &M.getModule());
    }
  }
}
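
// Illustrative example (attribute spelling assumed, not from this file): an
// interrupt handler declared as
//
//   void __attribute__((interrupt(4))) timer_isr(void);
//
// receives the MSP430_INTR calling convention, is marked noinline, and an
// alias named "__isr_2" (the vector number divided by 2) is emitted for it.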

//===----------------------------------------------------------------------===//
// MIPS ABI Implementation.  This works for both little-endian and
// big-endian variants.
//===----------------------------------------------------------------------===//

namespace {
class MipsABIInfo : public ABIInfo {
  bool IsO32;
  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
  void CoerceToIntArgs(uint64_t TySize,
                       SmallVectorImpl<llvm::Type *> &ArgList) const;
  llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
  llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
  llvm::Type* getPaddingType(uint64_t OrigOffset, uint64_t Offset) const;
public:
  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
    ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
    StackAlignInBytes(IsO32 ? 8 : 16) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty, uint64_t &Offset) const;
  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
  unsigned SizeOfUnwindException;
public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
    : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
      SizeOfUnwindException(IsO32 ? 24 : 32) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 29;
  }

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const {
    const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
    if (!FD) return;
    llvm::Function *Fn = cast<llvm::Function>(GV);
    if (FD->hasAttr<Mips16Attr>()) {
      Fn->addFnAttr("mips16");
    }
    else if (FD->hasAttr<NoMips16Attr>()) {
      Fn->addFnAttr("nomips16");
    }
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;

  unsigned getSizeOfUnwindException() const {
    return SizeOfUnwindException;
  }
};
}

void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
                                  SmallVectorImpl<llvm::Type *> &ArgList) const {
  llvm::IntegerType *IntTy =
    llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);

  // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
  for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
    ArgList.push_back(IntTy);

  // If necessary, add one more integer type to ArgList.
  unsigned R = TySize % (MinABIStackAlignInBytes * 8);

  if (R)
    ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
}

// In N32/64, an aligned double precision floating point field is passed in
// a register.
llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
  SmallVector<llvm::Type*, 8> ArgList, IntArgList;

  if (IsO32) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  if (Ty->isComplexType())
    return CGT.ConvertType(Ty);

  const RecordType *RT = Ty->getAs<RecordType>();

  // Unions/vectors are passed in integer registers.
  if (!RT || !RT->isStructureOrClassType()) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  const RecordDecl *RD = RT->getDecl();
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  assert(!(TySize % 8) && "Size of structure must be multiple of 8.");

  uint64_t LastOffset = 0;
  unsigned idx = 0;
  llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);

  // Iterate over fields in the struct/class and check if there are any aligned
  // double fields.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const QualType Ty = i->getType();
    const BuiltinType *BT = Ty->getAs<BuiltinType>();

    if (!BT || BT->getKind() != BuiltinType::Double)
      continue;

    uint64_t Offset = Layout.getFieldOffset(idx);
    if (Offset % 64) // Ignore doubles that are not aligned.
      continue;

    // Add ((Offset - LastOffset) / 64) args of type i64.
    for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
      ArgList.push_back(I64);

    // Add double type.
    ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
    LastOffset = Offset + 64;
  }

  CoerceToIntArgs(TySize - LastOffset, IntArgList);
  ArgList.append(IntArgList.begin(), IntArgList.end());

  return llvm::StructType::get(getVMContext(), ArgList);
}
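
// Illustrative example: under N64 a struct such as
//
//   struct S { int i; double d; };   // 128 bits, d at offset 64
//
// is coerced by HandleAggregates to { i64, double }, so the aligned double
// travels in a floating point register, while under O32 the same struct is
// simply broken into i32 pieces by CoerceToIntArgs.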

llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
                                        uint64_t Offset) const {
  if (OrigOffset + MinABIStackAlignInBytes > Offset)
    return 0;

  return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
}

ABIArgInfo
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
  uint64_t OrigOffset = Offset;
  uint64_t TySize = getContext().getTypeSize(Ty);
  uint64_t Align = getContext().getTypeAlign(Ty) / 8;

  Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
                   (uint64_t)StackAlignInBytes);
  unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align);
  Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8;

  if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
    // Ignore empty aggregates.
    if (TySize == 0)
      return ABIArgInfo::getIgnore();

    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
      Offset = OrigOffset + MinABIStackAlignInBytes;
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    }

    // If we have reached here, aggregates are passed directly by coercing to
    // another structure type. Padding is inserted if the offset of the
    // aggregate is unaligned.
    return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
                                 getPaddingType(OrigOffset, CurrOffset));
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  if (Ty->isPromotableIntegerType())
    return ABIArgInfo::getExtend();

  return ABIArgInfo::getDirect(
      0, 0, IsO32 ? 0 : getPaddingType(OrigOffset, CurrOffset));
}
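
// Illustrative arithmetic (N64, MinABIStackAlignInBytes == 8): if the running
// Offset is 8 bytes and the next argument is a 16-byte aggregate with 16-byte
// alignment, CurrOffset rounds up to 16, Offset advances to 32, and
// getPaddingType(8, 16) yields i64, inserting one register slot of padding
// before the coerced aggregate.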

llvm::Type*
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
  const RecordType *RT = RetTy->getAs<RecordType>();
  SmallVector<llvm::Type*, 8> RTList;

  if (RT && RT->isStructureOrClassType()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    unsigned FieldCnt = Layout.getFieldCount();

    // N32/64 returns struct/classes in floating point registers if the
    // following conditions are met:
    // 1. The size of the struct/class is no larger than 128-bit.
    // 2. The struct/class has one or two fields all of which are floating
    //    point types.
    // 3. The offset of the first field is zero (this follows what gcc does).
    //
    // Any other composite results are returned in integer registers.
    //
    if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
      RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
      for (; b != e; ++b) {
        const BuiltinType *BT = b->getType()->getAs<BuiltinType>();

        if (!BT || !BT->isFloatingPoint())
          break;

        RTList.push_back(CGT.ConvertType(b->getType()));
      }

      if (b == e)
        return llvm::StructType::get(getVMContext(), RTList,
                                     RD->hasAttr<PackedAttr>());

      RTList.clear();
    }
  }

  CoerceToIntArgs(Size, RTList);
  return llvm::StructType::get(getVMContext(), RTList);
}

ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
  uint64_t Size = getContext().getTypeSize(RetTy);

  if (RetTy->isVoidType() || Size == 0)
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
    if (isRecordReturnIndirect(RetTy, getCXXABI()))
      return ABIArgInfo::getIndirect(0);

    if (Size <= 128) {
      if (RetTy->isAnyComplexType())
        return ABIArgInfo::getDirect();

      // O32 returns integer vectors in registers.
      if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())
        return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));

      if (!IsO32)
        return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
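
// Illustrative example: under N32/64 a struct { float f; double d; } (two
// fields, both floating point, 128 bits in all, first field at offset zero)
// satisfies the conditions above and is returned as { float, double } in
// floating point registers, whereas struct { float f; int i; } fails
// condition 2 and is coerced to an integer type instead.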

void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
  ABIArgInfo &RetInfo = FI.getReturnInfo();
  RetInfo = classifyReturnType(FI.getReturnType());

  // Check if a pointer to an aggregate is passed as a hidden argument.
  uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type, Offset);
}

llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                    CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped;
  unsigned PtrWidth = getTarget().getPointerWidth(0);
  llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;

  if (TypeAlign > MinABIStackAlignInBytes) {
    llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
    llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
    llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
    llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
    llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
    AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
  }
  else
    AddrTyped = Builder.CreateBitCast(Addr, PTy);

  llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
  TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
  llvm::Value *NextAddr =
    Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
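
// Illustrative arithmetic for the alignment step in EmitVAArg above: with
// TypeAlign == 8 and ap.cur == 0x7fff0014, the add-and-mask sequence computes
// (0x7fff0014 + 7) & -8 == 0x7fff0018, so the argument is fetched from the
// next 8-byte aligned slot and ap advances from there.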

bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be as
  // canonical as it gets.

  // Everything on MIPS is 4 bytes.  Double-precision FP registers
  // are aliased to pairs of single-precision FP registers.
  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-31 are the general purpose registers, $0 - $31.
  // 32-63 are the floating-point registers, $f0 - $f31.
  // 64 and 65 are the multiply/divide registers, $hi and $lo.
  // 66 is the (notional, I think) register for signal-handler return.
  AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);

  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
  // They are one bit wide and ignored here.

  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
  // (coprocessor 1 is the FP unit)
  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
  // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
  return false;
}

//===----------------------------------------------------------------------===//
// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
//===----------------------------------------------------------------------===//

namespace {

class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
    : DefaultTargetCodeGenInfo(CGT) {}

  virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                                   CodeGen::CodeGenModule &M) const;
};

void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                               llvm::GlobalValue *GV,
                                               CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  if (M.getLangOpts().OpenCL) {
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL C Kernel functions are not subject to inlining
      F->addFnAttr(llvm::Attribute::NoInline);
      const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
      if (Attr) {
        // Convert the reqd_work_group_size() attributes to metadata.
        llvm::LLVMContext &Context = F->getContext();
        llvm::NamedMDNode *OpenCLMetadata =
            M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");

        SmallVector<llvm::Value*, 5> Operands;
        Operands.push_back(F);

        Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
                             llvm::APInt(32, Attr->getXDim())));
        Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
                             llvm::APInt(32, Attr->getYDim())));
        Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
                             llvm::APInt(32, Attr->getZDim())));

        // Add a boolean constant operand for "required" (true) or "hint"
        // (false) for implementing the work_group_size_hint attr later.
        // Currently always true as the hint is not yet implemented.
        Operands.push_back(llvm::ConstantInt::getTrue(Context));
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}

}
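
// Illustrative example (kernel source assumed, not from this file): an OpenCL
// kernel declared as
//
//   __kernel __attribute__((reqd_work_group_size(8, 4, 1))) void k(void);
//
// is marked noinline here and contributes an operand of the form
// {k, i32 8, i32 4, i32 1, i1 true} to the "opencl.kernel_wg_size_info"
// named metadata.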

//===----------------------------------------------------------------------===//
// Hexagon ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class HexagonABIInfo : public ABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    return 29;
  }
};

}

void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type);
}

ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size > 64)
    return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
  // Pass in the smallest viable integer type.
  else if (Size > 32)
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  else if (Size > 16)
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  else if (Size > 8)
    return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
  else
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}
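
// Illustrative example: a 6-byte struct argument (48 bits) lands in the
// "Size > 32" bucket above and is passed directly as an i64, while a 12-byte
// struct exceeds 64 bits and is passed indirectly with byval.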

ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
    return ABIArgInfo::getIndirect(0);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (isRecordReturnIndirect(RetTy, getCXXABI()))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Aggregates <= 8 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 64) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    if (Size <= 32)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  }

  return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
}

llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // FIXME: Need to handle alignment
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
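
// Illustrative example: a struct of two shorts (32 bits) is returned directly
// in r0 as an i32, whereas a 12-byte struct is larger than 64 bits and is
// returned indirectly through memory.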

//===----------------------------------------------------------------------===//
// SPARC v9 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Function arguments are mapped to a nominal "parameter array" and promoted to
// registers depending on their type. Each argument occupies 8 or 16 bytes in
// the array; structs larger than 16 bytes are passed indirectly.
//
// One case requires special care:
//
//   struct mixed {
//     int i;
//     float f;
//   };
//
// When a struct mixed is passed by value, it only occupies 8 bytes in the
// parameter array, but the int is passed in an integer register, and the float
// is passed in a floating point register. This is represented as two arguments
// with the LLVM IR inreg attribute:
//
//   declare void @f(i32 inreg %i, float inreg %f)
//
// The code generator will only allocate 4 bytes from the parameter array for
// the inreg arguments. All other arguments are allocated a multiple of 8
// bytes.
//
namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  // Coercion type builder for structs passed in registers. The coercion type
  // serves two purposes:
  //
  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
  //    in registers.
  // 2. Expose aligned floating point elements as first-level elements, so the
  //    code generator knows to pass them in floating point registers.
  //
  // We also compute the InReg flag which indicates that the struct contains
  // aligned 32-bit floats.
  //
  struct CoerceBuilder {
    llvm::LLVMContext &Context;
    const llvm::DataLayout &DL;
    SmallVector<llvm::Type*, 8> Elems;
    uint64_t Size;
    bool InReg;

    CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
      : Context(c), DL(dl), Size(0), InReg(false) {}

    // Pad Elems with integers until Size is ToSize.
    void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;

      // Finish the current 64-bit word.
      uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
      if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }

      // Add whole 64-bit words.
      while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }

      // Final in-word padding.
      if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }

    // Add a floating point element at Offset.
    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }

    // Add a struct type to the coercion type, starting at Offset (in bits).
    void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
      const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
      for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
        llvm::Type *ElemTy = StrTy->getElementType(i);
        uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
        switch (ElemTy->getTypeID()) {
        case llvm::Type::StructTyID:
          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
          break;
        case llvm::Type::FloatTyID:
          addFloat(ElemOffset, ElemTy, 32);
          break;
        case llvm::Type::DoubleTyID:
          addFloat(ElemOffset, ElemTy, 64);
          break;
        case llvm::Type::FP128TyID:
          addFloat(ElemOffset, ElemTy, 128);
          break;
        case llvm::Type::PointerTyID:
          if (ElemOffset % 64 == 0) {
            pad(ElemOffset);
            Elems.push_back(ElemTy);
            Size += 64;
          }
          break;
        default:
          break;
        }
      }
    }

    // Check if Ty is a usable substitute for the coercion type.
    bool isUsableType(llvm::StructType *Ty) const {
      if (Ty->getNumElements() != Elems.size())
        return false;
      for (unsigned i = 0, e = Elems.size(); i != e; ++i)
        if (Elems[i] != Ty->getElementType(i))
          return false;
      return true;
    }

    // Get the coercion type as a literal struct type.
    llvm::Type *getType() const {
      if (Elems.size() == 1)
        return Elems.front();
      else
        return llvm::StructType::get(Context, Elems);
    }
  };
};
}  // end anonymous namespace
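
// Illustrative example: for a 16-byte struct { int i; float f; double d; }
// the builder produces the coercion type { i32, float, double }: the int is
// covered by a 32-bit integer element, the aligned float and double are
// exposed as first-level elements, and the 32-bit float sets the InReg flag.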

ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Anything too big to fit in registers is passed with an explicit indirect
  // pointer / sret pointer.
  if (Size > SizeLimit)
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Integer types smaller than a register are extended.
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend();

  // Other non-aggregates go in registers.
  if (!isAggregateTypeForABI(Ty))
    return ABIArgInfo::getDirect();

  // If a C++ object has either a non-trivial copy constructor or a non-trivial
  // destructor, it is passed with an explicit indirect pointer / sret pointer.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

  // This is a small aggregate type that should be passed in registers.
  // Build a coercion type from the LLVM struct type.
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy)
    return ABIArgInfo::getDirect();

  CoerceBuilder CB(getVMContext(), getDataLayout());
  CB.addStruct(0, StrTy);
  CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));

  // Try to use the original type for coercion.
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();

  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  else
    return ABIArgInfo::getDirect(CoerceTy);
}
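
// Illustrative example: computeInfo below classifies return values with a
// 32-byte (256-bit) limit and arguments with a 16-byte (128-bit) limit, so a
// 24-byte struct is still returned in registers via a coercion type but is
// passed as an argument through an indirect pointer.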

llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyType(Ty, 16 * 8);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);

  llvm::Type *BPP = CGF.Int8PtrPtrTy;
  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  llvm::Value *ArgAddr;
  unsigned Stride;

  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");

  case ABIArgInfo::Extend:
    Stride = 8;
    ArgAddr = Builder
      .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
                          "extend");
    break;

  case ABIArgInfo::Direct:
    Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    ArgAddr = Addr;
    break;

  case ABIArgInfo::Indirect:
    Stride = 8;
    ArgAddr = Builder.CreateBitCast(Addr,
                                    llvm::PointerType::getUnqual(ArgPtrTy),
                                    "indirect");
    ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
    break;

  case ABIArgInfo::Ignore:
    return llvm::UndefValue::get(ArgPtrTy);
  }

  // Update VAList.
  Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
  Builder.CreateStore(Addr, VAListAddrAsBPP);

  return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
}

void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyType(it->type, 16 * 8);
}

namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
};
}  // end anonymous namespace

//===----------------------------------------------------------------------===//
// Xcore ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class XcoreTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  XcoreTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
};
}  // End anonymous namespace.

llvm::Value *XCoreABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr,
                                                       CGF.Int8PtrPtrTy);
  llvm::Value *AP = Builder.CreateLoad(VAListAddrAsBPP);

  // Handle the argument.
  ABIArgInfo AI = classifyArgumentType(Ty);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  llvm::Value *Val;
  uint64_t ArgSize = 0;
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = llvm::UndefValue::get(ArgPtrTy);
    ArgSize = 0;
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreatePointerCast(AP, ArgPtrTy);
    ArgSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    if (ArgSize < 4)
      ArgSize = 4;
    break;
  case ABIArgInfo::Indirect:
    llvm::Value *ArgAddr;
    ArgAddr = Builder.CreateBitCast(AP, llvm::PointerType::getUnqual(ArgPtrTy));
    ArgAddr = Builder.CreateLoad(ArgAddr);
    Val = Builder.CreatePointerCast(ArgAddr, ArgPtrTy);
    ArgSize = 4;
    break;
  }

  // Increment the VAList.
  if (ArgSize) {
    llvm::Value *APN = Builder.CreateConstGEP1_32(AP, ArgSize);
    Builder.CreateStore(APN, VAListAddrAsBPP);
  }
  return Val;
}
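
// Illustrative example: for a short argument the coercion type defaults to
// i16 above, so ArgSize is bumped from 2 to the 4-byte minimum and ap
// advances by one 4-byte slot.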

//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::aarch64:
    return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types));

  case llvm::Triple::arm:
  case llvm::Triple::thumb:
    {
      ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
      if (strcmp(getTarget().getABI(), "apcs-gnu") == 0)
        Kind = ARMABIInfo::APCS;
      else if (CodeGenOpts.FloatABI == "hard" ||
               (CodeGenOpts.FloatABI != "soft" &&
                Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
        Kind = ARMABIInfo::AAPCS_VFP;

      switch (Triple.getOS()) {
      case llvm::Triple::NaCl:
        return *(TheTargetCodeGenInfo =
                 new NaClARMTargetCodeGenInfo(Types, Kind));
      default:
        return *(TheTargetCodeGenInfo =
                 new ARMTargetCodeGenInfo(Types, Kind));
      }
    }

  case llvm::Triple::ppc:
    return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF())
      return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
    else
      return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le:
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::systemz:
    return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));

  case llvm::Triple::tce:
    return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool IsSmallStructInRegABI =
      X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = (Triple.getOS() == llvm::Triple::Win32);

    if (Triple.getOS() == llvm::Triple::Win32) {
      return *(TheTargetCodeGenInfo =
               new WinX86_32TargetCodeGenInfo(Types,
                                              IsDarwinVectorABI,
                                              IsSmallStructInRegABI,
                                              IsWin32FloatStructABI,
                                              CodeGenOpts.NumRegisterParameters));
    } else {
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types,
                                           IsDarwinVectorABI,
                                           IsSmallStructInRegABI,
                                           IsWin32FloatStructABI,
                                           CodeGenOpts.NumRegisterParameters));
    }
  }

  case llvm::Triple::x86_64: {
    bool HasAVX = strcmp(getTarget().getABI(), "avx") == 0;

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
    case llvm::Triple::MinGW32:
    case llvm::Triple::Cygwin:
      return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
    case llvm::Triple::NaCl:
      return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types,
                                                                      HasAVX));
    default:
      return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
                                                                  HasAVX));
    }
  }
  case llvm::Triple::hexagon:
    return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return *(TheTargetCodeGenInfo = new XcoreTargetCodeGenInfo(Types));
  }
}
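
// Illustrative usage (simplified): the rest of CodeGen reaches the per-target
// hooks lazily through this accessor, e.g.
//
//   const ABIInfo &Info = CGM.getTargetCodeGenInfo().getABIInfo();
//
// so at most one TargetCodeGenInfo is constructed per module, keyed on the
// target triple selected above.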