//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>    // std::sort

using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}
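
// Illustrative example: given a (hypothetical) declaration
//   typedef union {
//     int *ip;
//     float *fp;
//   } __attribute__((transparent_union)) UPtr;
// useFirstFieldIfTransparentUnion maps an argument of type 'UPtr' to 'int *',
// the type of the union's first field, and the argument is then classified
// exactly as an 'int *' would be.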

CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  //   AArch64    Linux
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static or
  // dynamic.
  Opt = "-l";
  Opt += Lib;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}
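
// Illustrative example (C code): in
//   struct S { int : 0; char tail[0]; };
// both fields are "empty" under the rules above: the unnamed bit-field by the
// first check, and the zero-length array because AT->getSize() == 0. The
// isEmptyRecord() predicate below therefore reports S as empty.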

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}
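
// Illustrative example (C code): for
//   struct S { struct { double d; } inner; int pad[0]; };
// isSingleElementStruct returns the 'double' type: the zero-length array is
// ignored as empty, the nested struct is recursed into, and the final size
// check passes because sizeof(S) == sizeof(double).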

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding.  (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to avoid
/// inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (const auto *FD : RD->fields()) {
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}
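
// Illustrative example: on i386 Linux (where 'double' has 4-byte alignment),
//   struct S { int a; double b; };
// is expandable: both fields are 32- or 64-bit basic types and the field
// sizes sum to sizeof(S), so there are no holes. A struct containing a
// 'short' field, by contrast, fails is32Or64BitBasicType and stays byval.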

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return nullptr;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
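
// Illustrative examples of the default classification: a 'short' argument is
// a promotable integer, so it becomes ABIArgInfo::getExtend() and is
// sign-extended to a full register at the IR boundary, while an aggregate
// such as 'struct { char buf[16]; }' is passed indirectly through a pointer
// to a temporary.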

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI.  Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return nullptr;
}

/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    return ABIArgInfo::getIndirect(0);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE registers
/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}
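
// Illustrative example: under __vectorcall, a (hypothetical) homogeneous
// aggregate such as
//   struct HVA4 { __m128 w, x, y, z; };
// qualifies for SSE registers (four members, each a 128-bit vector), while
// the same struct with five __m128 members exceeds the limit above and is
// passed in memory.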

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned FreeSSERegs;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsWin32StructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType Ty, CCState &State) const;
  bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           unsigned &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                          bool d, bool p, bool w, unsigned r)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x06 << 8) |  // .+0x08
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  bool hasSjLjLowering(CodeGen::CodeGenFunction &CGF) const override {
    return true;
  }
};

}

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     mov $0, $1
///     mov eax, $1
/// The result will be:
///     mov $0, $2
///     mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}
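
// Illustrative example: 'struct { short a, b; }' is 32 bits wide and every
// field passes the recursive check, so it is returned in a register (EAX);
// 'struct { char c[3]; }' is 24 bits wide, fails isRegisterSize, and is
// returned through a hidden sret pointer instead.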

ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    return ABIArgInfo::getIndirectInReg(/*Align=*/0, /*ByVal=*/false);
  }
  return ABIArgInfo::getIndirect(/*Align=*/0, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(State);

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
    }

    return getIndirectReturnResult(State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSSEVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}
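
// Illustrative example: on Darwin, a (hypothetical) 16-byte-aligned
//   struct M { __m128 v; };
// satisfies isRecordWithSSEVectorType, so getTypeStackAlignInBytes below
// gives it a 16-byte stack slot; a plain 'struct { int a, b; }' falls back
// to the default 4-byte stack alignment.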

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return ABIArgInfo::getIndirectInReg(0, false);
    }
    return ABIArgInfo::getIndirect(0, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4, /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
                                   bool &NeedsPadding) const {
  NeedsPadding = false;
  Class C = classify(Ty);
  if (C == Float)
    return false;

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall) {
    if (Size > 32)
      return false;

    if (Ty->isIntegralOrEnumerationType())
      return true;

    if (Ty->isPointerType())
      return true;

    if (Ty->isReferenceType())
      return true;

    if (State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}
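
// Illustrative example: under __fastcall (two free registers), the first two
// 32-bit integer or pointer arguments are marked inreg and travel in ECX and
// EDX; a 'double' argument classifies as Float above and is always passed on
// the stack.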

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // vectorcall adds the concept of a homogeneous vector aggregate, similar
  // to other targets.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;
      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    if (RT) {
      // Structs are always byval on win32, regardless of what they contain.
      if (IsWin32StructABI)
        return getIndirectResult(Ty, true, State);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, State);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding;
    if (shouldUseInReg(Ty, State, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall ||
          State.CC == llvm::CallingConv::X86_VectorCall,
          PaddingType);

    return getIndirectResult(Ty, true, State);
  }
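
  // Note on the vector cases below: on Darwin a small vector such as a
  // one-element <1 x double> is coerced to the equally sized integer (i64),
  // and a 64-bit integer MMX vector such as <8 x i8> is likewise coerced to
  // i64, sidestepping the special x86_mmx IR type.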
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory; we handle this by passing
    // them as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }


  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool NeedsPadding;
  bool InReg = shouldUseInReg(Ty, State, NeedsPadding);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI.getCallingConvention());
  if (State.CC == llvm::CallingConv::X86_FastCall)
    State.FreeRegs = 2;
  else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!getCXXABI().classifyReturnType(FI)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs;  // The sret parameter consumes a register.
      FI.getReturnInfo().setInReg(true);
    }
  }

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;

  bool UsedInAlloca = false;
  for (auto &I : FI.arguments()) {
    I.info = classifyArgumentType(I.type, State);
    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and rewrite
  // all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}

void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                                   unsigned &StackOffset,
                                   ABIArgInfo &Info, QualType Type) const {
  assert(StackOffset % 4U == 0 && "unaligned inalloca struct");
  Info = ABIArgInfo::getInAlloca(FrameFields.size());
  FrameFields.push_back(CGT.ConvertTypeForMem(Type));
  StackOffset += getContext().getTypeSizeInChars(Type).getQuantity();

  // Insert padding bytes to respect alignment. For x86_32, each argument is 4
  // byte aligned.
  if (StackOffset % 4U) {
    unsigned OldOffset = StackOffset;
    StackOffset = llvm::RoundUpToAlignment(StackOffset, 4U);
    unsigned NumBytes = StackOffset - OldOffset;
    assert(NumBytes);
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes);
    FrameFields.push_back(Ty);
  }
}
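
// Illustrative example: appending a 'char' field when StackOffset is 4 bumps
// StackOffset to 5; the code above then rounds it back up to 8 and appends a
// [3 x i8] padding field, so the next argument starts 4-byte aligned.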

static bool isArgInAlloca(const ABIArgInfo &Info) {
  // Leave ignored and inreg arguments alone.
  switch (Info.getKind()) {
  case ABIArgInfo::InAlloca:
    return true;
  case ABIArgInfo::Indirect:
    assert(Info.getIndirectByVal());
    return true;
  case ABIArgInfo::Ignore:
    return false;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
  case ABIArgInfo::Expand:
    if (Info.getInReg())
      return false;
    return true;
  }
  llvm_unreachable("invalid enum");
}

void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  unsigned StackOffset = 0;
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();

  // Put 'this' into the struct before 'sret', if necessary.
  bool IsThisCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
      isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }

  // Put the sret parameter into the inalloca struct if it's in memory.
  if (Ret.isIndirect() && !Ret.getInReg()) {
    CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  if (IsThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {
    if (isArgInAlloca(I->info))
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true));
}

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute if the address needs to be aligned
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
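
// Illustrative example: for a type with 8-byte stack alignment, the rounding
// in EmitVAArg computes ap.cur.aligned = (ap.cur + 7) & -8, and ap.next then
// advances by the type's size rounded up to a multiple of 8.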

bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack:  // -fpcc-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs:  // -freg-struct-return
    return true;
  }

  if (Triple.isOSDarwin())
    return true;

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Bitrig:
  case llvm::Triple::Win32:
    return true;
  default:
    return false;
  }
}

void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                      llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                              B));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF,
    llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//


namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call.
  /// In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// \param isNamedArg - Whether the argument in question is a "named"
  /// argument, as used in AMD64-ABI 3.5.7.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE,
                                  bool isNamedArg) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  bool HasAVX;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
                                           /*isNamedArg*/true);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  bool has64BitPointers() const {
    return Has64BitPointers;
  }
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs,
                      bool IsReturnType) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
  bool HasAVX;
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)), HasAVX(HasAVX) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior.
    // However, don't do that when AVX types are involved: the ABI explicitly
    // states it is undefined, and it doesn't work in practice because of how
    // the ABI defines varargs anyway.
    if (fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig;
    if (getABIInfo().has64BitPointers())
      Sig = (0xeb << 0) |  // jmp rel8
            (0x0a << 8) |  // .+0x0c
            ('F' << 16) |
            ('T' << 24);
    else
      Sig = (0xeb << 0) |  // jmp rel8
            (0x06 << 8) |  // .+0x08
            ('F' << 16) |
            ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return HasAVX ? 32 : 16;
  }

  bool hasSjLjLowering(CodeGen::CodeGenFunction &CGF) const override {
    return true;
  }
};

class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo {
public:
  PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : X86_64TargetCodeGenInfo(CGT, HasAVX) {}

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "\01";
    Opt += Lib;
  }
};

static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
  // If the argument does not end in .lib, automatically add the suffix.
  // If the argument contains a space, enclose it in quotes.
  // This matches the behavior of MSVC.
  bool Quote = (Lib.find(" ") != StringRef::npos);
  std::string ArgStr = Quote ? "\"" : "";
  ArgStr += Lib;
  if (!Lib.endswith_lower(".lib"))
    ArgStr += ".lib";
  ArgStr += Quote ? "\"" : "";
  return ArgStr;
}
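
// Illustrative example: qualifyWindowsLibrary("msvcrt") yields "msvcrt.lib",
// while qualifyWindowsLibrary("my lib") yields "\"my lib.lib\"" so that the
// embedded space survives on the linker command line.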
"\"" : ""; 1644 return ArgStr; 1645 } 1646 1647 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo { 1648 public: 1649 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 1650 bool d, bool p, bool w, unsigned RegParms) 1651 : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {} 1652 1653 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 1654 CodeGen::CodeGenModule &CGM) const override; 1655 1656 void getDependentLibraryOption(llvm::StringRef Lib, 1657 llvm::SmallString<24> &Opt) const override { 1658 Opt = "/DEFAULTLIB:"; 1659 Opt += qualifyWindowsLibrary(Lib); 1660 } 1661 1662 void getDetectMismatchOption(llvm::StringRef Name, 1663 llvm::StringRef Value, 1664 llvm::SmallString<32> &Opt) const override { 1665 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 1666 } 1667 }; 1668 1669 static void addStackProbeSizeTargetAttribute(const Decl *D, 1670 llvm::GlobalValue *GV, 1671 CodeGen::CodeGenModule &CGM) { 1672 if (isa<FunctionDecl>(D)) { 1673 if (CGM.getCodeGenOpts().StackProbeSize != 4096) { 1674 llvm::Function *Fn = cast<llvm::Function>(GV); 1675 1676 Fn->addFnAttr("stack-probe-size", llvm::utostr(CGM.getCodeGenOpts().StackProbeSize)); 1677 } 1678 } 1679 } 1680 1681 void WinX86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 1682 llvm::GlobalValue *GV, 1683 CodeGen::CodeGenModule &CGM) const { 1684 X86_32TargetCodeGenInfo::SetTargetAttributes(D, GV, CGM); 1685 1686 addStackProbeSizeTargetAttribute(D, GV, CGM); 1687 } 1688 1689 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 1690 bool HasAVX; 1691 public: 1692 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) 1693 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)), HasAVX(HasAVX) {} 1694 1695 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 1696 CodeGen::CodeGenModule &CGM) const override; 1697 1698 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 1699 return 7; 1700 } 1701 1702 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 1703 llvm::Value *Address) const override { 1704 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 1705 1706 // 0-15 are the 16 integer registers. 1707 // 16 is %rip. 1708 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 1709 return false; 1710 } 1711 1712 void getDependentLibraryOption(llvm::StringRef Lib, 1713 llvm::SmallString<24> &Opt) const override { 1714 Opt = "/DEFAULTLIB:"; 1715 Opt += qualifyWindowsLibrary(Lib); 1716 } 1717 1718 void getDetectMismatchOption(llvm::StringRef Name, 1719 llvm::StringRef Value, 1720 llvm::SmallString<32> &Opt) const override { 1721 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 1722 } 1723 1724 unsigned getOpenMPSimdDefaultAlignment(QualType) const override { 1725 return HasAVX ? 32 : 16; 1726 } 1727 }; 1728 1729 void WinX86_64TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 1730 llvm::GlobalValue *GV, 1731 CodeGen::CodeGenModule &CGM) const { 1732 TargetCodeGenInfo::SetTargetAttributes(D, GV, CGM); 1733 1734 addStackProbeSizeTargetAttribute(D, GV, CGM); 1735 } 1736 } 1737 1738 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, 1739 Class &Hi) const { 1740 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: 1741 // 1742 // (a) If one of the classes is Memory, the whole argument is passed in 1743 // memory. 1744 // 1745 // (b) If X87UP is not preceded by X87, the whole argument is passed in 1746 // memory. 
1747 // 1748 // (c) If the size of the aggregate exceeds two eightbytes and the first 1749 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole 1750 // argument is passed in memory. NOTE: This is necessary to keep the 1751 // ABI working for processors that don't support the __m256 type. 1752 // 1753 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. 1754 // 1755 // Some of these are enforced by the merging logic. Others can arise 1756 // only with unions; for example: 1757 // union { _Complex double; unsigned; } 1758 // 1759 // Note that clauses (b) and (c) were added in 0.98. 1760 // 1761 if (Hi == Memory) 1762 Lo = Memory; 1763 if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) 1764 Lo = Memory; 1765 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) 1766 Lo = Memory; 1767 if (Hi == SSEUp && Lo != SSE) 1768 Hi = SSE; 1769 } 1770 1771 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { 1772 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 1773 // classified recursively so that always two fields are 1774 // considered. The resulting class is calculated according to 1775 // the classes of the fields in the eightbyte: 1776 // 1777 // (a) If both classes are equal, this is the resulting class. 1778 // 1779 // (b) If one of the classes is NO_CLASS, the resulting class is 1780 // the other class. 1781 // 1782 // (c) If one of the classes is MEMORY, the result is the MEMORY 1783 // class. 1784 // 1785 // (d) If one of the classes is INTEGER, the result is the 1786 // INTEGER. 1787 // 1788 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 1789 // MEMORY is used as class. 1790 // 1791 // (f) Otherwise class SSE is used. 1792 1793 // Accum should never be memory (we should have returned) or 1794 // ComplexX87 (because this cannot be passed in a structure). 1795 assert((Accum != Memory && Accum != ComplexX87) && 1796 "Invalid accumulated classification during merge."); 1797 if (Accum == Field || Field == NoClass) 1798 return Accum; 1799 if (Field == Memory) 1800 return Memory; 1801 if (Accum == NoClass) 1802 return Field; 1803 if (Accum == Integer || Field == Integer) 1804 return Integer; 1805 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 1806 Accum == X87 || Accum == X87Up) 1807 return Memory; 1808 return SSE; 1809 } 1810 1811 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, 1812 Class &Lo, Class &Hi, bool isNamedArg) const { 1813 // FIXME: This code can be simplified by introducing a simple value class for 1814 // Class pairs with appropriate constructor methods for the various 1815 // situations. 1816 1817 // FIXME: Some of the split computations are wrong; unaligned vectors 1818 // shouldn't be passed in registers for example, so there is no chance they 1819 // can straddle an eightbyte. Verify & simplify. 1820 1821 Lo = Hi = NoClass; 1822 1823 Class &Current = OffsetBase < 64 ? 
Lo : Hi; 1824 Current = Memory; 1825 1826 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 1827 BuiltinType::Kind k = BT->getKind(); 1828 1829 if (k == BuiltinType::Void) { 1830 Current = NoClass; 1831 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 1832 Lo = Integer; 1833 Hi = Integer; 1834 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 1835 Current = Integer; 1836 } else if ((k == BuiltinType::Float || k == BuiltinType::Double) || 1837 (k == BuiltinType::LongDouble && 1838 getTarget().getTriple().isOSNaCl())) { 1839 Current = SSE; 1840 } else if (k == BuiltinType::LongDouble) { 1841 Lo = X87; 1842 Hi = X87Up; 1843 } 1844 // FIXME: _Decimal32 and _Decimal64 are SSE. 1845 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 1846 return; 1847 } 1848 1849 if (const EnumType *ET = Ty->getAs<EnumType>()) { 1850 // Classify the underlying integer type. 1851 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg); 1852 return; 1853 } 1854 1855 if (Ty->hasPointerRepresentation()) { 1856 Current = Integer; 1857 return; 1858 } 1859 1860 if (Ty->isMemberPointerType()) { 1861 if (Ty->isMemberFunctionPointerType()) { 1862 if (Has64BitPointers) { 1863 // If Has64BitPointers, this is an {i64, i64}, so classify both 1864 // Lo and Hi now. 1865 Lo = Hi = Integer; 1866 } else { 1867 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that 1868 // straddles an eightbyte boundary, Hi should be classified as well. 1869 uint64_t EB_FuncPtr = (OffsetBase) / 64; 1870 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64; 1871 if (EB_FuncPtr != EB_ThisAdj) { 1872 Lo = Hi = Integer; 1873 } else { 1874 Current = Integer; 1875 } 1876 } 1877 } else { 1878 Current = Integer; 1879 } 1880 return; 1881 } 1882 1883 if (const VectorType *VT = Ty->getAs<VectorType>()) { 1884 uint64_t Size = getContext().getTypeSize(VT); 1885 if (Size == 32) { 1886 // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x 1887 // float> as integer. 1888 Current = Integer; 1889 1890 // If this type crosses an eightbyte boundary, it should be 1891 // split. 1892 uint64_t EB_Real = (OffsetBase) / 64; 1893 uint64_t EB_Imag = (OffsetBase + Size - 1) / 64; 1894 if (EB_Real != EB_Imag) 1895 Hi = Lo; 1896 } else if (Size == 64) { 1897 // gcc passes <1 x double> in memory. :( 1898 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) 1899 return; 1900 1901 // gcc passes <1 x long long> as INTEGER. 1902 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) || 1903 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) || 1904 VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) || 1905 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong)) 1906 Current = Integer; 1907 else 1908 Current = SSE; 1909 1910 // If this type crosses an eightbyte boundary, it should be 1911 // split. 1912 if (OffsetBase && OffsetBase != 64) 1913 Hi = Lo; 1914 } else if (Size == 128 || (HasAVX && isNamedArg && Size == 256)) { 1915 // Arguments of 256-bits are split into four eightbyte chunks. The 1916 // least significant one belongs to class SSE and all the others to class 1917 // SSEUP. The original Lo and Hi design considers that types can't be 1918 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. 1919 // This design isn't correct for 256-bits, but since there're no cases 1920 // where the upper parts would need to be inspected, avoid adding 1921 // complexity and just consider Hi to match the 64-256 part. 
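  // (Illustrative example: a named __m256 argument reaches this branch
  // and is classified Lo=SSE, Hi=SSEUp, so it travels in a single YMM
  // register.)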
1922 // 1923 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in 1924 // registers if they are "named", i.e. not part of the "..." of a 1925 // variadic function. 1926 Lo = SSE; 1927 Hi = SSEUp; 1928 } 1929 return; 1930 } 1931 1932 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 1933 QualType ET = getContext().getCanonicalType(CT->getElementType()); 1934 1935 uint64_t Size = getContext().getTypeSize(Ty); 1936 if (ET->isIntegralOrEnumerationType()) { 1937 if (Size <= 64) 1938 Current = Integer; 1939 else if (Size <= 128) 1940 Lo = Hi = Integer; 1941 } else if (ET == getContext().FloatTy) 1942 Current = SSE; 1943 else if (ET == getContext().DoubleTy || 1944 (ET == getContext().LongDoubleTy && 1945 getTarget().getTriple().isOSNaCl())) 1946 Lo = Hi = SSE; 1947 else if (ET == getContext().LongDoubleTy) 1948 Current = ComplexX87; 1949 1950 // If this complex type crosses an eightbyte boundary then it 1951 // should be split. 1952 uint64_t EB_Real = (OffsetBase) / 64; 1953 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; 1954 if (Hi == NoClass && EB_Real != EB_Imag) 1955 Hi = Lo; 1956 1957 return; 1958 } 1959 1960 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 1961 // Arrays are treated like structures. 1962 1963 uint64_t Size = getContext().getTypeSize(Ty); 1964 1965 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 1966 // than four eightbytes, ..., it has class MEMORY. 1967 if (Size > 256) 1968 return; 1969 1970 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 1971 // fields, it has class MEMORY. 1972 // 1973 // Only need to check alignment of array base. 1974 if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) 1975 return; 1976 1977 // Otherwise implement simplified merge. We could be smarter about 1978 // this, but it isn't worth it and would be harder to verify. 1979 Current = NoClass; 1980 uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); 1981 uint64_t ArraySize = AT->getSize().getZExtValue(); 1982 1983 // The only case a 256-bit wide vector could be used is when the array 1984 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 1985 // to work for sizes wider than 128, early check and fallback to memory. 1986 if (Size > 128 && EltSize != 256) 1987 return; 1988 1989 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 1990 Class FieldLo, FieldHi; 1991 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg); 1992 Lo = merge(Lo, FieldLo); 1993 Hi = merge(Hi, FieldHi); 1994 if (Lo == Memory || Hi == Memory) 1995 break; 1996 } 1997 1998 postMerge(Size, Lo, Hi); 1999 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 2000 return; 2001 } 2002 2003 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2004 uint64_t Size = getContext().getTypeSize(Ty); 2005 2006 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 2007 // than four eightbytes, ..., it has class MEMORY. 2008 if (Size > 256) 2009 return; 2010 2011 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial 2012 // copy constructor or a non-trivial destructor, it is passed by invisible 2013 // reference. 2014 if (getRecordArgABI(RT, getCXXABI())) 2015 return; 2016 2017 const RecordDecl *RD = RT->getDecl(); 2018 2019 // Assume variable sized types are passed in memory. 
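  // (For instance, a type with a flexible array member such as
  //   struct S { int len; char data[]; };
  // keeps the default MEMORY classification and is passed on the stack.)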
2020 if (RD->hasFlexibleArrayMember()) 2021 return; 2022 2023 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 2024 2025 // Reset Lo class, this will be recomputed. 2026 Current = NoClass; 2027 2028 // If this is a C++ record, classify the bases first. 2029 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 2030 for (const auto &I : CXXRD->bases()) { 2031 assert(!I.isVirtual() && !I.getType()->isDependentType() && 2032 "Unexpected base class!"); 2033 const CXXRecordDecl *Base = 2034 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 2035 2036 // Classify this field. 2037 // 2038 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 2039 // single eightbyte, each is classified separately. Each eightbyte gets 2040 // initialized to class NO_CLASS. 2041 Class FieldLo, FieldHi; 2042 uint64_t Offset = 2043 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); 2044 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg); 2045 Lo = merge(Lo, FieldLo); 2046 Hi = merge(Hi, FieldHi); 2047 if (Lo == Memory || Hi == Memory) 2048 break; 2049 } 2050 } 2051 2052 // Classify the fields one at a time, merging the results. 2053 unsigned idx = 0; 2054 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2055 i != e; ++i, ++idx) { 2056 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 2057 bool BitField = i->isBitField(); 2058 2059 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than 2060 // four eightbytes, or it contains unaligned fields, it has class MEMORY. 2061 // 2062 // The only case a 256-bit wide vector could be used is when the struct 2063 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 2064 // to work for sizes wider than 128, early check and fallback to memory. 2065 // 2066 if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) { 2067 Lo = Memory; 2068 return; 2069 } 2070 // Note, skip this test for bit-fields, see below. 2071 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { 2072 Lo = Memory; 2073 return; 2074 } 2075 2076 // Classify this field. 2077 // 2078 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate 2079 // exceeds a single eightbyte, each is classified 2080 // separately. Each eightbyte gets initialized to class 2081 // NO_CLASS. 2082 Class FieldLo, FieldHi; 2083 2084 // Bit-fields require special handling, they do not force the 2085 // structure to be passed in memory even if unaligned, and 2086 // therefore they can straddle an eightbyte. 2087 if (BitField) { 2088 // Ignore padding bit-fields. 2089 if (i->isUnnamedBitfield()) 2090 continue; 2091 2092 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 2093 uint64_t Size = i->getBitWidthValue(getContext()); 2094 2095 uint64_t EB_Lo = Offset / 64; 2096 uint64_t EB_Hi = (Offset + Size - 1) / 64; 2097 2098 if (EB_Lo) { 2099 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 2100 FieldLo = NoClass; 2101 FieldHi = Integer; 2102 } else { 2103 FieldLo = Integer; 2104 FieldHi = EB_Hi ? 
Integer : NoClass;
2105 }
2106 } else
2107 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2108 Lo = merge(Lo, FieldLo);
2109 Hi = merge(Hi, FieldHi);
2110 if (Lo == Memory || Hi == Memory)
2111 break;
2112 }
2113
2114 postMerge(Size, Lo, Hi);
2115 }
2116 }
2117
2118 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
2119 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2120 // place naturally.
2121 if (!isAggregateTypeForABI(Ty)) {
2122 // Treat an enum type as its underlying type.
2123 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2124 Ty = EnumTy->getDecl()->getIntegerType();
2125
2126 return (Ty->isPromotableIntegerType() ?
2127 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2128 }
2129
2130 return ABIArgInfo::getIndirect(0);
2131 }
2132
2133 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
2134 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
2135 uint64_t Size = getContext().getTypeSize(VecTy);
2136 unsigned LargestVector = HasAVX ? 256 : 128;
2137 if (Size <= 64 || Size > LargestVector)
2138 return true;
2139 }
2140
2141 return false;
2142 }
2143
2144 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
2145 unsigned freeIntRegs) const {
2146 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2147 // place naturally.
2148 //
2149 // This assumption is optimistic, as there could be free registers available
2150 // when we need to pass this argument in memory, and LLVM could try to pass
2151 // the argument in the free register. This does not seem to happen currently,
2152 // but this code would be much safer if we could mark the argument with
2153 // 'onstack'. See PR12193.
2154 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
2155 // Treat an enum type as its underlying type.
2156 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2157 Ty = EnumTy->getDecl()->getIntegerType();
2158
2159 return (Ty->isPromotableIntegerType() ?
2160 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2161 }
2162
2163 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
2164 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
2165
2166 // Compute the byval alignment. We specify the alignment of the byval in all
2167 // cases so that the mid-level optimizer knows the alignment of the byval.
2168 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2169
2170 // Attempt to avoid passing indirect results using byval when possible. This
2171 // is important for good codegen.
2172 //
2173 // We do this by coercing the value into a scalar type which the backend can
2174 // handle naturally (i.e., without using byval).
2175 //
2176 // For simplicity, we currently only do this when we have exhausted all of the
2177 // free integer registers. Doing this when there are free integer registers
2178 // would require more care, as we would have to ensure that the coerced value
2179 // did not claim the unused register. That would require either reordering the
2180 // arguments to the function (so that any subsequent inreg values came first),
2181 // or only doing this optimization when there were no following arguments that
2182 // might be inreg.
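  // (Hypothetical illustration: with no free GPRs left, an 8-byte
  //   struct S { long x; };
  // is coerced below into a plain i64 that lands on the stack rather
  // than being passed byval.)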
2183 // 2184 // We currently expect it to be rare (particularly in well written code) for 2185 // arguments to be passed on the stack when there are still free integer 2186 // registers available (this would typically imply large structs being passed 2187 // by value), so this seems like a fair tradeoff for now. 2188 // 2189 // We can revisit this if the backend grows support for 'onstack' parameter 2190 // attributes. See PR12193. 2191 if (freeIntRegs == 0) { 2192 uint64_t Size = getContext().getTypeSize(Ty); 2193 2194 // If this type fits in an eightbyte, coerce it into the matching integral 2195 // type, which will end up on the stack (with alignment 8). 2196 if (Align == 8 && Size <= 64) 2197 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2198 Size)); 2199 } 2200 2201 return ABIArgInfo::getIndirect(Align); 2202 } 2203 2204 /// The ABI specifies that a value should be passed in a full vector XMM/YMM 2205 /// register. Pick an LLVM IR type that will be passed as a vector register. 2206 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const { 2207 // Wrapper structs/arrays that only contain vectors are passed just like 2208 // vectors; strip them off if present. 2209 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext())) 2210 Ty = QualType(InnerTy, 0); 2211 2212 llvm::Type *IRType = CGT.ConvertType(Ty); 2213 assert(isa<llvm::VectorType>(IRType) && 2214 "Trying to return a non-vector type in a vector register!"); 2215 return IRType; 2216 } 2217 2218 /// BitsContainNoUserData - Return true if the specified [start,end) bit range 2219 /// is known to either be off the end of the specified type or being in 2220 /// alignment padding. The user type specified is known to be at most 128 bits 2221 /// in size, and have passed through X86_64ABIInfo::classify with a successful 2222 /// classification that put one of the two halves in the INTEGER class. 2223 /// 2224 /// It is conservatively correct to return false. 2225 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, 2226 unsigned EndBit, ASTContext &Context) { 2227 // If the bytes being queried are off the end of the type, there is no user 2228 // data hiding here. This handles analysis of builtins, vectors and other 2229 // types that don't contain interesting padding. 2230 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 2231 if (TySize <= StartBit) 2232 return true; 2233 2234 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 2235 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 2236 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 2237 2238 // Check each element to see if the element overlaps with the queried range. 2239 for (unsigned i = 0; i != NumElts; ++i) { 2240 // If the element is after the span we care about, then we're done.. 2241 unsigned EltOffset = i*EltSize; 2242 if (EltOffset >= EndBit) break; 2243 2244 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0; 2245 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 2246 EndBit-EltOffset, Context)) 2247 return false; 2248 } 2249 // If it overlaps no elements, then it is safe to process as padding. 2250 return true; 2251 } 2252 2253 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2254 const RecordDecl *RD = RT->getDecl(); 2255 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 2256 2257 // If this is a C++ record, check the bases first. 
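  // (A base subobject can hold user data at offsets before any field of
  // the derived class, so its bits have to be scanned as well.)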
2258 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 2259 for (const auto &I : CXXRD->bases()) { 2260 assert(!I.isVirtual() && !I.getType()->isDependentType() && 2261 "Unexpected base class!"); 2262 const CXXRecordDecl *Base = 2263 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 2264 2265 // If the base is after the span we care about, ignore it. 2266 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 2267 if (BaseOffset >= EndBit) continue; 2268 2269 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 2270 if (!BitsContainNoUserData(I.getType(), BaseStart, 2271 EndBit-BaseOffset, Context)) 2272 return false; 2273 } 2274 } 2275 2276 // Verify that no field has data that overlaps the region of interest. Yes 2277 // this could be sped up a lot by being smarter about queried fields, 2278 // however we're only looking at structs up to 16 bytes, so we don't care 2279 // much. 2280 unsigned idx = 0; 2281 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2282 i != e; ++i, ++idx) { 2283 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 2284 2285 // If we found a field after the region we care about, then we're done. 2286 if (FieldOffset >= EndBit) break; 2287 2288 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 2289 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 2290 Context)) 2291 return false; 2292 } 2293 2294 // If nothing in this record overlapped the area of interest, then we're 2295 // clean. 2296 return true; 2297 } 2298 2299 return false; 2300 } 2301 2302 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 2303 /// float member at the specified offset. For example, {int,{float}} has a 2304 /// float at offset 4. It is conservatively correct for this routine to return 2305 /// false. 2306 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 2307 const llvm::DataLayout &TD) { 2308 // Base case if we find a float. 2309 if (IROffset == 0 && IRType->isFloatTy()) 2310 return true; 2311 2312 // If this is a struct, recurse into the field at the specified offset. 2313 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 2314 const llvm::StructLayout *SL = TD.getStructLayout(STy); 2315 unsigned Elt = SL->getElementContainingOffset(IROffset); 2316 IROffset -= SL->getElementOffset(Elt); 2317 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 2318 } 2319 2320 // If this is an array, recurse into the field at the specified offset. 2321 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 2322 llvm::Type *EltTy = ATy->getElementType(); 2323 unsigned EltSize = TD.getTypeAllocSize(EltTy); 2324 IROffset -= IROffset/EltSize*EltSize; 2325 return ContainsFloatAtOffset(EltTy, IROffset, TD); 2326 } 2327 2328 return false; 2329 } 2330 2331 2332 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 2333 /// low 8 bytes of an XMM register, corresponding to the SSE class. 2334 llvm::Type *X86_64ABIInfo:: 2335 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 2336 QualType SourceTy, unsigned SourceOffset) const { 2337 // The only three choices we have are either double, <2 x float>, or float. We 2338 // pass as float if the last 4 bytes is just padding. This happens for 2339 // structs that contain 3 floats. 
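  // (Worked example: for struct { float x, y, z; } the high eightbyte is
  // one float plus four bytes of tail padding, so we return float; for
  // struct { float a, b, c, d; } both floats of the high eightbyte are
  // user data and we return <2 x float>.)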
2340 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 2341 SourceOffset*8+64, getContext())) 2342 return llvm::Type::getFloatTy(getVMContext()); 2343 2344 // We want to pass as <2 x float> if the LLVM IR type contains a float at 2345 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 2346 // case. 2347 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && 2348 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) 2349 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 2350 2351 return llvm::Type::getDoubleTy(getVMContext()); 2352 } 2353 2354 2355 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 2356 /// an 8-byte GPR. This means that we either have a scalar or we are talking 2357 /// about the high or low part of an up-to-16-byte struct. This routine picks 2358 /// the best LLVM IR type to represent this, which may be i64 or may be anything 2359 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 2360 /// etc). 2361 /// 2362 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 2363 /// the source type. IROffset is an offset in bytes into the LLVM IR type that 2364 /// the 8-byte value references. PrefType may be null. 2365 /// 2366 /// SourceTy is the source-level type for the entire argument. SourceOffset is 2367 /// an offset into this that we're processing (which is always either 0 or 8). 2368 /// 2369 llvm::Type *X86_64ABIInfo:: 2370 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 2371 QualType SourceTy, unsigned SourceOffset) const { 2372 // If we're dealing with an un-offset LLVM IR type, then it means that we're 2373 // returning an 8-byte unit starting with it. See if we can safely use it. 2374 if (IROffset == 0) { 2375 // Pointers and int64's always fill the 8-byte unit. 2376 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || 2377 IRType->isIntegerTy(64)) 2378 return IRType; 2379 2380 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 2381 // goodness in the source type is just tail padding. This is allowed to 2382 // kick in for struct {double,int} on the int, but not on 2383 // struct{double,int,int} because we wouldn't return the second int. We 2384 // have to do this analysis on the source type because we can't depend on 2385 // unions being lowered a specific way etc. 2386 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 2387 IRType->isIntegerTy(32) || 2388 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) { 2389 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 : 2390 cast<llvm::IntegerType>(IRType)->getBitWidth(); 2391 2392 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 2393 SourceOffset*8+64, getContext())) 2394 return IRType; 2395 } 2396 } 2397 2398 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 2399 // If this is a struct, recurse into the field at the specified offset. 
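  // (Example, assuming 64-bit pointers: querying { double, i8* } at
  // IROffset == 8 descends into the pointer element and returns it
  // unchanged.)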
2400 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2401 if (IROffset < SL->getSizeInBytes()) {
2402 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2403 IROffset -= SL->getElementOffset(FieldIdx);
2404
2405 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2406 SourceTy, SourceOffset);
2407 }
2408 }
2409
2410 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2411 llvm::Type *EltTy = ATy->getElementType();
2412 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2413 unsigned EltOffset = IROffset/EltSize*EltSize;
2414 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2415 SourceOffset);
2416 }
2417
2418 // Okay, we don't have any better idea of what to pass, so we pass this in an
2419 // integer register that isn't too big to fit the rest of the struct.
2420 unsigned TySizeInBytes =
2421 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2422
2423 assert(TySizeInBytes != SourceOffset && "Empty field?");
2424
2425 // It is always safe to classify this as an integer type up to i64 that
2426 // isn't larger than the structure.
2427 return llvm::IntegerType::get(getVMContext(),
2428 std::min(TySizeInBytes-SourceOffset, 8U)*8);
2429 }
2430
2431
2432 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
2433 /// be used as elements of a two register pair to pass or return, return a
2434 /// first class aggregate to represent them. For example, if the low part of
2435 /// a by-value argument should be passed as i32* and the high part as float,
2436 /// return {i32*, float}.
2437 static llvm::Type *
2438 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
2439 const llvm::DataLayout &TD) {
2440 // In order to correctly satisfy the ABI, we need the high part to start
2441 // at offset 8. If the high and low parts we inferred are both 4-byte types
2442 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
2443 // the second element at offset 8. Check for this:
2444 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
2445 unsigned HiAlign = TD.getABITypeAlignment(Hi);
2446 unsigned HiStart = llvm::RoundUpToAlignment(LoSize, HiAlign);
2447 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
2448
2449 // To handle this, we have to increase the size of the low part so that the
2450 // second element will start at an 8 byte offset. We can't increase the size
2451 // of the second element because it might make us access off the end of the
2452 // struct.
2453 if (HiStart != 8) {
2454 // There are only two sorts of types the ABI generation code can produce for
2455 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
2456 // Promote these to a larger type.
2457 if (Lo->isFloatTy())
2458 Lo = llvm::Type::getDoubleTy(Lo->getContext());
2459 else {
2460 assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
2461 Lo = llvm::Type::getInt64Ty(Lo->getContext());
2462 }
2463 }
2464
2465 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr);
2466
2467
2468 // Verify that the second element is at an 8-byte offset.
2469 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
2470 "Invalid x86-64 argument pair!");
2471 return Result;
2472 }
2473
2474 ABIArgInfo X86_64ABIInfo::
2475 classifyReturnType(QualType RetTy) const {
2476 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
2477 // classification algorithm.
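  // (Illustration, not from the ABI document: returning
  //   struct R { long a; double b; };
  // classifies as Lo=Integer, Hi=SSE, so 'a' comes back in %rax and 'b'
  // in %xmm0.)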
2478 X86_64ABIInfo::Class Lo, Hi;
2479 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
2480
2481 // Check some invariants.
2482 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2483 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2484
2485 llvm::Type *ResType = nullptr;
2486 switch (Lo) {
2487 case NoClass:
2488 if (Hi == NoClass)
2489 return ABIArgInfo::getIgnore();
2490 // If the low part is just padding, it takes no register, leave ResType
2491 // null.
2492 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2493 "Unknown missing lo part");
2494 break;
2495
2496 case SSEUp:
2497 case X87Up:
2498 llvm_unreachable("Invalid classification for lo word.");
2499
2500 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
2501 // hidden argument.
2502 case Memory:
2503 return getIndirectReturnResult(RetTy);
2504
2505 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
2506 // available register of the sequence %rax, %rdx is used.
2507 case Integer:
2508 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2509
2510 // If we have a sign or zero extended integer, make sure to return Extend
2511 // so that the parameter gets the right LLVM IR attributes.
2512 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2513 // Treat an enum type as its underlying type.
2514 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
2515 RetTy = EnumTy->getDecl()->getIntegerType();
2516
2517 if (RetTy->isIntegralOrEnumerationType() &&
2518 RetTy->isPromotableIntegerType())
2519 return ABIArgInfo::getExtend();
2520 }
2521 break;
2522
2523 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
2524 // available SSE register of the sequence %xmm0, %xmm1 is used.
2525 case SSE:
2526 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2527 break;
2528
2529 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
2530 // returned on the X87 stack in %st0 as an 80-bit x87 number.
2531 case X87:
2532 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
2533 break;
2534
2535 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
2536 // part of the value is returned in %st0 and the imaginary part in
2537 // %st1.
2538 case ComplexX87:
2539 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
2540 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
2541 llvm::Type::getX86_FP80Ty(getVMContext()),
2542 nullptr);
2543 break;
2544 }
2545
2546 llvm::Type *HighPart = nullptr;
2547 switch (Hi) {
2548 // Memory was handled previously and X87 should
2549 // never occur as a hi class.
2550 case Memory:
2551 case X87:
2552 llvm_unreachable("Invalid classification for hi word.");
2553
2554 case ComplexX87: // Previously handled.
2555 case NoClass:
2556 break;
2557
2558 case Integer:
2559 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2560 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2561 return ABIArgInfo::getDirect(HighPart, 8);
2562 break;
2563 case SSE:
2564 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2565 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2566 return ABIArgInfo::getDirect(HighPart, 8);
2567 break;
2568
2569 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
2570 // is passed in the next available eightbyte chunk of the last used
2571 // vector register.
2572 //
2573 // SSEUP should always be preceded by SSE, just widen.
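  // (e.g. a __m256 return is classified Lo=SSE, Hi=SSEUp and widened
  // back into a single <8 x float> returned in %ymm0.)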
2574 case SSEUp: 2575 assert(Lo == SSE && "Unexpected SSEUp classification."); 2576 ResType = GetByteVectorType(RetTy); 2577 break; 2578 2579 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 2580 // returned together with the previous X87 value in %st0. 2581 case X87Up: 2582 // If X87Up is preceded by X87, we don't need to do 2583 // anything. However, in some cases with unions it may not be 2584 // preceded by X87. In such situations we follow gcc and pass the 2585 // extra bits in an SSE reg. 2586 if (Lo != X87) { 2587 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2588 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2589 return ABIArgInfo::getDirect(HighPart, 8); 2590 } 2591 break; 2592 } 2593 2594 // If a high part was specified, merge it together with the low part. It is 2595 // known to pass in the high eightbyte of the result. We do this by forming a 2596 // first class struct aggregate with the high and low part: {low, high} 2597 if (HighPart) 2598 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2599 2600 return ABIArgInfo::getDirect(ResType); 2601 } 2602 2603 ABIArgInfo X86_64ABIInfo::classifyArgumentType( 2604 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE, 2605 bool isNamedArg) 2606 const 2607 { 2608 Ty = useFirstFieldIfTransparentUnion(Ty); 2609 2610 X86_64ABIInfo::Class Lo, Hi; 2611 classify(Ty, 0, Lo, Hi, isNamedArg); 2612 2613 // Check some invariants. 2614 // FIXME: Enforce these by construction. 2615 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2616 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2617 2618 neededInt = 0; 2619 neededSSE = 0; 2620 llvm::Type *ResType = nullptr; 2621 switch (Lo) { 2622 case NoClass: 2623 if (Hi == NoClass) 2624 return ABIArgInfo::getIgnore(); 2625 // If the low part is just padding, it takes no register, leave ResType 2626 // null. 2627 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2628 "Unknown missing lo part"); 2629 break; 2630 2631 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 2632 // on the stack. 2633 case Memory: 2634 2635 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 2636 // COMPLEX_X87, it is passed in memory. 2637 case X87: 2638 case ComplexX87: 2639 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect) 2640 ++neededInt; 2641 return getIndirectResult(Ty, freeIntRegs); 2642 2643 case SSEUp: 2644 case X87Up: 2645 llvm_unreachable("Invalid classification for lo word."); 2646 2647 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 2648 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 2649 // and %r9 is used. 2650 case Integer: 2651 ++neededInt; 2652 2653 // Pick an 8-byte type based on the preferred type. 2654 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 2655 2656 // If we have a sign or zero extended integer, make sure to return Extend 2657 // so that the parameter gets the right LLVM IR attributes. 2658 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2659 // Treat an enum type as its underlying type. 2660 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2661 Ty = EnumTy->getDecl()->getIntegerType(); 2662 2663 if (Ty->isIntegralOrEnumerationType() && 2664 Ty->isPromotableIntegerType()) 2665 return ABIArgInfo::getExtend(); 2666 } 2667 2668 break; 2669 2670 // AMD64-ABI 3.2.3p3: Rule 3. 
If the class is SSE, the next 2671 // available SSE register is used, the registers are taken in the 2672 // order from %xmm0 to %xmm7. 2673 case SSE: { 2674 llvm::Type *IRType = CGT.ConvertType(Ty); 2675 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 2676 ++neededSSE; 2677 break; 2678 } 2679 } 2680 2681 llvm::Type *HighPart = nullptr; 2682 switch (Hi) { 2683 // Memory was handled previously, ComplexX87 and X87 should 2684 // never occur as hi classes, and X87Up must be preceded by X87, 2685 // which is passed in memory. 2686 case Memory: 2687 case X87: 2688 case ComplexX87: 2689 llvm_unreachable("Invalid classification for hi word."); 2690 2691 case NoClass: break; 2692 2693 case Integer: 2694 ++neededInt; 2695 // Pick an 8-byte type based on the preferred type. 2696 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2697 2698 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2699 return ABIArgInfo::getDirect(HighPart, 8); 2700 break; 2701 2702 // X87Up generally doesn't occur here (long double is passed in 2703 // memory), except in situations involving unions. 2704 case X87Up: 2705 case SSE: 2706 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2707 2708 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2709 return ABIArgInfo::getDirect(HighPart, 8); 2710 2711 ++neededSSE; 2712 break; 2713 2714 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 2715 // eightbyte is passed in the upper half of the last used SSE 2716 // register. This only happens when 128-bit vectors are passed. 2717 case SSEUp: 2718 assert(Lo == SSE && "Unexpected SSEUp classification"); 2719 ResType = GetByteVectorType(Ty); 2720 break; 2721 } 2722 2723 // If a high part was specified, merge it together with the low part. It is 2724 // known to pass in the high eightbyte of the result. We do this by forming a 2725 // first class struct aggregate with the high and low part: {low, high} 2726 if (HighPart) 2727 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2728 2729 return ABIArgInfo::getDirect(ResType); 2730 } 2731 2732 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2733 2734 if (!getCXXABI().classifyReturnType(FI)) 2735 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2736 2737 // Keep track of the number of assigned registers. 2738 unsigned freeIntRegs = 6, freeSSERegs = 8; 2739 2740 // If the return value is indirect, then the hidden argument is consuming one 2741 // integer register. 2742 if (FI.getReturnInfo().isIndirect()) 2743 --freeIntRegs; 2744 2745 // The chain argument effectively gives us another free register. 2746 if (FI.isChainCall()) 2747 ++freeIntRegs; 2748 2749 unsigned NumRequiredArgs = FI.getNumRequiredArgs(); 2750 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 2751 // get assigned (in left-to-right order) for passing as follows... 2752 unsigned ArgNo = 0; 2753 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2754 it != ie; ++it, ++ArgNo) { 2755 bool IsNamedArg = ArgNo < NumRequiredArgs; 2756 2757 unsigned neededInt, neededSSE; 2758 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt, 2759 neededSSE, IsNamedArg); 2760 2761 // AMD64-ABI 3.2.3p3: If there are no registers available for any 2762 // eightbyte of an argument, the whole argument is passed on the 2763 // stack. If registers have already been assigned for some 2764 // eightbytes of such an argument, the assignments get reverted. 
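  // (For example, if one integer register remains but the eightbytes of
  // this argument need two, neededInt exceeds freeIntRegs and the whole
  // argument falls back to the stack below.)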
2765 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 2766 freeIntRegs -= neededInt; 2767 freeSSERegs -= neededSSE; 2768 } else { 2769 it->info = getIndirectResult(it->type, freeIntRegs); 2770 } 2771 } 2772 } 2773 2774 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 2775 QualType Ty, 2776 CodeGenFunction &CGF) { 2777 llvm::Value *overflow_arg_area_p = 2778 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 2779 llvm::Value *overflow_arg_area = 2780 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 2781 2782 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 2783 // byte boundary if alignment needed by type exceeds 8 byte boundary. 2784 // It isn't stated explicitly in the standard, but in practice we use 2785 // alignment greater than 16 where necessary. 2786 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 2787 if (Align > 8) { 2788 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 2789 llvm::Value *Offset = 2790 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 2791 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 2792 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 2793 CGF.Int64Ty); 2794 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); 2795 overflow_arg_area = 2796 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 2797 overflow_arg_area->getType(), 2798 "overflow_arg_area.align"); 2799 } 2800 2801 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 2802 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2803 llvm::Value *Res = 2804 CGF.Builder.CreateBitCast(overflow_arg_area, 2805 llvm::PointerType::getUnqual(LTy)); 2806 2807 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 2808 // l->overflow_arg_area + sizeof(type). 2809 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 2810 // an 8 byte boundary. 2811 2812 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 2813 llvm::Value *Offset = 2814 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 2815 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 2816 "overflow_arg_area.next"); 2817 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 2818 2819 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 2820 return Res; 2821 } 2822 2823 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2824 CodeGenFunction &CGF) const { 2825 // Assume that va_list type is correct; should be pointer to LLVM type: 2826 // struct { 2827 // i32 gp_offset; 2828 // i32 fp_offset; 2829 // i8* overflow_arg_area; 2830 // i8* reg_save_area; 2831 // }; 2832 unsigned neededInt, neededSSE; 2833 2834 Ty = CGF.getContext().getCanonicalType(Ty); 2835 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE, 2836 /*isNamedArg*/false); 2837 2838 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 2839 // in the registers. If not go to step 7. 2840 if (!neededInt && !neededSSE) 2841 return EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2842 2843 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 2844 // general purpose registers needed to pass type and num_fp to hold 2845 // the number of floating point registers needed. 2846 2847 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 2848 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 2849 // l->fp_offset > 304 - num_fp * 16 go to step 7. 
2850 //
2851 // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of
2852 // register save space.
2853
2854 llvm::Value *InRegs = nullptr;
2855 llvm::Value *gp_offset_p = nullptr, *gp_offset = nullptr;
2856 llvm::Value *fp_offset_p = nullptr, *fp_offset = nullptr;
2857 if (neededInt) {
2858 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
2859 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
2860 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
2861 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
2862 }
2863
2864 if (neededSSE) {
2865 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
2866 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
2867 llvm::Value *FitsInFP =
2868 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
2869 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
2870 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
2871 }
2872
2873 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
2874 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
2875 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
2876 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
2877
2878 // Emit code to load the value if it was passed in registers.
2879
2880 CGF.EmitBlock(InRegBlock);
2881
2882 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
2883 // an offset of l->gp_offset and/or l->fp_offset. This may require
2884 // copying to a temporary location in case the parameter is passed
2885 // in different register classes or requires an alignment greater
2886 // than 8 for general purpose registers and 16 for XMM registers.
2887 //
2888 // FIXME: This really results in shameful code when we end up needing to
2889 // collect arguments from different places; often what should result in a
2890 // simple assembling of a structure from scattered addresses has many more
2891 // loads than necessary. Can we clean this up?
2892 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
2893 llvm::Value *RegAddr =
2894 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
2895 "reg_save_area");
2896 if (neededInt && neededSSE) {
2897 // FIXME: Cleanup.
2898 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
2899 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
2900 llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
2901 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
2902 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
2903 llvm::Type *TyLo = ST->getElementType(0);
2904 llvm::Type *TyHi = ST->getElementType(1);
2905 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
2906 "Unexpected ABI info for mixed regs");
2907 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
2908 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
2909 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2910 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2911 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
2912 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ?
GPAddr : FPAddr; 2913 llvm::Value *V = 2914 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 2915 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2916 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 2917 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2918 2919 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2920 llvm::PointerType::getUnqual(LTy)); 2921 } else if (neededInt) { 2922 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2923 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2924 llvm::PointerType::getUnqual(LTy)); 2925 2926 // Copy to a temporary if necessary to ensure the appropriate alignment. 2927 std::pair<CharUnits, CharUnits> SizeAlign = 2928 CGF.getContext().getTypeInfoInChars(Ty); 2929 uint64_t TySize = SizeAlign.first.getQuantity(); 2930 unsigned TyAlign = SizeAlign.second.getQuantity(); 2931 if (TyAlign > 8) { 2932 llvm::Value *Tmp = CGF.CreateMemTemp(Ty); 2933 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false); 2934 RegAddr = Tmp; 2935 } 2936 } else if (neededSSE == 1) { 2937 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2938 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2939 llvm::PointerType::getUnqual(LTy)); 2940 } else { 2941 assert(neededSSE == 2 && "Invalid number of needed registers!"); 2942 // SSE registers are spaced 16 bytes apart in the register save 2943 // area, we need to collect the two eightbytes together. 2944 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2945 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); 2946 llvm::Type *DoubleTy = CGF.DoubleTy; 2947 llvm::Type *DblPtrTy = 2948 llvm::PointerType::getUnqual(DoubleTy); 2949 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr); 2950 llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty); 2951 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo()); 2952 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 2953 DblPtrTy)); 2954 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2955 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 2956 DblPtrTy)); 2957 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2958 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2959 llvm::PointerType::getUnqual(LTy)); 2960 } 2961 2962 // AMD64-ABI 3.5.7p5: Step 5. Set: 2963 // l->gp_offset = l->gp_offset + num_gp * 8 2964 // l->fp_offset = l->fp_offset + num_fp * 16. 2965 if (neededInt) { 2966 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2967 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2968 gp_offset_p); 2969 } 2970 if (neededSSE) { 2971 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2972 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2973 fp_offset_p); 2974 } 2975 CGF.EmitBranch(ContBlock); 2976 2977 // Emit code to load the value if it was passed in memory. 2978 2979 CGF.EmitBlock(InMemBlock); 2980 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2981 2982 // Return the appropriate result. 
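  // (The PHI below simply selects between the address computed from the
  // register save area and the one computed from the overflow area.)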
2983
2984 CGF.EmitBlock(ContBlock);
2985 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
2986 "vaarg.addr");
2987 ResAddr->addIncoming(RegAddr, InRegBlock);
2988 ResAddr->addIncoming(MemAddr, InMemBlock);
2989 return ResAddr;
2990 }
2991
2992 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
2993 bool IsReturnType) const {
2994
2995 if (Ty->isVoidType())
2996 return ABIArgInfo::getIgnore();
2997
2998 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2999 Ty = EnumTy->getDecl()->getIntegerType();
3000
3001 TypeInfo Info = getContext().getTypeInfo(Ty);
3002 uint64_t Width = Info.Width;
3003 unsigned Align = getContext().toCharUnitsFromBits(Info.Align).getQuantity();
3004
3005 const RecordType *RT = Ty->getAs<RecordType>();
3006 if (RT) {
3007 if (!IsReturnType) {
3008 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
3009 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
3010 }
3011
3012 if (RT->getDecl()->hasFlexibleArrayMember())
3013 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3014
3015 // FIXME: mingw-w64-gcc emits 128-bit struct as i128
3016 if (Width == 128 && getTarget().getTriple().isWindowsGNUEnvironment())
3017 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
3018 Width));
3019 }
3020
3021 // vectorcall adds the concept of a homogeneous vector aggregate, similar to
3022 // other targets.
3023 const Type *Base = nullptr;
3024 uint64_t NumElts = 0;
3025 if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) {
3026 if (FreeSSERegs >= NumElts) {
3027 FreeSSERegs -= NumElts;
3028 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
3029 return ABIArgInfo::getDirect();
3030 return ABIArgInfo::getExpand();
3031 }
3032 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3033 }
3034
3035
3036 if (Ty->isMemberPointerType()) {
3037 // If the member pointer is represented by an LLVM int or ptr, pass it
3038 // directly.
3039 llvm::Type *LLTy = CGT.ConvertType(Ty);
3040 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3041 return ABIArgInfo::getDirect();
3042 }
3043
3044 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
3045 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3046 // not 1, 2, 4, or 8 bytes, must be passed by reference."
3047 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3048 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3049
3050 // Otherwise, coerce it to a small integer.
3051 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
3052 }
3053
3054 // Bool is always extended per the ABI; other builtin types are not
3055 // extended.
3056 const BuiltinType *BT = Ty->getAs<BuiltinType>();
3057 if (BT && BT->getKind() == BuiltinType::Bool)
3058 return ABIArgInfo::getExtend();
3059
3060 return ABIArgInfo::getDirect();
3061 }
3062
3063 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3064 bool IsVectorCall =
3065 FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
3066
3067 // We can use up to 4 SSE return registers with vectorcall.
3068 unsigned FreeSSERegs = IsVectorCall ? 4 : 0;
3069 if (!getCXXABI().classifyReturnType(FI))
3070 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true);
3071
3072 // We can use up to 6 SSE register parameters with vectorcall.
3073 FreeSSERegs = IsVectorCall ?
6 : 0; 3074 for (auto &I : FI.arguments()) 3075 I.info = classify(I.type, FreeSSERegs, false); 3076 } 3077 3078 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3079 CodeGenFunction &CGF) const { 3080 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3081 3082 CGBuilderTy &Builder = CGF.Builder; 3083 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 3084 "ap"); 3085 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3086 llvm::Type *PTy = 3087 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3088 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 3089 3090 uint64_t Offset = 3091 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8); 3092 llvm::Value *NextAddr = 3093 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 3094 "ap.next"); 3095 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3096 3097 return AddrTyped; 3098 } 3099 3100 // PowerPC-32 3101 namespace { 3102 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information. 3103 class PPC32_SVR4_ABIInfo : public DefaultABIInfo { 3104 public: 3105 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 3106 3107 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3108 CodeGenFunction &CGF) const override; 3109 }; 3110 3111 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo { 3112 public: 3113 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT)) {} 3114 3115 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 3116 // This is recovered from gcc output. 3117 return 1; // r1 is the dedicated stack pointer 3118 } 3119 3120 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3121 llvm::Value *Address) const override; 3122 3123 unsigned getOpenMPSimdDefaultAlignment(QualType) const override { 3124 return 16; // Natural alignment for Altivec vectors. 3125 } 3126 3127 bool hasSjLjLowering(CodeGen::CodeGenFunction &CGF) const override { 3128 return true; 3129 } 3130 }; 3131 3132 } 3133 3134 llvm::Value *PPC32_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr, 3135 QualType Ty, 3136 CodeGenFunction &CGF) const { 3137 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 3138 // TODO: Implement this. For now ignore. 3139 (void)CTy; 3140 return nullptr; 3141 } 3142 3143 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64; 3144 bool isInt = Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType(); 3145 llvm::Type *CharPtr = CGF.Int8PtrTy; 3146 llvm::Type *CharPtrPtr = CGF.Int8PtrPtrTy; 3147 3148 CGBuilderTy &Builder = CGF.Builder; 3149 llvm::Value *GPRPtr = Builder.CreateBitCast(VAListAddr, CharPtr, "gprptr"); 3150 llvm::Value *GPRPtrAsInt = Builder.CreatePtrToInt(GPRPtr, CGF.Int32Ty); 3151 llvm::Value *FPRPtrAsInt = Builder.CreateAdd(GPRPtrAsInt, Builder.getInt32(1)); 3152 llvm::Value *FPRPtr = Builder.CreateIntToPtr(FPRPtrAsInt, CharPtr); 3153 llvm::Value *OverflowAreaPtrAsInt = Builder.CreateAdd(FPRPtrAsInt, Builder.getInt32(3)); 3154 llvm::Value *OverflowAreaPtr = Builder.CreateIntToPtr(OverflowAreaPtrAsInt, CharPtrPtr); 3155 llvm::Value *RegsaveAreaPtrAsInt = Builder.CreateAdd(OverflowAreaPtrAsInt, Builder.getInt32(4)); 3156 llvm::Value *RegsaveAreaPtr = Builder.CreateIntToPtr(RegsaveAreaPtrAsInt, CharPtrPtr); 3157 llvm::Value *GPR = Builder.CreateLoad(GPRPtr, false, "gpr"); 3158 // Align GPR when TY is i64. 
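  // (Illustrative example: a GPR index of 3 is bumped to 4 below so that a
  // 64-bit value starts at an even index, i.e. in an aligned register pair.)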
3159 if (isI64) { 3160 llvm::Value *GPRAnd = Builder.CreateAnd(GPR, Builder.getInt8(1)); 3161 llvm::Value *CC64 = Builder.CreateICmpEQ(GPRAnd, Builder.getInt8(1)); 3162 llvm::Value *GPRPlusOne = Builder.CreateAdd(GPR, Builder.getInt8(1)); 3163 GPR = Builder.CreateSelect(CC64, GPRPlusOne, GPR); 3164 } 3165 llvm::Value *FPR = Builder.CreateLoad(FPRPtr, false, "fpr"); 3166 llvm::Value *OverflowArea = Builder.CreateLoad(OverflowAreaPtr, false, "overflow_area"); 3167 llvm::Value *OverflowAreaAsInt = Builder.CreatePtrToInt(OverflowArea, CGF.Int32Ty); 3168 llvm::Value *RegsaveArea = Builder.CreateLoad(RegsaveAreaPtr, false, "regsave_area"); 3169 llvm::Value *RegsaveAreaAsInt = Builder.CreatePtrToInt(RegsaveArea, CGF.Int32Ty); 3170 3171 llvm::Value *CC = Builder.CreateICmpULT(isInt ? GPR : FPR, 3172 Builder.getInt8(8), "cond"); 3173 3174 llvm::Value *RegConstant = Builder.CreateMul(isInt ? GPR : FPR, 3175 Builder.getInt8(isInt ? 4 : 8)); 3176 3177 llvm::Value *OurReg = Builder.CreateAdd(RegsaveAreaAsInt, Builder.CreateSExt(RegConstant, CGF.Int32Ty)); 3178 3179 if (Ty->isFloatingType()) 3180 OurReg = Builder.CreateAdd(OurReg, Builder.getInt32(32)); 3181 3182 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs"); 3183 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow"); 3184 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 3185 3186 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow); 3187 3188 CGF.EmitBlock(UsingRegs); 3189 3190 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3191 llvm::Value *Result1 = Builder.CreateIntToPtr(OurReg, PTy); 3192 // Increase the GPR/FPR indexes. 3193 if (isInt) { 3194 GPR = Builder.CreateAdd(GPR, Builder.getInt8(isI64 ? 2 : 1)); 3195 Builder.CreateStore(GPR, GPRPtr); 3196 } else { 3197 FPR = Builder.CreateAdd(FPR, Builder.getInt8(1)); 3198 Builder.CreateStore(FPR, FPRPtr); 3199 } 3200 CGF.EmitBranch(Cont); 3201 3202 CGF.EmitBlock(UsingOverflow); 3203 3204 // Increase the overflow area. 3205 llvm::Value *Result2 = Builder.CreateIntToPtr(OverflowAreaAsInt, PTy); 3206 OverflowAreaAsInt = Builder.CreateAdd(OverflowAreaAsInt, Builder.getInt32(isInt ? 4 : 8)); 3207 Builder.CreateStore(Builder.CreateIntToPtr(OverflowAreaAsInt, CharPtr), OverflowAreaPtr); 3208 CGF.EmitBranch(Cont); 3209 3210 CGF.EmitBlock(Cont); 3211 3212 llvm::PHINode *Result = CGF.Builder.CreatePHI(PTy, 2, "vaarg.addr"); 3213 Result->addIncoming(Result1, UsingRegs); 3214 Result->addIncoming(Result2, UsingOverflow); 3215 3216 if (Ty->isAggregateType()) { 3217 llvm::Value *AGGPtr = Builder.CreateBitCast(Result, CharPtrPtr, "aggrptr") ; 3218 return Builder.CreateLoad(AGGPtr, false, "aggr"); 3219 } 3220 3221 return Result; 3222 } 3223 3224 bool 3225 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3226 llvm::Value *Address) const { 3227 // This is calculated from the LLVM and GCC tables and verified 3228 // against gcc output. AFAIK all ABIs use the same encoding. 
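  // In effect, each AssignToArrayRange call below records one byte width per
  // DWARF register number, roughly: Address[reg] = size-in-bytes for every
  // reg in the given range.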
3229 3230 CodeGen::CGBuilderTy &Builder = CGF.Builder; 3231 3232 llvm::IntegerType *i8 = CGF.Int8Ty; 3233 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 3234 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 3235 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 3236 3237 // 0-31: r0-31, the 4-byte general-purpose registers 3238 AssignToArrayRange(Builder, Address, Four8, 0, 31); 3239 3240 // 32-63: fp0-31, the 8-byte floating-point registers 3241 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 3242 3243 // 64-76 are various 4-byte special-purpose registers: 3244 // 64: mq 3245 // 65: lr 3246 // 66: ctr 3247 // 67: ap 3248 // 68-75 cr0-7 3249 // 76: xer 3250 AssignToArrayRange(Builder, Address, Four8, 64, 76); 3251 3252 // 77-108: v0-31, the 16-byte vector registers 3253 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 3254 3255 // 109: vrsave 3256 // 110: vscr 3257 // 111: spe_acc 3258 // 112: spefscr 3259 // 113: sfp 3260 AssignToArrayRange(Builder, Address, Four8, 109, 113); 3261 3262 return false; 3263 } 3264 3265 // PowerPC-64 3266 3267 namespace { 3268 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. 3269 class PPC64_SVR4_ABIInfo : public DefaultABIInfo { 3270 public: 3271 enum ABIKind { 3272 ELFv1 = 0, 3273 ELFv2 3274 }; 3275 3276 private: 3277 static const unsigned GPRBits = 64; 3278 ABIKind Kind; 3279 3280 public: 3281 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind) 3282 : DefaultABIInfo(CGT), Kind(Kind) {} 3283 3284 bool isPromotableTypeForABI(QualType Ty) const; 3285 bool isAlignedParamType(QualType Ty) const; 3286 3287 ABIArgInfo classifyReturnType(QualType RetTy) const; 3288 ABIArgInfo classifyArgumentType(QualType Ty) const; 3289 3290 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 3291 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 3292 uint64_t Members) const override; 3293 3294 // TODO: We can add more logic to computeInfo to improve performance. 3295 // Example: For aggregate arguments that fit in a register, we could 3296 // use getDirectInReg (as is done below for structs containing a single 3297 // floating-point value) to avoid pushing them to memory on function 3298 // entry. This would require changing the logic in PPCISelLowering 3299 // when lowering the parameters in the caller and args in the callee. 3300 void computeInfo(CGFunctionInfo &FI) const override { 3301 if (!getCXXABI().classifyReturnType(FI)) 3302 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3303 for (auto &I : FI.arguments()) { 3304 // We rely on the default argument classification for the most part. 3305 // One exception: An aggregate containing a single floating-point 3306 // or vector item must be passed in a register if one is available. 
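      // (Illustrative example: "struct S { double d; };" is classified below
      // as direct-in-reg with type double, bypassing the generic aggregate
      // handling.)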
3307 const Type *T = isSingleElementStruct(I.type, getContext()); 3308 if (T) { 3309 const BuiltinType *BT = T->getAs<BuiltinType>(); 3310 if ((T->isVectorType() && getContext().getTypeSize(T) == 128) || 3311 (BT && BT->isFloatingPoint())) { 3312 QualType QT(T, 0); 3313 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); 3314 continue; 3315 } 3316 } 3317 I.info = classifyArgumentType(I.type); 3318 } 3319 } 3320 3321 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3322 CodeGenFunction &CGF) const override; 3323 }; 3324 3325 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { 3326 public: 3327 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, 3328 PPC64_SVR4_ABIInfo::ABIKind Kind) 3329 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind)) {} 3330 3331 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 3332 // This is recovered from gcc output. 3333 return 1; // r1 is the dedicated stack pointer 3334 } 3335 3336 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3337 llvm::Value *Address) const override; 3338 3339 unsigned getOpenMPSimdDefaultAlignment(QualType) const override { 3340 return 16; // Natural alignment for Altivec and VSX vectors. 3341 } 3342 3343 bool hasSjLjLowering(CodeGen::CodeGenFunction &CGF) const override { 3344 return true; 3345 } 3346 }; 3347 3348 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 3349 public: 3350 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 3351 3352 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 3353 // This is recovered from gcc output. 3354 return 1; // r1 is the dedicated stack pointer 3355 } 3356 3357 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3358 llvm::Value *Address) const override; 3359 3360 unsigned getOpenMPSimdDefaultAlignment(QualType) const override { 3361 return 16; // Natural alignment for Altivec vectors. 3362 } 3363 3364 bool hasSjLjLowering(CodeGen::CodeGenFunction &CGF) const override { 3365 return true; 3366 } 3367 }; 3368 3369 } 3370 3371 // Return true if the ABI requires Ty to be passed sign- or zero- 3372 // extended to 64 bits. 3373 bool 3374 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const { 3375 // Treat an enum type as its underlying type. 3376 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3377 Ty = EnumTy->getDecl()->getIntegerType(); 3378 3379 // Promotable integer types are required to be promoted by the ABI. 3380 if (Ty->isPromotableIntegerType()) 3381 return true; 3382 3383 // In addition to the usual promotable integer types, we also need to 3384 // extend all 32-bit types, since the ABI requires promotion to 64 bits. 3385 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 3386 switch (BT->getKind()) { 3387 case BuiltinType::Int: 3388 case BuiltinType::UInt: 3389 return true; 3390 default: 3391 break; 3392 } 3393 3394 return false; 3395 } 3396 3397 /// isAlignedParamType - Determine whether a type requires 16-byte 3398 /// alignment in the parameter area. 3399 bool 3400 PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty) const { 3401 // Complex types are passed just like their elements. 3402 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 3403 Ty = CTy->getElementType(); 3404 3405 // Only vector types of size 16 bytes need alignment (larger types are 3406 // passed via reference, smaller types are not aligned). 
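  // (For example, a 128-bit Altivec "vector int" takes 16-byte alignment
  // here, while a hypothetical 64-bit vector would not.)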
3407 if (Ty->isVectorType()) 3408 return getContext().getTypeSize(Ty) == 128; 3409 3410 // For single-element float/vector structs, we consider the whole type 3411 // to have the same alignment requirements as its single element. 3412 const Type *AlignAsType = nullptr; 3413 const Type *EltType = isSingleElementStruct(Ty, getContext()); 3414 if (EltType) { 3415 const BuiltinType *BT = EltType->getAs<BuiltinType>(); 3416 if ((EltType->isVectorType() && 3417 getContext().getTypeSize(EltType) == 128) || 3418 (BT && BT->isFloatingPoint())) 3419 AlignAsType = EltType; 3420 } 3421 3422 // Likewise for ELFv2 homogeneous aggregates. 3423 const Type *Base = nullptr; 3424 uint64_t Members = 0; 3425 if (!AlignAsType && Kind == ELFv2 && 3426 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members)) 3427 AlignAsType = Base; 3428 3429 // With special case aggregates, only vector base types need alignment. 3430 if (AlignAsType) 3431 return AlignAsType->isVectorType(); 3432 3433 // Otherwise, we only need alignment for any aggregate type that 3434 // has an alignment requirement of >= 16 bytes. 3435 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) 3436 return true; 3437 3438 return false; 3439 } 3440 3441 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous 3442 /// aggregate. Base is set to the base element type, and Members is set 3443 /// to the number of base elements. 3444 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base, 3445 uint64_t &Members) const { 3446 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 3447 uint64_t NElements = AT->getSize().getZExtValue(); 3448 if (NElements == 0) 3449 return false; 3450 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members)) 3451 return false; 3452 Members *= NElements; 3453 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 3454 const RecordDecl *RD = RT->getDecl(); 3455 if (RD->hasFlexibleArrayMember()) 3456 return false; 3457 3458 Members = 0; 3459 3460 // If this is a C++ record, check the bases first. 3461 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 3462 for (const auto &I : CXXRD->bases()) { 3463 // Ignore empty records. 3464 if (isEmptyRecord(getContext(), I.getType(), true)) 3465 continue; 3466 3467 uint64_t FldMembers; 3468 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers)) 3469 return false; 3470 3471 Members += FldMembers; 3472 } 3473 } 3474 3475 for (const auto *FD : RD->fields()) { 3476 // Ignore (non-zero arrays of) empty records. 3477 QualType FT = FD->getType(); 3478 while (const ConstantArrayType *AT = 3479 getContext().getAsConstantArrayType(FT)) { 3480 if (AT->getSize().getZExtValue() == 0) 3481 return false; 3482 FT = AT->getElementType(); 3483 } 3484 if (isEmptyRecord(getContext(), FT, true)) 3485 continue; 3486 3487 // For compatibility with GCC, ignore empty bitfields in C++ mode. 3488 if (getContext().getLangOpts().CPlusPlus && 3489 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0) 3490 continue; 3491 3492 uint64_t FldMembers; 3493 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers)) 3494 return false; 3495 3496 Members = (RD->isUnion() ? 3497 std::max(Members, FldMembers) : Members + FldMembers); 3498 } 3499 3500 if (!Base) 3501 return false; 3502 3503 // Ensure there is no padding. 
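    // (Illustrative example: "struct __attribute__((aligned(16))) { float f; }"
    // has one 4-byte base element but a 16-byte total size, so the check
    // below rejects it.)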
3504 if (getContext().getTypeSize(Base) * Members != 3505 getContext().getTypeSize(Ty)) 3506 return false; 3507 } else { 3508 Members = 1; 3509 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 3510 Members = 2; 3511 Ty = CT->getElementType(); 3512 } 3513 3514 // Most ABIs only support float, double, and some vector type widths. 3515 if (!isHomogeneousAggregateBaseType(Ty)) 3516 return false; 3517 3518 // The base type must be the same for all members. Types that 3519 // agree in both total size and mode (float vs. vector) are 3520 // treated as being equivalent here. 3521 const Type *TyPtr = Ty.getTypePtr(); 3522 if (!Base) 3523 Base = TyPtr; 3524 3525 if (Base->isVectorType() != TyPtr->isVectorType() || 3526 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr)) 3527 return false; 3528 } 3529 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members); 3530 } 3531 3532 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 3533 // Homogeneous aggregates for ELFv2 must have base types of float, 3534 // double, long double, or 128-bit vectors. 3535 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 3536 if (BT->getKind() == BuiltinType::Float || 3537 BT->getKind() == BuiltinType::Double || 3538 BT->getKind() == BuiltinType::LongDouble) 3539 return true; 3540 } 3541 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3542 if (getContext().getTypeSize(VT) == 128) 3543 return true; 3544 } 3545 return false; 3546 } 3547 3548 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough( 3549 const Type *Base, uint64_t Members) const { 3550 // Vector types require one register, floating point types require one 3551 // or two registers depending on their size. 3552 uint32_t NumRegs = 3553 Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64; 3554 3555 // Homogeneous Aggregates may occupy at most 8 registers. 3556 return Members * NumRegs <= 8; 3557 } 3558 3559 ABIArgInfo 3560 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { 3561 Ty = useFirstFieldIfTransparentUnion(Ty); 3562 3563 if (Ty->isAnyComplexType()) 3564 return ABIArgInfo::getDirect(); 3565 3566 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes) 3567 // or via reference (larger than 16 bytes). 3568 if (Ty->isVectorType()) { 3569 uint64_t Size = getContext().getTypeSize(Ty); 3570 if (Size > 128) 3571 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3572 else if (Size < 128) { 3573 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 3574 return ABIArgInfo::getDirect(CoerceTy); 3575 } 3576 } 3577 3578 if (isAggregateTypeForABI(Ty)) { 3579 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 3580 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 3581 3582 uint64_t ABIAlign = isAlignedParamType(Ty)? 16 : 8; 3583 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8; 3584 3585 // ELFv2 homogeneous aggregates are passed as array types. 3586 const Type *Base = nullptr; 3587 uint64_t Members = 0; 3588 if (Kind == ELFv2 && 3589 isHomogeneousAggregate(Ty, Base, Members)) { 3590 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 3591 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 3592 return ABIArgInfo::getDirect(CoerceTy); 3593 } 3594 3595 // If an aggregate may end up fully in registers, we do not 3596 // use the ByVal method, but pass the aggregate as array. 3597 // This is usually beneficial since we avoid forcing the 3598 // back-end to store the argument to memory. 
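    // (Illustrative examples: a 3-byte aggregate is coerced to i24 below,
    // while a 12-byte aggregate with the default 8-byte save-area alignment
    // becomes [2 x i64].)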
3599 uint64_t Bits = getContext().getTypeSize(Ty); 3600 if (Bits > 0 && Bits <= 8 * GPRBits) { 3601 llvm::Type *CoerceTy; 3602 3603 // Types up to 8 bytes are passed as integer type (which will be 3604 // properly aligned in the argument save area doubleword). 3605 if (Bits <= GPRBits) 3606 CoerceTy = llvm::IntegerType::get(getVMContext(), 3607 llvm::RoundUpToAlignment(Bits, 8)); 3608 // Larger types are passed as arrays, with the base type selected 3609 // according to the required alignment in the save area. 3610 else { 3611 uint64_t RegBits = ABIAlign * 8; 3612 uint64_t NumRegs = llvm::RoundUpToAlignment(Bits, RegBits) / RegBits; 3613 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits); 3614 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs); 3615 } 3616 3617 return ABIArgInfo::getDirect(CoerceTy); 3618 } 3619 3620 // All other aggregates are passed ByVal. 3621 return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true, 3622 /*Realign=*/TyAlign > ABIAlign); 3623 } 3624 3625 return (isPromotableTypeForABI(Ty) ? 3626 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3627 } 3628 3629 ABIArgInfo 3630 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { 3631 if (RetTy->isVoidType()) 3632 return ABIArgInfo::getIgnore(); 3633 3634 if (RetTy->isAnyComplexType()) 3635 return ABIArgInfo::getDirect(); 3636 3637 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes) 3638 // or via reference (larger than 16 bytes). 3639 if (RetTy->isVectorType()) { 3640 uint64_t Size = getContext().getTypeSize(RetTy); 3641 if (Size > 128) 3642 return ABIArgInfo::getIndirect(0); 3643 else if (Size < 128) { 3644 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 3645 return ABIArgInfo::getDirect(CoerceTy); 3646 } 3647 } 3648 3649 if (isAggregateTypeForABI(RetTy)) { 3650 // ELFv2 homogeneous aggregates are returned as array types. 3651 const Type *Base = nullptr; 3652 uint64_t Members = 0; 3653 if (Kind == ELFv2 && 3654 isHomogeneousAggregate(RetTy, Base, Members)) { 3655 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 3656 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 3657 return ABIArgInfo::getDirect(CoerceTy); 3658 } 3659 3660 // ELFv2 small aggregates are returned in up to two registers. 3661 uint64_t Bits = getContext().getTypeSize(RetTy); 3662 if (Kind == ELFv2 && Bits <= 2 * GPRBits) { 3663 if (Bits == 0) 3664 return ABIArgInfo::getIgnore(); 3665 3666 llvm::Type *CoerceTy; 3667 if (Bits > GPRBits) { 3668 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits); 3669 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr); 3670 } else 3671 CoerceTy = llvm::IntegerType::get(getVMContext(), 3672 llvm::RoundUpToAlignment(Bits, 8)); 3673 return ABIArgInfo::getDirect(CoerceTy); 3674 } 3675 3676 // All other aggregates are returned indirectly. 3677 return ABIArgInfo::getIndirect(0); 3678 } 3679 3680 return (isPromotableTypeForABI(RetTy) ? 3681 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3682 } 3683 3684 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 
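// The 64-bit SVR4 va_list is a simple cursor into the parameter save area:
// each va_arg aligns the cursor when the type requires it, locates the
// argument, and advances the cursor by the argument's size rounded up to a
// doubleword. Complex types get the special treatment described below.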
3685 llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr, 3686 QualType Ty, 3687 CodeGenFunction &CGF) const { 3688 llvm::Type *BP = CGF.Int8PtrTy; 3689 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3690 3691 CGBuilderTy &Builder = CGF.Builder; 3692 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 3693 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3694 3695 // Handle types that require 16-byte alignment in the parameter save area. 3696 if (isAlignedParamType(Ty)) { 3697 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 3698 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(15)); 3699 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt64(-16)); 3700 Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align"); 3701 } 3702 3703 // Update the va_list pointer. The pointer should be bumped by the 3704 // size of the object. We can trust getTypeSize() except for a complex 3705 // type whose base type is smaller than a doubleword. For these, the 3706 // size of the object is 16 bytes; see below for further explanation. 3707 unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8; 3708 QualType BaseTy; 3709 unsigned CplxBaseSize = 0; 3710 3711 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 3712 BaseTy = CTy->getElementType(); 3713 CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8; 3714 if (CplxBaseSize < 8) 3715 SizeInBytes = 16; 3716 } 3717 3718 unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8); 3719 llvm::Value *NextAddr = 3720 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), 3721 "ap.next"); 3722 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3723 3724 // If we have a complex type and the base type is smaller than 8 bytes, 3725 // the ABI calls for the real and imaginary parts to be right-adjusted 3726 // in separate doublewords. However, Clang expects us to produce a 3727 // pointer to a structure with the two parts packed tightly. So generate 3728 // loads of the real and imaginary parts relative to the va_list pointer, 3729 // and store them to a temporary structure. 3730 if (CplxBaseSize && CplxBaseSize < 8) { 3731 llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 3732 llvm::Value *ImagAddr = RealAddr; 3733 if (CGF.CGM.getDataLayout().isBigEndian()) { 3734 RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize)); 3735 ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize)); 3736 } else { 3737 ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(8)); 3738 } 3739 llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy)); 3740 RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy); 3741 ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy); 3742 llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal"); 3743 llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag"); 3744 llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty), 3745 "vacplx"); 3746 llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real"); 3747 llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag"); 3748 Builder.CreateStore(Real, RealPtr, false); 3749 Builder.CreateStore(Imag, ImagPtr, false); 3750 return Ptr; 3751 } 3752 3753 // If the argument is smaller than 8 bytes, it is right-adjusted in 3754 // its doubleword slot. Adjust the pointer to pick it up from the 3755 // correct offset. 
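  // (Illustrative example: on a big-endian target a 4-byte int occupies the
  // high half of its doubleword slot, so the pointer is advanced by
  // 8 - 4 = 4 bytes.)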
3756 if (SizeInBytes < 8 && CGF.CGM.getDataLayout().isBigEndian()) { 3757 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 3758 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes)); 3759 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 3760 } 3761 3762 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3763 return Builder.CreateBitCast(Addr, PTy); 3764 } 3765 3766 static bool 3767 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3768 llvm::Value *Address) { 3769 // This is calculated from the LLVM and GCC tables and verified 3770 // against gcc output. AFAIK all ABIs use the same encoding. 3771 3772 CodeGen::CGBuilderTy &Builder = CGF.Builder; 3773 3774 llvm::IntegerType *i8 = CGF.Int8Ty; 3775 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 3776 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 3777 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 3778 3779 // 0-31: r0-31, the 8-byte general-purpose registers 3780 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 3781 3782 // 32-63: fp0-31, the 8-byte floating-point registers 3783 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 3784 3785 // 64-76 are various 4-byte special-purpose registers: 3786 // 64: mq 3787 // 65: lr 3788 // 66: ctr 3789 // 67: ap 3790 // 68-75 cr0-7 3791 // 76: xer 3792 AssignToArrayRange(Builder, Address, Four8, 64, 76); 3793 3794 // 77-108: v0-31, the 16-byte vector registers 3795 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 3796 3797 // 109: vrsave 3798 // 110: vscr 3799 // 111: spe_acc 3800 // 112: spefscr 3801 // 113: sfp 3802 AssignToArrayRange(Builder, Address, Four8, 109, 113); 3803 3804 return false; 3805 } 3806 3807 bool 3808 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( 3809 CodeGen::CodeGenFunction &CGF, 3810 llvm::Value *Address) const { 3811 3812 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 3813 } 3814 3815 bool 3816 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3817 llvm::Value *Address) const { 3818 3819 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 3820 } 3821 3822 //===----------------------------------------------------------------------===// 3823 // AArch64 ABI Implementation 3824 //===----------------------------------------------------------------------===// 3825 3826 namespace { 3827 3828 class AArch64ABIInfo : public ABIInfo { 3829 public: 3830 enum ABIKind { 3831 AAPCS = 0, 3832 DarwinPCS 3833 }; 3834 3835 private: 3836 ABIKind Kind; 3837 3838 public: 3839 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {} 3840 3841 private: 3842 ABIKind getABIKind() const { return Kind; } 3843 bool isDarwinPCS() const { return Kind == DarwinPCS; } 3844 3845 ABIArgInfo classifyReturnType(QualType RetTy) const; 3846 ABIArgInfo classifyArgumentType(QualType RetTy) const; 3847 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 3848 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 3849 uint64_t Members) const override; 3850 3851 bool isIllegalVectorType(QualType Ty) const; 3852 3853 void computeInfo(CGFunctionInfo &FI) const override { 3854 if (!getCXXABI().classifyReturnType(FI)) 3855 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3856 3857 for (auto &it : FI.arguments()) 3858 it.info = classifyArgumentType(it.type); 3859 } 3860 3861 llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty, 3862 CodeGenFunction &CGF) const; 3863 3864 llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, 
QualType Ty, 3865 CodeGenFunction &CGF) const; 3866 3867 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3868 CodeGenFunction &CGF) const override { 3869 return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF) 3870 : EmitAAPCSVAArg(VAListAddr, Ty, CGF); 3871 } 3872 }; 3873 3874 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo { 3875 public: 3876 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind) 3877 : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {} 3878 3879 StringRef getARCRetainAutoreleasedReturnValueMarker() const { 3880 return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue"; 3881 } 3882 3883 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { return 31; } 3884 3885 virtual bool doesReturnSlotInterfereWithArgs() const { return false; } 3886 }; 3887 } 3888 3889 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const { 3890 Ty = useFirstFieldIfTransparentUnion(Ty); 3891 3892 // Handle illegal vector types here. 3893 if (isIllegalVectorType(Ty)) { 3894 uint64_t Size = getContext().getTypeSize(Ty); 3895 if (Size <= 32) { 3896 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext()); 3897 return ABIArgInfo::getDirect(ResType); 3898 } 3899 if (Size == 64) { 3900 llvm::Type *ResType = 3901 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2); 3902 return ABIArgInfo::getDirect(ResType); 3903 } 3904 if (Size == 128) { 3905 llvm::Type *ResType = 3906 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4); 3907 return ABIArgInfo::getDirect(ResType); 3908 } 3909 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3910 } 3911 3912 if (!isAggregateTypeForABI(Ty)) { 3913 // Treat an enum type as its underlying type. 3914 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3915 Ty = EnumTy->getDecl()->getIntegerType(); 3916 3917 return (Ty->isPromotableIntegerType() && isDarwinPCS() 3918 ? ABIArgInfo::getExtend() 3919 : ABIArgInfo::getDirect()); 3920 } 3921 3922 // Structures with either a non-trivial destructor or a non-trivial 3923 // copy constructor are always indirect. 3924 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 3925 return ABIArgInfo::getIndirect(0, /*ByVal=*/RAA == 3926 CGCXXABI::RAA_DirectInMemory); 3927 } 3928 3929 // Empty records are always ignored on Darwin, but actually passed in C++ mode 3930 // elsewhere for GNU compatibility. 3931 if (isEmptyRecord(getContext(), Ty, true)) { 3932 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS()) 3933 return ABIArgInfo::getIgnore(); 3934 3935 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3936 } 3937 3938 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded. 3939 const Type *Base = nullptr; 3940 uint64_t Members = 0; 3941 if (isHomogeneousAggregate(Ty, Base, Members)) { 3942 return ABIArgInfo::getDirect( 3943 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members)); 3944 } 3945 3946 // Aggregates <= 16 bytes are passed directly in registers or on the stack. 3947 uint64_t Size = getContext().getTypeSize(Ty); 3948 if (Size <= 128) { 3949 unsigned Alignment = getContext().getTypeAlign(Ty); 3950 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes 3951 3952 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. 3953 // For aggregates with 16-byte alignment, we use i128. 
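    // (Illustrative examples: "struct { long a, b; }" with 8-byte alignment
    // is coerced to [2 x i64] below, while a 16-byte struct holding an
    // __int128 is coerced to i128.)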
3954 if (Alignment < 128 && Size == 128) { 3955 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); 3956 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); 3957 } 3958 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 3959 } 3960 3961 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3962 } 3963 3964 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const { 3965 if (RetTy->isVoidType()) 3966 return ABIArgInfo::getIgnore(); 3967 3968 // Large vector types should be returned via memory. 3969 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 3970 return ABIArgInfo::getIndirect(0); 3971 3972 if (!isAggregateTypeForABI(RetTy)) { 3973 // Treat an enum type as its underlying type. 3974 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3975 RetTy = EnumTy->getDecl()->getIntegerType(); 3976 3977 return (RetTy->isPromotableIntegerType() && isDarwinPCS() 3978 ? ABIArgInfo::getExtend() 3979 : ABIArgInfo::getDirect()); 3980 } 3981 3982 if (isEmptyRecord(getContext(), RetTy, true)) 3983 return ABIArgInfo::getIgnore(); 3984 3985 const Type *Base = nullptr; 3986 uint64_t Members = 0; 3987 if (isHomogeneousAggregate(RetTy, Base, Members)) 3988 // Homogeneous Floating-point Aggregates (HFAs) are returned directly. 3989 return ABIArgInfo::getDirect(); 3990 3991 // Aggregates <= 16 bytes are returned directly in registers or on the stack. 3992 uint64_t Size = getContext().getTypeSize(RetTy); 3993 if (Size <= 128) { 3994 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes 3995 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 3996 } 3997 3998 return ABIArgInfo::getIndirect(0); 3999 } 4000 4001 /// isIllegalVectorType - check whether the vector type is legal for AArch64. 4002 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const { 4003 if (const VectorType *VT = Ty->getAs<VectorType>()) { 4004 // Check whether VT is legal. 4005 unsigned NumElements = VT->getNumElements(); 4006 uint64_t Size = getContext().getTypeSize(VT); 4007 // NumElements should be power of 2 between 1 and 16. 4008 if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16) 4009 return true; 4010 return Size != 64 && (Size != 128 || NumElements == 1); 4011 } 4012 return false; 4013 } 4014 4015 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 4016 // Homogeneous aggregates for AAPCS64 must have base types of a floating 4017 // point type or a short-vector type. This is the same as the 32-bit ABI, 4018 // but with the difference that any floating-point type is allowed, 4019 // including __fp16. 
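// (Illustrative example: "struct { __fp16 a, b, c; }" qualifies as a
// homogeneous aggregate under AAPCS64, whereas the 32-bit ABI would reject
// __fp16 as a base type.)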
4020   if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4021     if (BT->isFloatingPoint())
4022       return true;
4023   } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
4024     unsigned VecSize = getContext().getTypeSize(VT);
4025     if (VecSize == 64 || VecSize == 128)
4026       return true;
4027   }
4028   return false;
4029 }
4030
4031 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
4032                                                        uint64_t Members) const {
4033   return Members <= 4;
4034 }
4035
4036 llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
4037                                             QualType Ty,
4038                                             CodeGenFunction &CGF) const {
4039   ABIArgInfo AI = classifyArgumentType(Ty);
4040   bool IsIndirect = AI.isIndirect();
4041
4042   llvm::Type *BaseTy = CGF.ConvertType(Ty);
4043   if (IsIndirect)
4044     BaseTy = llvm::PointerType::getUnqual(BaseTy);
4045   else if (AI.getCoerceToType())
4046     BaseTy = AI.getCoerceToType();
4047
4048   unsigned NumRegs = 1;
4049   if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
4050     BaseTy = ArrTy->getElementType();
4051     NumRegs = ArrTy->getNumElements();
4052   }
4053   bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
4054
4055   // The AArch64 va_list type and handling is specified in the Procedure Call
4056   // Standard, section B.4:
4057   //
4058   // struct {
4059   //   void *__stack;
4060   //   void *__gr_top;
4061   //   void *__vr_top;
4062   //   int __gr_offs;
4063   //   int __vr_offs;
4064   // };
4065
4066   llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
4067   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
4068   llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
4069   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
4070   auto &Ctx = CGF.getContext();
4071
4072   llvm::Value *reg_offs_p = nullptr, *reg_offs = nullptr;
4073   int reg_top_index;
4074   int RegSize = IsIndirect ? 8 : getContext().getTypeSize(Ty) / 8;
4075   if (!IsFPR) {
4076     // 3 is the field number of __gr_offs.
4077     reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
4078     reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
4079     reg_top_index = 1; // field number for __gr_top
4080     RegSize = llvm::RoundUpToAlignment(RegSize, 8);
4081   } else {
4082     // 4 is the field number of __vr_offs.
4083     reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
4084     reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
4085     reg_top_index = 2; // field number for __vr_top
4086     RegSize = 16 * NumRegs;
4087   }
4088
4089   //=======================================
4090   // Find out where the argument was passed
4091   //=======================================
4092
4093   // If reg_offs >= 0 we're already using the stack for this type of
4094   // argument. We don't want to keep updating reg_offs (in case it overflows,
4095   // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
4096   // whatever they get).
4097   llvm::Value *UsingStack = nullptr;
4098   UsingStack = CGF.Builder.CreateICmpSGE(
4099       reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
4100
4101   CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
4102
4103   // Otherwise, at least some kind of argument could go in these registers;
4104   // the question is whether this particular type is too big.
4105   CGF.EmitBlock(MaybeRegBlock);
4106
4107   // Integer arguments may need to be placed at a stricter register alignment
4108   // (for example a "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}).
4109   // In this case we align __gr_offs to calculate the potential address.
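  // (Illustrative example: with __gr_offs == -56 and a 16-byte-aligned type,
  // rounding yields -48, so the value starts at an even-numbered x register.)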
4110 if (!IsFPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) { 4111 int Align = Ctx.getTypeAlign(Ty) / 8; 4112 4113 reg_offs = CGF.Builder.CreateAdd( 4114 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1), 4115 "align_regoffs"); 4116 reg_offs = CGF.Builder.CreateAnd( 4117 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align), 4118 "aligned_regoffs"); 4119 } 4120 4121 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list. 4122 llvm::Value *NewOffset = nullptr; 4123 NewOffset = CGF.Builder.CreateAdd( 4124 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs"); 4125 CGF.Builder.CreateStore(NewOffset, reg_offs_p); 4126 4127 // Now we're in a position to decide whether this argument really was in 4128 // registers or not. 4129 llvm::Value *InRegs = nullptr; 4130 InRegs = CGF.Builder.CreateICmpSLE( 4131 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg"); 4132 4133 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock); 4134 4135 //======================================= 4136 // Argument was in registers 4137 //======================================= 4138 4139 // Now we emit the code for if the argument was originally passed in 4140 // registers. First start the appropriate block: 4141 CGF.EmitBlock(InRegBlock); 4142 4143 llvm::Value *reg_top_p = nullptr, *reg_top = nullptr; 4144 reg_top_p = 4145 CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p"); 4146 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top"); 4147 llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs); 4148 llvm::Value *RegAddr = nullptr; 4149 llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty)); 4150 4151 if (IsIndirect) { 4152 // If it's been passed indirectly (actually a struct), whatever we find from 4153 // stored registers or on the stack will actually be a struct **. 4154 MemTy = llvm::PointerType::getUnqual(MemTy); 4155 } 4156 4157 const Type *Base = nullptr; 4158 uint64_t NumMembers = 0; 4159 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers); 4160 if (IsHFA && NumMembers > 1) { 4161 // Homogeneous aggregates passed in registers will have their elements split 4162 // and stored 16-bytes apart regardless of size (they're notionally in qN, 4163 // qN+1, ...). We reload and store into a temporary local variable 4164 // contiguously. 4165 assert(!IsIndirect && "Homogeneous aggregates should be passed directly"); 4166 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0)); 4167 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers); 4168 llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy); 4169 int Offset = 0; 4170 4171 if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128) 4172 Offset = 16 - Ctx.getTypeSize(Base) / 8; 4173 for (unsigned i = 0; i < NumMembers; ++i) { 4174 llvm::Value *BaseOffset = 4175 llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset); 4176 llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset); 4177 LoadAddr = CGF.Builder.CreateBitCast( 4178 LoadAddr, llvm::PointerType::getUnqual(BaseTy)); 4179 llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i); 4180 4181 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr); 4182 CGF.Builder.CreateStore(Elem, StoreAddr); 4183 } 4184 4185 RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy); 4186 } else { 4187 // Otherwise the object is contiguous in memory 4188 unsigned BeAlign = reg_top_index == 2 ? 
16 : 8;
4189     if (CGF.CGM.getDataLayout().isBigEndian() &&
4190         (IsHFA || !isAggregateTypeForABI(Ty)) &&
4191         Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
4192       int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;
4193       BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);
4194
4195       BaseAddr = CGF.Builder.CreateAdd(
4196           BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
4197
4198       BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
4199     }
4200
4201     RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
4202   }
4203
4204   CGF.EmitBranch(ContBlock);
4205
4206   //=======================================
4207   // Argument was on the stack
4208   //=======================================
4209   CGF.EmitBlock(OnStackBlock);
4210
4211   llvm::Value *stack_p = nullptr, *OnStackAddr = nullptr;
4212   stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
4213   OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
4214
4215   // Again, stack arguments may need realignment. In this case both integer
4216   // and floating-point ones might be affected.
4217   if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
4218     int Align = Ctx.getTypeAlign(Ty) / 8;
4219
4220     OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
4221
4222     OnStackAddr = CGF.Builder.CreateAdd(
4223         OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
4224         "align_stack");
4225     OnStackAddr = CGF.Builder.CreateAnd(
4226         OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
4227         "align_stack");
4228
4229     OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
4230   }
4231
4232   uint64_t StackSize;
4233   if (IsIndirect)
4234     StackSize = 8;
4235   else
4236     StackSize = Ctx.getTypeSize(Ty) / 8;
4237
4238   // All stack slots are 8 bytes.
4239   StackSize = llvm::RoundUpToAlignment(StackSize, 8);
4240
4241   llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
4242   llvm::Value *NewStack =
4243       CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack");
4244
4245   // Write the new value of __stack for the next call to va_arg.
4246   CGF.Builder.CreateStore(NewStack, stack_p);
4247
4248   if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
4249       Ctx.getTypeSize(Ty) < 64) {
4250     int Offset = 8 - Ctx.getTypeSize(Ty) / 8;
4251     OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
4252
4253     OnStackAddr = CGF.Builder.CreateAdd(
4254         OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
4255
4256     OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
4257   }
4258
4259   OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);
4260
4261   CGF.EmitBranch(ContBlock);
4262
4263   //=======================================
4264   // Tidy up
4265   //=======================================
4266   CGF.EmitBlock(ContBlock);
4267
4268   llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
4269   ResAddr->addIncoming(RegAddr, InRegBlock);
4270   ResAddr->addIncoming(OnStackAddr, OnStackBlock);
4271
4272   if (IsIndirect)
4273     return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
4274
4275   return ResAddr;
4276 }
4277
4278 llvm::Value *AArch64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
4279                                              CodeGenFunction &CGF) const {
4280   // We do not support va_arg for aggregates or illegal vector types.
4281   // Lower VAArg here for these cases and use the LLVM va_arg instruction for
4282   // other cases.
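  // Returning nullptr below signals the caller to fall back to the ordinary
  // LLVM va_arg instruction for those simple cases.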
4283 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty)) 4284 return nullptr; 4285 4286 uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8; 4287 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 4288 4289 const Type *Base = nullptr; 4290 uint64_t Members = 0; 4291 bool isHA = isHomogeneousAggregate(Ty, Base, Members); 4292 4293 bool isIndirect = false; 4294 // Arguments bigger than 16 bytes which aren't homogeneous aggregates should 4295 // be passed indirectly. 4296 if (Size > 16 && !isHA) { 4297 isIndirect = true; 4298 Size = 8; 4299 Align = 8; 4300 } 4301 4302 llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); 4303 llvm::Type *BPP = llvm::PointerType::getUnqual(BP); 4304 4305 CGBuilderTy &Builder = CGF.Builder; 4306 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 4307 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 4308 4309 if (isEmptyRecord(getContext(), Ty, true)) { 4310 // These are ignored for parameter passing purposes. 4311 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 4312 return Builder.CreateBitCast(Addr, PTy); 4313 } 4314 4315 const uint64_t MinABIAlign = 8; 4316 if (Align > MinABIAlign) { 4317 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1); 4318 Addr = Builder.CreateGEP(Addr, Offset); 4319 llvm::Value *AsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 4320 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~(Align - 1)); 4321 llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask); 4322 Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align"); 4323 } 4324 4325 uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign); 4326 llvm::Value *NextAddr = Builder.CreateGEP( 4327 Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next"); 4328 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 4329 4330 if (isIndirect) 4331 Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP)); 4332 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 4333 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 4334 4335 return AddrTyped; 4336 } 4337 4338 //===----------------------------------------------------------------------===// 4339 // ARM ABI Implementation 4340 //===----------------------------------------------------------------------===// 4341 4342 namespace { 4343 4344 class ARMABIInfo : public ABIInfo { 4345 public: 4346 enum ABIKind { 4347 APCS = 0, 4348 AAPCS = 1, 4349 AAPCS_VFP 4350 }; 4351 4352 private: 4353 ABIKind Kind; 4354 4355 public: 4356 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) { 4357 setCCs(); 4358 } 4359 4360 bool isEABI() const { 4361 switch (getTarget().getTriple().getEnvironment()) { 4362 case llvm::Triple::Android: 4363 case llvm::Triple::EABI: 4364 case llvm::Triple::EABIHF: 4365 case llvm::Triple::GNUEABI: 4366 case llvm::Triple::GNUEABIHF: 4367 return true; 4368 default: 4369 return false; 4370 } 4371 } 4372 4373 bool isEABIHF() const { 4374 switch (getTarget().getTriple().getEnvironment()) { 4375 case llvm::Triple::EABIHF: 4376 case llvm::Triple::GNUEABIHF: 4377 return true; 4378 default: 4379 return false; 4380 } 4381 } 4382 4383 ABIKind getABIKind() const { return Kind; } 4384 4385 private: 4386 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const; 4387 ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const; 4388 bool isIllegalVectorType(QualType Ty) const; 4389 4390 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 4391 bool 
isHomogeneousAggregateSmallEnough(const Type *Ty, 4392 uint64_t Members) const override; 4393 4394 void computeInfo(CGFunctionInfo &FI) const override; 4395 4396 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4397 CodeGenFunction &CGF) const override; 4398 4399 llvm::CallingConv::ID getLLVMDefaultCC() const; 4400 llvm::CallingConv::ID getABIDefaultCC() const; 4401 void setCCs(); 4402 }; 4403 4404 class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 4405 public: 4406 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 4407 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 4408 4409 const ARMABIInfo &getABIInfo() const { 4410 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 4411 } 4412 4413 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4414 return 13; 4415 } 4416 4417 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 4418 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; 4419 } 4420 4421 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4422 llvm::Value *Address) const override { 4423 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 4424 4425 // 0-15 are the 16 integer registers. 4426 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); 4427 return false; 4428 } 4429 4430 unsigned getSizeOfUnwindException() const override { 4431 if (getABIInfo().isEABI()) return 88; 4432 return TargetCodeGenInfo::getSizeOfUnwindException(); 4433 } 4434 4435 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4436 CodeGen::CodeGenModule &CGM) const override { 4437 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 4438 if (!FD) 4439 return; 4440 4441 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>(); 4442 if (!Attr) 4443 return; 4444 4445 const char *Kind; 4446 switch (Attr->getInterrupt()) { 4447 case ARMInterruptAttr::Generic: Kind = ""; break; 4448 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break; 4449 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break; 4450 case ARMInterruptAttr::SWI: Kind = "SWI"; break; 4451 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break; 4452 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break; 4453 } 4454 4455 llvm::Function *Fn = cast<llvm::Function>(GV); 4456 4457 Fn->addFnAttr("interrupt", Kind); 4458 4459 if (cast<ARMABIInfo>(getABIInfo()).getABIKind() == ARMABIInfo::APCS) 4460 return; 4461 4462 // AAPCS guarantees that sp will be 8-byte aligned on any public interface, 4463 // however this is not necessarily true on taking any interrupt. Instruct 4464 // the backend to perform a realignment as part of the function prologue. 4465 llvm::AttrBuilder B; 4466 B.addStackAlignmentAttr(8); 4467 Fn->addAttributes(llvm::AttributeSet::FunctionIndex, 4468 llvm::AttributeSet::get(CGM.getLLVMContext(), 4469 llvm::AttributeSet::FunctionIndex, 4470 B)); 4471 } 4472 4473 bool hasSjLjLowering(CodeGen::CodeGenFunction &CGF) const override { 4474 return false; 4475 // FIXME: backend implementation too restricted, even on Darwin. 
4476 // return CGF.getTarget().getTriple().isOSDarwin(); 4477 } 4478 }; 4479 4480 class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo { 4481 void addStackProbeSizeTargetAttribute(const Decl *D, llvm::GlobalValue *GV, 4482 CodeGen::CodeGenModule &CGM) const; 4483 4484 public: 4485 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 4486 : ARMTargetCodeGenInfo(CGT, K) {} 4487 4488 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4489 CodeGen::CodeGenModule &CGM) const override; 4490 }; 4491 4492 void WindowsARMTargetCodeGenInfo::addStackProbeSizeTargetAttribute( 4493 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 4494 if (!isa<FunctionDecl>(D)) 4495 return; 4496 if (CGM.getCodeGenOpts().StackProbeSize == 4096) 4497 return; 4498 4499 llvm::Function *F = cast<llvm::Function>(GV); 4500 F->addFnAttr("stack-probe-size", 4501 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize)); 4502 } 4503 4504 void WindowsARMTargetCodeGenInfo::SetTargetAttributes( 4505 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 4506 ARMTargetCodeGenInfo::SetTargetAttributes(D, GV, CGM); 4507 addStackProbeSizeTargetAttribute(D, GV, CGM); 4508 } 4509 } 4510 4511 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 4512 if (!getCXXABI().classifyReturnType(FI)) 4513 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic()); 4514 4515 for (auto &I : FI.arguments()) 4516 I.info = classifyArgumentType(I.type, FI.isVariadic()); 4517 4518 // Always honor user-specified calling convention. 4519 if (FI.getCallingConvention() != llvm::CallingConv::C) 4520 return; 4521 4522 llvm::CallingConv::ID cc = getRuntimeCC(); 4523 if (cc != llvm::CallingConv::C) 4524 FI.setEffectiveCallingConvention(cc); 4525 } 4526 4527 /// Return the default calling convention that LLVM will use. 4528 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const { 4529 // The default calling convention that LLVM will infer. 4530 if (isEABIHF()) 4531 return llvm::CallingConv::ARM_AAPCS_VFP; 4532 else if (isEABI()) 4533 return llvm::CallingConv::ARM_AAPCS; 4534 else 4535 return llvm::CallingConv::ARM_APCS; 4536 } 4537 4538 /// Return the calling convention that our ABI would like us to use 4539 /// as the C calling convention. 4540 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const { 4541 switch (getABIKind()) { 4542 case APCS: return llvm::CallingConv::ARM_APCS; 4543 case AAPCS: return llvm::CallingConv::ARM_AAPCS; 4544 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 4545 } 4546 llvm_unreachable("bad ABI kind"); 4547 } 4548 4549 void ARMABIInfo::setCCs() { 4550 assert(getRuntimeCC() == llvm::CallingConv::C); 4551 4552 // Don't muddy up the IR with a ton of explicit annotations if 4553 // they'd just match what LLVM will infer from the triple. 4554 llvm::CallingConv::ID abiCC = getABIDefaultCC(); 4555 if (abiCC != getLLVMDefaultCC()) 4556 RuntimeCC = abiCC; 4557 4558 BuiltinCC = (getABIKind() == APCS ? 
4559 llvm::CallingConv::ARM_APCS : llvm::CallingConv::ARM_AAPCS); 4560 } 4561 4562 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, 4563 bool isVariadic) const { 4564 // 6.1.2.1 The following argument types are VFP CPRCs: 4565 // A single-precision floating-point type (including promoted 4566 // half-precision types); A double-precision floating-point type; 4567 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate 4568 // with a Base Type of a single- or double-precision floating-point type, 4569 // 64-bit containerized vectors or 128-bit containerized vectors with one 4570 // to four Elements. 4571 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic; 4572 4573 Ty = useFirstFieldIfTransparentUnion(Ty); 4574 4575 // Handle illegal vector types here. 4576 if (isIllegalVectorType(Ty)) { 4577 uint64_t Size = getContext().getTypeSize(Ty); 4578 if (Size <= 32) { 4579 llvm::Type *ResType = 4580 llvm::Type::getInt32Ty(getVMContext()); 4581 return ABIArgInfo::getDirect(ResType); 4582 } 4583 if (Size == 64) { 4584 llvm::Type *ResType = llvm::VectorType::get( 4585 llvm::Type::getInt32Ty(getVMContext()), 2); 4586 return ABIArgInfo::getDirect(ResType); 4587 } 4588 if (Size == 128) { 4589 llvm::Type *ResType = llvm::VectorType::get( 4590 llvm::Type::getInt32Ty(getVMContext()), 4); 4591 return ABIArgInfo::getDirect(ResType); 4592 } 4593 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 4594 } 4595 4596 if (!isAggregateTypeForABI(Ty)) { 4597 // Treat an enum type as its underlying type. 4598 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { 4599 Ty = EnumTy->getDecl()->getIntegerType(); 4600 } 4601 4602 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend() 4603 : ABIArgInfo::getDirect()); 4604 } 4605 4606 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 4607 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 4608 } 4609 4610 // Ignore empty records. 4611 if (isEmptyRecord(getContext(), Ty, true)) 4612 return ABIArgInfo::getIgnore(); 4613 4614 if (IsEffectivelyAAPCS_VFP) { 4615 // Homogeneous Aggregates need to be expanded when we can fit the aggregate 4616 // into VFP registers. 4617 const Type *Base = nullptr; 4618 uint64_t Members = 0; 4619 if (isHomogeneousAggregate(Ty, Base, Members)) { 4620 assert(Base && "Base class should be set for homogeneous aggregate"); 4621 // Base can be a floating-point or a vector. 4622 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); 4623 } 4624 } 4625 4626 // Support byval for ARM. 4627 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at 4628 // most 8-byte. We realign the indirect argument if type alignment is bigger 4629 // than ABI alignment. 4630 uint64_t ABIAlign = 4; 4631 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8; 4632 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 4633 getABIKind() == ARMABIInfo::AAPCS) 4634 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 4635 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) { 4636 // Update Allocated GPRs. Since this is only used when the size of the 4637 // argument is greater than 64 bytes, this will always use up any available 4638 // registers (of which there are 4). We also don't care about getting the 4639 // alignment right, because general-purpose registers cannot be back-filled. 
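    // (Illustrative example: a 72-byte struct exceeds the 64-byte threshold
    // above and is passed byval with its natural alignment, realigned only
    // when that alignment is stricter than the ABI alignment.)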
4640 return ABIArgInfo::getIndirect(TyAlign, /*ByVal=*/true, 4641 /*Realign=*/TyAlign > ABIAlign); 4642 } 4643 4644 // Otherwise, pass by coercing to a structure of the appropriate size. 4645 llvm::Type* ElemTy; 4646 unsigned SizeRegs; 4647 // FIXME: Try to match the types of the arguments more accurately where 4648 // we can. 4649 if (getContext().getTypeAlign(Ty) <= 32) { 4650 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 4651 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 4652 } else { 4653 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 4654 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 4655 } 4656 4657 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs)); 4658 } 4659 4660 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 4661 llvm::LLVMContext &VMContext) { 4662 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 4663 // is called integer-like if its size is less than or equal to one word, and 4664 // the offset of each of its addressable sub-fields is zero. 4665 4666 uint64_t Size = Context.getTypeSize(Ty); 4667 4668 // Check that the type fits in a word. 4669 if (Size > 32) 4670 return false; 4671 4672 // FIXME: Handle vector types! 4673 if (Ty->isVectorType()) 4674 return false; 4675 4676 // Float types are never treated as "integer like". 4677 if (Ty->isRealFloatingType()) 4678 return false; 4679 4680 // If this is a builtin or pointer type then it is ok. 4681 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 4682 return true; 4683 4684 // Small complex integer types are "integer like". 4685 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 4686 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 4687 4688 // Single element and zero sized arrays should be allowed, by the definition 4689 // above, but they are not. 4690 4691 // Otherwise, it must be a record type. 4692 const RecordType *RT = Ty->getAs<RecordType>(); 4693 if (!RT) return false; 4694 4695 // Ignore records with flexible arrays. 4696 const RecordDecl *RD = RT->getDecl(); 4697 if (RD->hasFlexibleArrayMember()) 4698 return false; 4699 4700 // Check that all sub-fields are at offset 0, and are themselves "integer 4701 // like". 4702 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 4703 4704 bool HadField = false; 4705 unsigned idx = 0; 4706 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 4707 i != e; ++i, ++idx) { 4708 const FieldDecl *FD = *i; 4709 4710 // Bit-fields are not addressable, we only need to verify they are "integer 4711 // like". We still have to disallow a subsequent non-bitfield, for example: 4712 // struct { int : 0; int x } 4713 // is non-integer like according to gcc. 4714 if (FD->isBitField()) { 4715 if (!RD->isUnion()) 4716 HadField = true; 4717 4718 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 4719 return false; 4720 4721 continue; 4722 } 4723 4724 // Check if this field is at offset 0. 4725 if (Layout.getFieldOffset(idx) != 0) 4726 return false; 4727 4728 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 4729 return false; 4730 4731 // Only allow at most one field in a structure. This doesn't match the 4732 // wording above, but follows gcc in situations with a field following an 4733 // empty structure. 
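    // For illustration (hypothetical types, using the GNU empty-struct
    // extension): 'struct { int x; }' is integer-like, but
    // 'struct { struct {} e; int x; }' is rejected by the check below even
    // though both fields sit at offset zero, matching gcc's handling of a
    // field that follows an empty structure.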
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}

ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
                                          bool isVariadic) const {
  bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
    return ABIArgInfo::getIndirect(0);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
                                            : ABIArgInfo::getDirect();
  }

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return ABIArgInfo::getIndirect(0);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsEffectivelyAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members;
    if (isHomogeneousAggregate(RetTy, Base, Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Homogeneous Aggregates are returned directly.
      return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
    }
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  }

  return ABIArgInfo::getIndirect(0);
}

/// isIllegalVectorType - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is legal.
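    // For illustration: a hypothetical '<3 x i32>' vector (non-power-of-2
    // element count) and a 16-bit '<2 x i8>' vector (smaller than 32 bits)
    // would both be reported as illegal here.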
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // The vector is illegal if the number of elements is not a power of 2.
    if ((NumElements & (NumElements - 1)) != 0)
      return true;
    // The vector is also illegal if it is smaller than 32 bits.
    return Size <= 32;
  }
  return false;
}

bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, long double, or 64-bit or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                   uint64_t Members) const {
  return Members <= 4;
}

llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  if (isEmptyRecord(getContext(), Ty, true)) {
    // These are ignored for parameter passing purposes.
    llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
    return Builder.CreateBitCast(Addr, PTy);
  }

  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
  uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
  bool IsIndirect = false;

  // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
  // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  else
    TyAlign = 4;
  // Use indirect if size of the illegal vector is bigger than 16 bytes.
  if (isIllegalVectorType(Ty) && Size > 16) {
    IsIndirect = true;
    Size = 4;
    TyAlign = 4;
  }

  // Handle address alignment for ABI alignment > 4 bytes.
  if (TyAlign > 4) {
    assert((TyAlign & (TyAlign - 1)) == 0 &&
           "Alignment is not power of 2!");
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
  }

  uint64_t Offset =
    llvm::RoundUpToAlignment(Size, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  if (IsIndirect)
    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
  else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
    // We can't directly cast ap.cur to pointer to a vector type, since ap.cur
    // may not be correctly aligned for the vector type. We create an aligned
    // temporary space and copy the content over from ap.cur to the temporary
    // space. This is necessary if the natural alignment of the type is
    // greater than the ABI alignment.
    llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
    CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
    llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
                                                    "var.align");
    llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
    llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
    Builder.CreateMemCpy(Dst, Src,
        llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
        TyAlign, false);
    Addr = AlignedTemp; // The content is now in the aligned location.
  }
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  return AddrTyped;
}

//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class NVPTXABIInfo : public ABIInfo {
public:
  NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CFG) const override;
};

class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
private:
  // Creates an MDNode with F, Name, and Operand as operands, and appends it
  // to the nvvm.annotations named metadata node.
  static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
};

ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Note: unlike the default ABI, aggregate return values are returned
  // directly rather than indirectly.
  if (!RetTy->isScalarType())
    return ABIArgInfo::getDirect();

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Pass aggregate types indirectly, by value.
  if (isAggregateTypeForABI(Ty))
    return ABIArgInfo::getIndirect(0, /* byval */ true);

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);

  // Always honor user-specified calling convention.
5008 if (FI.getCallingConvention() != llvm::CallingConv::C) 5009 return; 5010 5011 FI.setEffectiveCallingConvention(getRuntimeCC()); 5012 } 5013 5014 llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 5015 CodeGenFunction &CFG) const { 5016 llvm_unreachable("NVPTX does not support varargs"); 5017 } 5018 5019 void NVPTXTargetCodeGenInfo:: 5020 SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5021 CodeGen::CodeGenModule &M) const{ 5022 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 5023 if (!FD) return; 5024 5025 llvm::Function *F = cast<llvm::Function>(GV); 5026 5027 // Perform special handling in OpenCL mode 5028 if (M.getLangOpts().OpenCL) { 5029 // Use OpenCL function attributes to check for kernel functions 5030 // By default, all functions are device functions 5031 if (FD->hasAttr<OpenCLKernelAttr>()) { 5032 // OpenCL __kernel functions get kernel metadata 5033 // Create !{<func-ref>, metadata !"kernel", i32 1} node 5034 addNVVMMetadata(F, "kernel", 1); 5035 // And kernel functions are not subject to inlining 5036 F->addFnAttr(llvm::Attribute::NoInline); 5037 } 5038 } 5039 5040 // Perform special handling in CUDA mode. 5041 if (M.getLangOpts().CUDA) { 5042 // CUDA __global__ functions get a kernel metadata entry. Since 5043 // __global__ functions cannot be called from the device, we do not 5044 // need to set the noinline attribute. 5045 if (FD->hasAttr<CUDAGlobalAttr>()) { 5046 // Create !{<func-ref>, metadata !"kernel", i32 1} node 5047 addNVVMMetadata(F, "kernel", 1); 5048 } 5049 if (FD->hasAttr<CUDALaunchBoundsAttr>()) { 5050 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node 5051 addNVVMMetadata(F, "maxntidx", 5052 FD->getAttr<CUDALaunchBoundsAttr>()->getMaxThreads()); 5053 // min blocks is a default argument for CUDALaunchBoundsAttr, so getting a 5054 // zero value from getMinBlocks either means it was not specified in 5055 // __launch_bounds__ or the user specified a 0 value. In both cases, we 5056 // don't have to add a PTX directive. 
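      // For example, a hypothetical kernel declared as
      //   __global__ void __launch_bounds__(256, 2) foo();
      // would get both a !{<func-ref>, metadata !"maxntidx", i32 256} node
      // and a !{<func-ref>, metadata !"minctasm", i32 2} node.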
5057 int MinCTASM = FD->getAttr<CUDALaunchBoundsAttr>()->getMinBlocks(); 5058 if (MinCTASM > 0) { 5059 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node 5060 addNVVMMetadata(F, "minctasm", MinCTASM); 5061 } 5062 } 5063 } 5064 } 5065 5066 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name, 5067 int Operand) { 5068 llvm::Module *M = F->getParent(); 5069 llvm::LLVMContext &Ctx = M->getContext(); 5070 5071 // Get "nvvm.annotations" metadata node 5072 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations"); 5073 5074 llvm::Metadata *MDVals[] = { 5075 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name), 5076 llvm::ConstantAsMetadata::get( 5077 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))}; 5078 // Append metadata to nvvm.annotations 5079 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 5080 } 5081 } 5082 5083 //===----------------------------------------------------------------------===// 5084 // SystemZ ABI Implementation 5085 //===----------------------------------------------------------------------===// 5086 5087 namespace { 5088 5089 class SystemZABIInfo : public ABIInfo { 5090 public: 5091 SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 5092 5093 bool isPromotableIntegerType(QualType Ty) const; 5094 bool isCompoundType(QualType Ty) const; 5095 bool isFPArgumentType(QualType Ty) const; 5096 5097 ABIArgInfo classifyReturnType(QualType RetTy) const; 5098 ABIArgInfo classifyArgumentType(QualType ArgTy) const; 5099 5100 void computeInfo(CGFunctionInfo &FI) const override { 5101 if (!getCXXABI().classifyReturnType(FI)) 5102 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 5103 for (auto &I : FI.arguments()) 5104 I.info = classifyArgumentType(I.type); 5105 } 5106 5107 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 5108 CodeGenFunction &CGF) const override; 5109 }; 5110 5111 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo { 5112 public: 5113 SystemZTargetCodeGenInfo(CodeGenTypes &CGT) 5114 : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {} 5115 }; 5116 5117 } 5118 5119 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const { 5120 // Treat an enum type as its underlying type. 5121 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 5122 Ty = EnumTy->getDecl()->getIntegerType(); 5123 5124 // Promotable integer types are required to be promoted by the ABI. 5125 if (Ty->isPromotableIntegerType()) 5126 return true; 5127 5128 // 32-bit values must also be promoted. 5129 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 5130 switch (BT->getKind()) { 5131 case BuiltinType::Int: 5132 case BuiltinType::UInt: 5133 return true; 5134 default: 5135 return false; 5136 } 5137 return false; 5138 } 5139 5140 bool SystemZABIInfo::isCompoundType(QualType Ty) const { 5141 return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty); 5142 } 5143 5144 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const { 5145 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 5146 switch (BT->getKind()) { 5147 case BuiltinType::Float: 5148 case BuiltinType::Double: 5149 return true; 5150 default: 5151 return false; 5152 } 5153 5154 if (const RecordType *RT = Ty->getAsStructureType()) { 5155 const RecordDecl *RD = RT->getDecl(); 5156 bool Found = false; 5157 5158 // If this is a C++ record, check the bases first. 
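    // For illustration (hypothetical types): 'struct A { float f; };' is an
    // FP argument type, and so is 'struct B : A {};', whose single FP member
    // comes from the base class.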
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      for (const auto &I : CXXRD->bases()) {
        QualType Base = I.getType();

        // Empty bases don't affect things either way.
        if (isEmptyRecord(getContext(), Base, true))
          continue;

        if (Found)
          return false;
        Found = isFPArgumentType(Base);
        if (!Found)
          return false;
      }

    // Check the fields.
    for (const auto *FD : RD->fields()) {
      // Empty bitfields don't affect things either way.
      // Unlike isSingleElementStruct(), empty structure and array fields
      // do count. So do anonymous bitfields that aren't zero-sized.
      if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
        continue;

      // Unlike isSingleElementStruct(), arrays do not count as FP types.
      // Nested structures that are themselves FP argument types still do.
      if (Found)
        return false;
      Found = isFPArgumentType(FD->getType());
      if (!Found)
        return false;
    }

    // Unlike isSingleElementStruct(), trailing padding is allowed.
    // An 8-byte aligned struct s { float f; } is passed as a double.
    return Found;
  }

  return false;
}

llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i64 __gpr;
  //   i64 __fpr;
  //   i8 *__overflow_arg_area;
  //   i8 *__reg_save_area;
  // };

  // Every argument occupies 8 bytes and is passed by preference in either
  // GPRs or FPRs.
  Ty = CGF.getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool InFPRs = isFPArgumentType(Ty);

  llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
  bool IsIndirect = AI.isIndirect();
  unsigned UnpaddedBitSize;
  if (IsIndirect) {
    APTy = llvm::PointerType::getUnqual(APTy);
    UnpaddedBitSize = 64;
  } else
    UnpaddedBitSize = getContext().getTypeSize(Ty);
  unsigned PaddedBitSize = 64;
  assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");

  unsigned PaddedSize = PaddedBitSize / 8;
  unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;

  unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
  if (InFPRs) {
    MaxRegs = 4;           // Maximum of 4 FPR arguments
    RegCountField = 1;     // __fpr
    RegSaveIndex = 16;     // save offset for f0
    RegPadding = 0;        // floats are passed in the high bits of an FPR
  } else {
    MaxRegs = 5;           // Maximum of 5 GPR arguments
    RegCountField = 0;     // __gpr
    RegSaveIndex = 2;      // save offset for r2
    RegPadding = Padding;  // values are passed in the low bits of a GPR
  }

  llvm::Value *RegCountPtr =
    CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
  llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
  llvm::Type *IndexTy = RegCount->getType();
  llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
  llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
                                                  "fits_in_regs");

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if
it was passed in registers. 5256 CGF.EmitBlock(InRegBlock); 5257 5258 // Work out the address of an argument register. 5259 llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize); 5260 llvm::Value *ScaledRegCount = 5261 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count"); 5262 llvm::Value *RegBase = 5263 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding); 5264 llvm::Value *RegOffset = 5265 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset"); 5266 llvm::Value *RegSaveAreaPtr = 5267 CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr"); 5268 llvm::Value *RegSaveArea = 5269 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area"); 5270 llvm::Value *RawRegAddr = 5271 CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr"); 5272 llvm::Value *RegAddr = 5273 CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr"); 5274 5275 // Update the register count 5276 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1); 5277 llvm::Value *NewRegCount = 5278 CGF.Builder.CreateAdd(RegCount, One, "reg_count"); 5279 CGF.Builder.CreateStore(NewRegCount, RegCountPtr); 5280 CGF.EmitBranch(ContBlock); 5281 5282 // Emit code to load the value if it was passed in memory. 5283 CGF.EmitBlock(InMemBlock); 5284 5285 // Work out the address of a stack argument. 5286 llvm::Value *OverflowArgAreaPtr = 5287 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr"); 5288 llvm::Value *OverflowArgArea = 5289 CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"); 5290 llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding); 5291 llvm::Value *RawMemAddr = 5292 CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr"); 5293 llvm::Value *MemAddr = 5294 CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr"); 5295 5296 // Update overflow_arg_area_ptr pointer 5297 llvm::Value *NewOverflowArgArea = 5298 CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area"); 5299 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 5300 CGF.EmitBranch(ContBlock); 5301 5302 // Return the appropriate result. 5303 CGF.EmitBlock(ContBlock); 5304 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr"); 5305 ResAddr->addIncoming(RegAddr, InRegBlock); 5306 ResAddr->addIncoming(MemAddr, InMemBlock); 5307 5308 if (IsIndirect) 5309 return CGF.Builder.CreateLoad(ResAddr, "indirect_arg"); 5310 5311 return ResAddr; 5312 } 5313 5314 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const { 5315 if (RetTy->isVoidType()) 5316 return ABIArgInfo::getIgnore(); 5317 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64) 5318 return ABIArgInfo::getIndirect(0); 5319 return (isPromotableIntegerType(RetTy) ? 5320 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 5321 } 5322 5323 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const { 5324 // Handle the generic C++ ABI. 5325 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 5326 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 5327 5328 // Integers and enums are extended to full register width. 5329 if (isPromotableIntegerType(Ty)) 5330 return ABIArgInfo::getExtend(); 5331 5332 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly. 5333 uint64_t Size = getContext().getTypeSize(Ty); 5334 if (Size != 8 && Size != 16 && Size != 32 && Size != 64) 5335 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 5336 5337 // Handle small structures. 
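  // For illustration (hypothetical types):
  //   struct { float f; }        is passed as 'float';
  //   struct { double d; }       is passed as 'double';
  //   struct { int a; char b; }  is 8 bytes and is passed as 'i64'.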
5338 if (const RecordType *RT = Ty->getAs<RecordType>()) { 5339 // Structures with flexible arrays have variable length, so really 5340 // fail the size test above. 5341 const RecordDecl *RD = RT->getDecl(); 5342 if (RD->hasFlexibleArrayMember()) 5343 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 5344 5345 // The structure is passed as an unextended integer, a float, or a double. 5346 llvm::Type *PassTy; 5347 if (isFPArgumentType(Ty)) { 5348 assert(Size == 32 || Size == 64); 5349 if (Size == 32) 5350 PassTy = llvm::Type::getFloatTy(getVMContext()); 5351 else 5352 PassTy = llvm::Type::getDoubleTy(getVMContext()); 5353 } else 5354 PassTy = llvm::IntegerType::get(getVMContext(), Size); 5355 return ABIArgInfo::getDirect(PassTy); 5356 } 5357 5358 // Non-structure compounds are passed indirectly. 5359 if (isCompoundType(Ty)) 5360 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 5361 5362 return ABIArgInfo::getDirect(nullptr); 5363 } 5364 5365 //===----------------------------------------------------------------------===// 5366 // MSP430 ABI Implementation 5367 //===----------------------------------------------------------------------===// 5368 5369 namespace { 5370 5371 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 5372 public: 5373 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 5374 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 5375 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5376 CodeGen::CodeGenModule &M) const override; 5377 }; 5378 5379 } 5380 5381 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 5382 llvm::GlobalValue *GV, 5383 CodeGen::CodeGenModule &M) const { 5384 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 5385 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 5386 // Handle 'interrupt' attribute: 5387 llvm::Function *F = cast<llvm::Function>(GV); 5388 5389 // Step 1: Set ISR calling convention. 5390 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 5391 5392 // Step 2: Add attributes goodness. 5393 F->addFnAttr(llvm::Attribute::NoInline); 5394 5395 // Step 3: Emit ISR vector alias. 5396 unsigned Num = attr->getNumber() / 2; 5397 llvm::GlobalAlias::create(llvm::Function::ExternalLinkage, 5398 "__isr_" + Twine(Num), F); 5399 } 5400 } 5401 } 5402 5403 //===----------------------------------------------------------------------===// 5404 // MIPS ABI Implementation. This works for both little-endian and 5405 // big-endian variants. 5406 //===----------------------------------------------------------------------===// 5407 5408 namespace { 5409 class MipsABIInfo : public ABIInfo { 5410 bool IsO32; 5411 unsigned MinABIStackAlignInBytes, StackAlignInBytes; 5412 void CoerceToIntArgs(uint64_t TySize, 5413 SmallVectorImpl<llvm::Type *> &ArgList) const; 5414 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; 5415 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 5416 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 5417 public: 5418 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 5419 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), 5420 StackAlignInBytes(IsO32 ? 
8 : 16) {} 5421 5422 ABIArgInfo classifyReturnType(QualType RetTy) const; 5423 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 5424 void computeInfo(CGFunctionInfo &FI) const override; 5425 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 5426 CodeGenFunction &CGF) const override; 5427 }; 5428 5429 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 5430 unsigned SizeOfUnwindException; 5431 public: 5432 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 5433 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 5434 SizeOfUnwindException(IsO32 ? 24 : 32) {} 5435 5436 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 5437 return 29; 5438 } 5439 5440 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5441 CodeGen::CodeGenModule &CGM) const override { 5442 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 5443 if (!FD) return; 5444 llvm::Function *Fn = cast<llvm::Function>(GV); 5445 if (FD->hasAttr<Mips16Attr>()) { 5446 Fn->addFnAttr("mips16"); 5447 } 5448 else if (FD->hasAttr<NoMips16Attr>()) { 5449 Fn->addFnAttr("nomips16"); 5450 } 5451 } 5452 5453 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 5454 llvm::Value *Address) const override; 5455 5456 unsigned getSizeOfUnwindException() const override { 5457 return SizeOfUnwindException; 5458 } 5459 }; 5460 } 5461 5462 void MipsABIInfo::CoerceToIntArgs(uint64_t TySize, 5463 SmallVectorImpl<llvm::Type *> &ArgList) const { 5464 llvm::IntegerType *IntTy = 5465 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 5466 5467 // Add (TySize / MinABIStackAlignInBytes) args of IntTy. 5468 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) 5469 ArgList.push_back(IntTy); 5470 5471 // If necessary, add one more integer type to ArgList. 5472 unsigned R = TySize % (MinABIStackAlignInBytes * 8); 5473 5474 if (R) 5475 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 5476 } 5477 5478 // In N32/64, an aligned double precision floating point field is passed in 5479 // a register. 5480 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { 5481 SmallVector<llvm::Type*, 8> ArgList, IntArgList; 5482 5483 if (IsO32) { 5484 CoerceToIntArgs(TySize, ArgList); 5485 return llvm::StructType::get(getVMContext(), ArgList); 5486 } 5487 5488 if (Ty->isComplexType()) 5489 return CGT.ConvertType(Ty); 5490 5491 const RecordType *RT = Ty->getAs<RecordType>(); 5492 5493 // Unions/vectors are passed in integer registers. 5494 if (!RT || !RT->isStructureOrClassType()) { 5495 CoerceToIntArgs(TySize, ArgList); 5496 return llvm::StructType::get(getVMContext(), ArgList); 5497 } 5498 5499 const RecordDecl *RD = RT->getDecl(); 5500 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 5501 assert(!(TySize % 8) && "Size of structure must be multiple of 8."); 5502 5503 uint64_t LastOffset = 0; 5504 unsigned idx = 0; 5505 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 5506 5507 // Iterate over fields in the struct/class and check if there are any aligned 5508 // double fields. 5509 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 5510 i != e; ++i, ++idx) { 5511 const QualType Ty = i->getType(); 5512 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 5513 5514 if (!BT || BT->getKind() != BuiltinType::Double) 5515 continue; 5516 5517 uint64_t Offset = Layout.getFieldOffset(idx); 5518 if (Offset % 64) // Ignore doubles that are not aligned. 
5519 continue; 5520 5521 // Add ((Offset - LastOffset) / 64) args of type i64. 5522 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 5523 ArgList.push_back(I64); 5524 5525 // Add double type. 5526 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 5527 LastOffset = Offset + 64; 5528 } 5529 5530 CoerceToIntArgs(TySize - LastOffset, IntArgList); 5531 ArgList.append(IntArgList.begin(), IntArgList.end()); 5532 5533 return llvm::StructType::get(getVMContext(), ArgList); 5534 } 5535 5536 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset, 5537 uint64_t Offset) const { 5538 if (OrigOffset + MinABIStackAlignInBytes > Offset) 5539 return nullptr; 5540 5541 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8); 5542 } 5543 5544 ABIArgInfo 5545 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 5546 Ty = useFirstFieldIfTransparentUnion(Ty); 5547 5548 uint64_t OrigOffset = Offset; 5549 uint64_t TySize = getContext().getTypeSize(Ty); 5550 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 5551 5552 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), 5553 (uint64_t)StackAlignInBytes); 5554 unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align); 5555 Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8; 5556 5557 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { 5558 // Ignore empty aggregates. 5559 if (TySize == 0) 5560 return ABIArgInfo::getIgnore(); 5561 5562 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 5563 Offset = OrigOffset + MinABIStackAlignInBytes; 5564 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 5565 } 5566 5567 // If we have reached here, aggregates are passed directly by coercing to 5568 // another structure type. Padding is inserted if the offset of the 5569 // aggregate is unaligned. 5570 ABIArgInfo ArgInfo = 5571 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, 5572 getPaddingType(OrigOffset, CurrOffset)); 5573 ArgInfo.setInReg(true); 5574 return ArgInfo; 5575 } 5576 5577 // Treat an enum type as its underlying type. 5578 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 5579 Ty = EnumTy->getDecl()->getIntegerType(); 5580 5581 // All integral types are promoted to the GPR width. 5582 if (Ty->isIntegralOrEnumerationType()) 5583 return ABIArgInfo::getExtend(); 5584 5585 return ABIArgInfo::getDirect( 5586 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset)); 5587 } 5588 5589 llvm::Type* 5590 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 5591 const RecordType *RT = RetTy->getAs<RecordType>(); 5592 SmallVector<llvm::Type*, 8> RTList; 5593 5594 if (RT && RT->isStructureOrClassType()) { 5595 const RecordDecl *RD = RT->getDecl(); 5596 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 5597 unsigned FieldCnt = Layout.getFieldCount(); 5598 5599 // N32/64 returns struct/classes in floating point registers if the 5600 // following conditions are met: 5601 // 1. The size of the struct/class is no larger than 128-bit. 5602 // 2. The struct/class has one or two fields all of which are floating 5603 // point types. 5604 // 3. The offset of the first field is zero (this follows what gcc does). 5605 // 5606 // Any other composite results are returned in integer registers. 
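    //
    // For example (illustrative):
    //   struct { double d; float f; }  is returned as { double, float } in
    //   FPRs; struct { int i; float f; } is returned in integer registers.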
5607 // 5608 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 5609 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 5610 for (; b != e; ++b) { 5611 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 5612 5613 if (!BT || !BT->isFloatingPoint()) 5614 break; 5615 5616 RTList.push_back(CGT.ConvertType(b->getType())); 5617 } 5618 5619 if (b == e) 5620 return llvm::StructType::get(getVMContext(), RTList, 5621 RD->hasAttr<PackedAttr>()); 5622 5623 RTList.clear(); 5624 } 5625 } 5626 5627 CoerceToIntArgs(Size, RTList); 5628 return llvm::StructType::get(getVMContext(), RTList); 5629 } 5630 5631 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 5632 uint64_t Size = getContext().getTypeSize(RetTy); 5633 5634 if (RetTy->isVoidType()) 5635 return ABIArgInfo::getIgnore(); 5636 5637 // O32 doesn't treat zero-sized structs differently from other structs. 5638 // However, N32/N64 ignores zero sized return values. 5639 if (!IsO32 && Size == 0) 5640 return ABIArgInfo::getIgnore(); 5641 5642 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 5643 if (Size <= 128) { 5644 if (RetTy->isAnyComplexType()) 5645 return ABIArgInfo::getDirect(); 5646 5647 // O32 returns integer vectors in registers and N32/N64 returns all small 5648 // aggregates in registers. 5649 if (!IsO32 || 5650 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) { 5651 ABIArgInfo ArgInfo = 5652 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 5653 ArgInfo.setInReg(true); 5654 return ArgInfo; 5655 } 5656 } 5657 5658 return ABIArgInfo::getIndirect(0); 5659 } 5660 5661 // Treat an enum type as its underlying type. 5662 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 5663 RetTy = EnumTy->getDecl()->getIntegerType(); 5664 5665 return (RetTy->isPromotableIntegerType() ? 5666 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 5667 } 5668 5669 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 5670 ABIArgInfo &RetInfo = FI.getReturnInfo(); 5671 if (!getCXXABI().classifyReturnType(FI)) 5672 RetInfo = classifyReturnType(FI.getReturnType()); 5673 5674 // Check if a pointer to an aggregate is passed as a hidden argument. 5675 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0; 5676 5677 for (auto &I : FI.arguments()) 5678 I.info = classifyArgumentType(I.type, Offset); 5679 } 5680 5681 llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 5682 CodeGenFunction &CGF) const { 5683 llvm::Type *BP = CGF.Int8PtrTy; 5684 llvm::Type *BPP = CGF.Int8PtrPtrTy; 5685 5686 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64. 5687 // Pointers are also promoted in the same way but this only matters for N32. 5688 unsigned SlotSizeInBits = IsO32 ? 32 : 64; 5689 unsigned PtrWidth = getTarget().getPointerWidth(0); 5690 if ((Ty->isIntegerType() && 5691 CGF.getContext().getIntWidth(Ty) < SlotSizeInBits) || 5692 (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) { 5693 Ty = CGF.getContext().getIntTypeForBitwidth(SlotSizeInBits, 5694 Ty->isSignedIntegerType()); 5695 } 5696 5697 CGBuilderTy &Builder = CGF.Builder; 5698 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 5699 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 5700 int64_t TypeAlign = 5701 std::min(getContext().getTypeAlign(Ty) / 8, StackAlignInBytes); 5702 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 5703 llvm::Value *AddrTyped; 5704 llvm::IntegerType *IntTy = (PtrWidth == 32) ? 
CGF.Int32Ty : CGF.Int64Ty;

  if (TypeAlign > MinABIStackAlignInBytes) {
    llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
    llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
    llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
    llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
    llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
    AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
  }
  else
    AddrTyped = Builder.CreateBitCast(Addr, PTy);

  llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
  TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
  unsigned ArgSizeInBits = CGF.getContext().getTypeSize(Ty);
  uint64_t Offset = llvm::RoundUpToAlignment(ArgSizeInBits / 8, TypeAlign);
  llvm::Value *NextAddr =
    Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be as
  // canonical as it gets.

  // Everything on MIPS is 4 bytes. Double-precision FP registers
  // are aliased to pairs of single-precision FP registers.
  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-31 are the general purpose registers, $0 - $31.
  // 32-63 are the floating-point registers, $f0 - $f31.
  // 64 and 65 are the multiply/divide registers, $hi and $lo.
  // 66 is the (notional, I think) register for signal-handler return.
  AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);

  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
  // They are one bit wide and ignored here.

  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
  // (coprocessor 1 is the FP unit)
  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
  // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
  return false;
}

//===----------------------------------------------------------------------===//
// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
5761 //===----------------------------------------------------------------------===// 5762 5763 namespace { 5764 5765 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { 5766 public: 5767 TCETargetCodeGenInfo(CodeGenTypes &CGT) 5768 : DefaultTargetCodeGenInfo(CGT) {} 5769 5770 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5771 CodeGen::CodeGenModule &M) const override; 5772 }; 5773 5774 void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D, 5775 llvm::GlobalValue *GV, 5776 CodeGen::CodeGenModule &M) const { 5777 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 5778 if (!FD) return; 5779 5780 llvm::Function *F = cast<llvm::Function>(GV); 5781 5782 if (M.getLangOpts().OpenCL) { 5783 if (FD->hasAttr<OpenCLKernelAttr>()) { 5784 // OpenCL C Kernel functions are not subject to inlining 5785 F->addFnAttr(llvm::Attribute::NoInline); 5786 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>(); 5787 if (Attr) { 5788 // Convert the reqd_work_group_size() attributes to metadata. 5789 llvm::LLVMContext &Context = F->getContext(); 5790 llvm::NamedMDNode *OpenCLMetadata = 5791 M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info"); 5792 5793 SmallVector<llvm::Metadata *, 5> Operands; 5794 Operands.push_back(llvm::ConstantAsMetadata::get(F)); 5795 5796 Operands.push_back( 5797 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 5798 M.Int32Ty, llvm::APInt(32, Attr->getXDim())))); 5799 Operands.push_back( 5800 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 5801 M.Int32Ty, llvm::APInt(32, Attr->getYDim())))); 5802 Operands.push_back( 5803 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 5804 M.Int32Ty, llvm::APInt(32, Attr->getZDim())))); 5805 5806 // Add a boolean constant operand for "required" (true) or "hint" (false) 5807 // for implementing the work_group_size_hint attr later. Currently 5808 // always true as the hint is not yet implemented. 
5809 Operands.push_back( 5810 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context))); 5811 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); 5812 } 5813 } 5814 } 5815 } 5816 5817 } 5818 5819 //===----------------------------------------------------------------------===// 5820 // Hexagon ABI Implementation 5821 //===----------------------------------------------------------------------===// 5822 5823 namespace { 5824 5825 class HexagonABIInfo : public ABIInfo { 5826 5827 5828 public: 5829 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 5830 5831 private: 5832 5833 ABIArgInfo classifyReturnType(QualType RetTy) const; 5834 ABIArgInfo classifyArgumentType(QualType RetTy) const; 5835 5836 void computeInfo(CGFunctionInfo &FI) const override; 5837 5838 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 5839 CodeGenFunction &CGF) const override; 5840 }; 5841 5842 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { 5843 public: 5844 HexagonTargetCodeGenInfo(CodeGenTypes &CGT) 5845 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {} 5846 5847 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 5848 return 29; 5849 } 5850 }; 5851 5852 } 5853 5854 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { 5855 if (!getCXXABI().classifyReturnType(FI)) 5856 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 5857 for (auto &I : FI.arguments()) 5858 I.info = classifyArgumentType(I.type); 5859 } 5860 5861 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { 5862 if (!isAggregateTypeForABI(Ty)) { 5863 // Treat an enum type as its underlying type. 5864 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 5865 Ty = EnumTy->getDecl()->getIntegerType(); 5866 5867 return (Ty->isPromotableIntegerType() ? 5868 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 5869 } 5870 5871 // Ignore empty records. 5872 if (isEmptyRecord(getContext(), Ty, true)) 5873 return ABIArgInfo::getIgnore(); 5874 5875 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 5876 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 5877 5878 uint64_t Size = getContext().getTypeSize(Ty); 5879 if (Size > 64) 5880 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 5881 // Pass in the smallest viable integer type. 5882 else if (Size > 32) 5883 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 5884 else if (Size > 16) 5885 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 5886 else if (Size > 8) 5887 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 5888 else 5889 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 5890 } 5891 5892 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { 5893 if (RetTy->isVoidType()) 5894 return ABIArgInfo::getIgnore(); 5895 5896 // Large vector types should be returned via memory. 5897 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64) 5898 return ABIArgInfo::getIndirect(0); 5899 5900 if (!isAggregateTypeForABI(RetTy)) { 5901 // Treat an enum type as its underlying type. 5902 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 5903 RetTy = EnumTy->getDecl()->getIntegerType(); 5904 5905 return (RetTy->isPromotableIntegerType() ? 
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Aggregates <= 8 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 64) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    if (Size <= 32)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  }

  return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
}

llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // FIXME: Need to handle alignment
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

//===----------------------------------------------------------------------===//
// AMDGPU ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

}

void AMDGPUTargetCodeGenInfo::SetTargetAttributes(
  const Decl *D,
  llvm::GlobalValue *GV,
  CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD)
    return;

  if (const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    llvm::Function *F = cast<llvm::Function>(GV);
    uint32_t NumVGPR = Attr->getNumVGPR();
    if (NumVGPR != 0)
      F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR));
  }

  if (const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    llvm::Function *F = cast<llvm::Function>(GV);
    unsigned NumSGPR = Attr->getNumSGPR();
    if (NumSGPR != 0)
      F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR));
  }
}

//===----------------------------------------------------------------------===//
// SPARC v9 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Function arguments are mapped to a nominal "parameter array" and promoted
// to registers depending on their type. Each argument occupies 8 or 16 bytes
// in the array, structs larger than 16 bytes are passed indirectly.
5999 // 6000 // One case requires special care: 6001 // 6002 // struct mixed { 6003 // int i; 6004 // float f; 6005 // }; 6006 // 6007 // When a struct mixed is passed by value, it only occupies 8 bytes in the 6008 // parameter array, but the int is passed in an integer register, and the float 6009 // is passed in a floating point register. This is represented as two arguments 6010 // with the LLVM IR inreg attribute: 6011 // 6012 // declare void f(i32 inreg %i, float inreg %f) 6013 // 6014 // The code generator will only allocate 4 bytes from the parameter array for 6015 // the inreg arguments. All other arguments are allocated a multiple of 8 6016 // bytes. 6017 // 6018 namespace { 6019 class SparcV9ABIInfo : public ABIInfo { 6020 public: 6021 SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 6022 6023 private: 6024 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const; 6025 void computeInfo(CGFunctionInfo &FI) const override; 6026 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 6027 CodeGenFunction &CGF) const override; 6028 6029 // Coercion type builder for structs passed in registers. The coercion type 6030 // serves two purposes: 6031 // 6032 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned' 6033 // in registers. 6034 // 2. Expose aligned floating point elements as first-level elements, so the 6035 // code generator knows to pass them in floating point registers. 6036 // 6037 // We also compute the InReg flag which indicates that the struct contains 6038 // aligned 32-bit floats. 6039 // 6040 struct CoerceBuilder { 6041 llvm::LLVMContext &Context; 6042 const llvm::DataLayout &DL; 6043 SmallVector<llvm::Type*, 8> Elems; 6044 uint64_t Size; 6045 bool InReg; 6046 6047 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl) 6048 : Context(c), DL(dl), Size(0), InReg(false) {} 6049 6050 // Pad Elems with integers until Size is ToSize. 6051 void pad(uint64_t ToSize) { 6052 assert(ToSize >= Size && "Cannot remove elements"); 6053 if (ToSize == Size) 6054 return; 6055 6056 // Finish the current 64-bit word. 6057 uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64); 6058 if (Aligned > Size && Aligned <= ToSize) { 6059 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size)); 6060 Size = Aligned; 6061 } 6062 6063 // Add whole 64-bit words. 6064 while (Size + 64 <= ToSize) { 6065 Elems.push_back(llvm::Type::getInt64Ty(Context)); 6066 Size += 64; 6067 } 6068 6069 // Final in-word padding. 6070 if (Size < ToSize) { 6071 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size)); 6072 Size = ToSize; 6073 } 6074 } 6075 6076 // Add a floating point element at Offset. 6077 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) { 6078 // Unaligned floats are treated as integers. 6079 if (Offset % Bits) 6080 return; 6081 // The InReg flag is only required if there are any floats < 64 bits. 6082 if (Bits < 64) 6083 InReg = true; 6084 pad(Offset); 6085 Elems.push_back(Ty); 6086 Size = Offset + Bits; 6087 } 6088 6089 // Add a struct type to the coercion type, starting at Offset (in bits). 
6090 void addStruct(uint64_t Offset, llvm::StructType *StrTy) { 6091 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy); 6092 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) { 6093 llvm::Type *ElemTy = StrTy->getElementType(i); 6094 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i); 6095 switch (ElemTy->getTypeID()) { 6096 case llvm::Type::StructTyID: 6097 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy)); 6098 break; 6099 case llvm::Type::FloatTyID: 6100 addFloat(ElemOffset, ElemTy, 32); 6101 break; 6102 case llvm::Type::DoubleTyID: 6103 addFloat(ElemOffset, ElemTy, 64); 6104 break; 6105 case llvm::Type::FP128TyID: 6106 addFloat(ElemOffset, ElemTy, 128); 6107 break; 6108 case llvm::Type::PointerTyID: 6109 if (ElemOffset % 64 == 0) { 6110 pad(ElemOffset); 6111 Elems.push_back(ElemTy); 6112 Size += 64; 6113 } 6114 break; 6115 default: 6116 break; 6117 } 6118 } 6119 } 6120 6121 // Check if Ty is a usable substitute for the coercion type. 6122 bool isUsableType(llvm::StructType *Ty) const { 6123 return llvm::makeArrayRef(Elems) == Ty->elements(); 6124 } 6125 6126 // Get the coercion type as a literal struct type. 6127 llvm::Type *getType() const { 6128 if (Elems.size() == 1) 6129 return Elems.front(); 6130 else 6131 return llvm::StructType::get(Context, Elems); 6132 } 6133 }; 6134 }; 6135 } // end anonymous namespace 6136 6137 ABIArgInfo 6138 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const { 6139 if (Ty->isVoidType()) 6140 return ABIArgInfo::getIgnore(); 6141 6142 uint64_t Size = getContext().getTypeSize(Ty); 6143 6144 // Anything too big to fit in registers is passed with an explicit indirect 6145 // pointer / sret pointer. 6146 if (Size > SizeLimit) 6147 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 6148 6149 // Treat an enum type as its underlying type. 6150 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 6151 Ty = EnumTy->getDecl()->getIntegerType(); 6152 6153 // Integer types smaller than a register are extended. 6154 if (Size < 64 && Ty->isIntegerType()) 6155 return ABIArgInfo::getExtend(); 6156 6157 // Other non-aggregates go in registers. 6158 if (!isAggregateTypeForABI(Ty)) 6159 return ABIArgInfo::getDirect(); 6160 6161 // If a C++ object has either a non-trivial copy constructor or a non-trivial 6162 // destructor, it is passed with an explicit indirect pointer / sret pointer. 6163 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 6164 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 6165 6166 // This is a small aggregate type that should be passed in registers. 6167 // Build a coercion type from the LLVM struct type. 6168 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty)); 6169 if (!StrTy) 6170 return ABIArgInfo::getDirect(); 6171 6172 CoerceBuilder CB(getVMContext(), getDataLayout()); 6173 CB.addStruct(0, StrTy); 6174 CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64)); 6175 6176 // Try to use the original type for coercion. 6177 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? 
  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  else
    return ABIArgInfo::getDirect(CoerceTy);
}

llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyType(Ty, 16 * 8);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);

  llvm::Type *BPP = CGF.Int8PtrPtrTy;
  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  llvm::Value *ArgAddr;
  unsigned Stride;

  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");

  case ABIArgInfo::Extend:
    Stride = 8;
    // SPARC v9 is big-endian, so a value smaller than its 8-byte slot is
    // found at the high-address end of the slot.
    ArgAddr = Builder
      .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
                          "extend");
    break;

  case ABIArgInfo::Direct:
    Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    ArgAddr = Addr;
    break;

  case ABIArgInfo::Indirect:
    Stride = 8;
    ArgAddr = Builder.CreateBitCast(Addr,
                                    llvm::PointerType::getUnqual(ArgPtrTy),
                                    "indirect");
    ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
    break;

  case ABIArgInfo::Ignore:
    return llvm::UndefValue::get(ArgPtrTy);
  }

  // Update VAList.
  Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
  Builder.CreateStore(Addr, VAListAddrAsBPP);

  return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
}
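
// An illustrative note on the size limits: classifyType() is called with
// 32 * 8 bits for return values and 16 * 8 bits for arguments (see
// computeInfo() below), so e.g. a 24-byte struct is returned in registers
// but is passed, and va_arg'ed, indirectly.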

void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
  for (auto &I : FI.arguments())
    I.info = classifyType(I.type, 16 * 8);
}

namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 14;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // end anonymous namespace

bool
SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                  llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

  // 0-31: the 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Eight8, 0, 31);

  // 32-63: f0-31, the 4-byte floating-point registers
  AssignToArrayRange(Builder, Address, Four8, 32, 63);

  //   Y   = 64
  //   PSR = 65
  //   WIM = 66
  //   TBR = 67
  //   PC  = 68
  //   NPC = 69
  //   FSR = 70
  //   CSR = 71
  AssignToArrayRange(Builder, Address, Eight8, 64, 71);

  // 72-87: d0-15, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 72, 87);

  return false;
}


//===----------------------------------------------------------------------===//
// XCore ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

/// A SmallStringEnc instance is used to build up the TypeString by passing
/// it by reference between functions that append to it.
typedef llvm::SmallString<128> SmallStringEnc;

/// TypeStringCache caches the meta encodings of Types.
///
/// The reason for caching TypeStrings is twofold:
/// 1. To cache a type's encoding for later uses;
/// 2. As a means to break recursive member type inclusion.
///
/// A cache Entry can have a Status of:
/// NonRecursive:   The type encoding is not recursive;
/// Recursive:      The type encoding is recursive;
/// Incomplete:     An incomplete TypeString;
/// IncompleteUsed: An incomplete TypeString that has been used in a
///                 Recursive type encoding.
///
/// A NonRecursive entry will have all of its sub-members expanded as fully
/// as possible. Whilst it may contain types which are recursive, the type
/// itself is not recursive and thus its encoding may be safely used whenever
/// the type is encountered.
///
/// A Recursive entry will have all of its sub-members expanded as fully as
/// possible. The type itself is recursive and it may contain other types which
/// are recursive. The Recursive encoding must not be used during the expansion
/// of a recursive type's recursive branch. For simplicity the code uses
/// IncompleteCount to reject all usage of Recursive encodings for member types.
///
/// An Incomplete entry is always a RecordType and only encodes its
/// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
/// are placed into the cache during type expansion as a means to identify and
/// handle recursive inclusion of types as sub-members. If there is recursion
/// the entry becomes IncompleteUsed.
///
/// During the expansion of a RecordType's members:
///
/// If the cache contains a NonRecursive encoding for the member type, the
/// cached encoding is used;
///
/// If the cache contains a Recursive encoding for the member type, the
/// cached encoding is 'Swapped' out, as it may be incorrect, and...
///
/// If the member is a RecordType, an Incomplete encoding is placed into the
/// cache to break potential recursive inclusion of itself as a sub-member;
///
/// Once a member RecordType has been expanded, its temporary incomplete
/// entry is removed from the cache. If a Recursive encoding was swapped out
/// it is swapped back in;
///
/// If an incomplete entry is used to expand a sub-member, the incomplete
/// entry is marked as IncompleteUsed. The cache keeps count of how many
/// IncompleteUsed entries it currently contains in IncompleteUsedCount;
///
/// If a member's encoding is NonRecursive or Recursive (viz:
/// IncompleteUsedCount == 0), the member's encoding is added to the cache.
/// Otherwise the member is part of a recursive type and thus the recursion
/// has been exited too soon for the encoding to be correct for the member.
///
/// A worked example follows the class definition below.
class TypeStringCache {
  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary place holder for a Recursive encoding
                         // during the expansion of RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
  bool removeIncomplete(const IdentifierInfo *ID);
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  StringRef lookupStr(const IdentifierInfo *ID);
};
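
/// Worked example (illustrative; the encoding grammar is described with
/// getTypeString() below):
///
///   struct S { struct S *next; };
///
/// While expanding S's members, the incomplete stub "s(S){}" is cached via
/// addIncomplete(). The member expansion of 'next' reaches S again, and
/// lookupStr() returns the stub, marking it IncompleteUsed. Once the members
/// are done, removeIncomplete() reports the recursion and the complete
/// encoding "s(S){m(next){p(s(S){})}}" is cached with Status Recursive.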

/// TypeString encodings for enum & union fields must be ordered.
/// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
  bool HasName;
  std::string Enc;
public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc.c_str(); }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};

class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;
public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
};

} // End anonymous namespace.

llvm::Value *XCoreABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr,
                                                       CGF.Int8PtrPtrTy);
  llvm::Value *AP = Builder.CreateLoad(VAListAddrAsBPP);

  // Handle the argument.
  ABIArgInfo AI = classifyArgumentType(Ty);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  llvm::Value *Val;
  uint64_t ArgSize = 0;
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = llvm::UndefValue::get(ArgPtrTy);
    ArgSize = 0;
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreatePointerCast(AP, ArgPtrTy);
    ArgSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    if (ArgSize < 4)
      ArgSize = 4;
    break;
  case ABIArgInfo::Indirect:
    llvm::Value *ArgAddr;
    ArgAddr = Builder.CreateBitCast(AP, llvm::PointerType::getUnqual(ArgPtrTy));
    ArgAddr = Builder.CreateLoad(ArgAddr);
    Val = Builder.CreatePointerCast(ArgAddr, ArgPtrTy);
    ArgSize = 4;
    break;
  }

  // Increment the VAList.
  if (ArgSize) {
    llvm::Value *APN = Builder.CreateConstGEP1_32(AP, ArgSize);
    Builder.CreateStore(APN, VAListAddrAsBPP);
  }
  return Val;
}

/// During the expansion of a RecordType, an incomplete TypeString is placed
/// into the cache as a means to identify and break recursion.
/// If there is a Recursive encoding in the cache, it is swapped out and will
/// be reinserted by removeIncomplete().
/// All other types of encoding should have been used rather than arriving here.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert((E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // swap out the Recursive
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}

/// Once the RecordType has been expanded, the temporary incomplete TypeString
/// must be removed from the cache.
/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
/// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert((E.State == Incomplete ||
          E.State == IncompleteUsed) &&
         "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive back.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}
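
/// The cache entry points above and below are driven from appendRecordType();
/// a minimal sketch of the call pattern (mirroring the code further down):
///
///   TSC.addIncomplete(ID, StubEnc);          // cache the "s(S){}" stub
///   ... expand members, recursing via appendType() ...
///   bool IsRecursive = TSC.removeIncomplete(ID);
///   TSC.addIfComplete(ID, Enc, IsRecursive);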

/// Add the encoded TypeString to the cache only if it is NonRecursive or
/// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key or it is an incomplete sub-type so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State == Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry after all, but we assumed the worst when
    // we started viz: IncompleteCount != 0.
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive ? Recursive : NonRecursive;
}

/// Return a cached TypeString encoding for the ID. If there isn't one, or we
/// are recursively expanding a type (IncompleteCount != 0) and the cached
/// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef(); // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef(); // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef(); // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str.c_str();
}

/// The XCore ABI includes a type information section that communicates symbol
/// type information to the linker. The linker uses this information to verify
/// the safety/correctness of things such as array bounds and pointers.
/// The ABI only requires C (and XC) language modules to emit TypeStrings.
/// This type information (TypeString) is emitted into metadata for all global
/// symbols: definitions, declarations, functions & variables.
///
/// The TypeString carries type, qualifier, name, size & value details.
/// Please see 'Tools Development Guide' section 2.16.2 for format details:
/// <https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf>
/// The output is tested by test/CodeGen/xcore-stringtype.c.
///
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);

/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                                          CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
    llvm::LLVMContext &Ctx = CGM.getModule().getContext();
    llvm::SmallVector<llvm::Metadata *, 2> MDVals;
    MDVals.push_back(llvm::ConstantAsMetadata::get(GV));
    MDVals.push_back(llvm::MDString::get(Ctx, Enc.str()));
    llvm::NamedMDNode *MD =
      CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}
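
// The resulting IR looks roughly like this (an illustrative sketch for a
// global 'int g;', assuming the encoding produced by the functions below):
//
//   @g = global i32 0
//   !xcore.typestrings = !{!0}
//   !0 = !{i32* @g, !"si"}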

static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC);

/// Helper function for appendRecordType().
/// Builds a SmallVector containing the encoded field types in declaration
/// order.
static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
                             const RecordDecl *RD,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    SmallStringEnc Enc;
    Enc += "m(";
    Enc += Field->getName();
    Enc += "){";
    if (Field->isBitField()) {
      Enc += "b(";
      llvm::raw_svector_ostream OS(Enc);
      OS.resync();
      OS << Field->getBitWidthValue(CGM.getContext());
      OS.flush();
      Enc += ':';
    }
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      return false;
    if (Field->isBitField())
      Enc += ')';
    Enc += '}';
    FE.push_back(FieldEncoding(!Field->getName().empty(), Enc));
  }
  return true;
}

/// Appends structure and union types to Enc and adds the encoding to the
/// cache. Recursively calls appendType (via extractFieldType) for each field.
/// Union types have their fields ordered according to the ABI.
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC, const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  // Start to emit an incomplete TypeString.
  size_t Start = Enc.size();
  Enc += (RT->isUnionType() ? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded fields and order them as necessary.
  bool IsRecursive = false;
  const RecordDecl *RD = RT->getDecl()->getDefinition();
  if (RD && !RD->field_empty()) {
    // An incomplete TypeString stub is placed in the cache for this RecordType
    // so that recursive calls to this RecordType will use it whilst building a
    // complete TypeString for this RecordType.
    SmallVector<FieldEncoding, 16> FE;
    std::string StubEnc(Enc.substr(Start).str());
    StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
    TSC.addIncomplete(ID, std::move(StubEnc));
    if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void) TSC.removeIncomplete(ID);
      return false;
    }
    IsRecursive = TSC.removeIncomplete(ID);
    // The ABI requires unions to be sorted but not structures.
    // See FieldEncoding::operator< for the sort algorithm.
    if (RT->isUnionType())
      std::sort(FE.begin(), FE.end());
    // We can now complete the TypeString.
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}
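
/// For example (illustrative): 'struct P { int x; int y; };' is encoded as
/// "s(P){m(x){si},m(y){si}}", and a bit-field member 'int b : 3;' would be
/// encoded as "m(b){b(3:si)}".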

/// Appends enum types to Enc and adds the encoding to the cache.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
                           TypeStringCache &TSC,
                           const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded enumerations and order them alphanumerically.
  if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
    SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    std::sort(FE.begin(), FE.end());
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}

/// Appends a type's qualifiers to Enc.
/// This is done prior to appending the type's encoding.
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
  // Qualifiers are emitted in alphabetical order.
  static const char *Table[] = {"", "c:", "r:", "cr:", "v:", "cv:", "rv:",
                                "crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1<<0;
  if (QT.isRestrictQualified())
    Lookup += 1<<1;
  if (QT.isVolatileQualified())
    Lookup += 1<<2;
  Enc += Table[Lookup];
}

/// Appends built-in types to Enc.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
  const char *EncType;
  switch (BT->getKind()) {
  case BuiltinType::Void:
    EncType = "0";
    break;
  case BuiltinType::Bool:
    EncType = "b";
    break;
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
    EncType = "uc";
    break;
  case BuiltinType::SChar:
    EncType = "sc";
    break;
  case BuiltinType::UShort:
    EncType = "us";
    break;
  case BuiltinType::Short:
    EncType = "ss";
    break;
  case BuiltinType::UInt:
    EncType = "ui";
    break;
  case BuiltinType::Int:
    EncType = "si";
    break;
  case BuiltinType::ULong:
    EncType = "ul";
    break;
  case BuiltinType::Long:
    EncType = "sl";
    break;
  case BuiltinType::ULongLong:
    EncType = "ull";
    break;
  case BuiltinType::LongLong:
    EncType = "sll";
    break;
  case BuiltinType::Float:
    EncType = "ft";
    break;
  case BuiltinType::Double:
    EncType = "d";
    break;
  case BuiltinType::LongDouble:
    EncType = "ld";
    break;
  default:
    return false;
  }
  Enc += EncType;
  return true;
}

/// Appends a pointer encoding to Enc before calling appendType for the pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
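
/// Examples (illustrative): 'const int *' encodes as "p(c:si)" (the
/// qualifier attaches to the pointee), and 'volatile long' as "v:sl".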

/// Appends an array encoding to Enc before calling appendType for the element.
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The Qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}

/// Appends a function encoding to Enc, calling appendType for the return type
/// and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        ++I;
        if (I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else {
      if (FPT->isVariadic())
        Enc += "va";
      else
        Enc += '0';
    }
  }
  Enc += ')';
  return true;
}

/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {

  QualType QT = QType.getCanonicalType();

  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
    // The Qualifiers should be attached to the type rather than the array.
    // Thus we don't call appendQualifier() here.
    return appendArrayType(Enc, QT, AT, CGM, TSC, "");

  appendQualifier(Enc, QT);

  if (const BuiltinType *BT = QT->getAs<BuiltinType>())
    return appendBuiltinType(Enc, BT);

  if (const PointerType *PT = QT->getAs<PointerType>())
    return appendPointerType(Enc, PT, CGM, TSC);

  if (const EnumType *ET = QT->getAs<EnumType>())
    return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsStructureType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsUnionType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}
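
/// Example (illustrative): 'long f(unsigned n, ...)' encodes as
/// "f{sl}(ui,va)", and a member of type 'int[4]' as "a(4:si)".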

static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }
  return false;
}


//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//

const llvm::Triple &CodeGenModule::getTriple() const {
  return getTarget().getTriple();
}

bool CodeGenModule::supportsCOMDAT() const {
  return !getTriple().isOSBinFormatMachO();
}
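
// The dispatcher below lazily constructs one TargetCodeGenInfo per module,
// keyed on the target triple; for example, a 'sparcv9' triple selects the
// SparcV9TargetCodeGenInfo defined earlier in this file.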

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));

  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;

    return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    if (Triple.getOS() == llvm::Triple::Win32) {
      TheTargetCodeGenInfo =
          new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP);
      return *TheTargetCodeGenInfo;
    }

    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
    if (getTarget().getABI() == "apcs-gnu")
      Kind = ARMABIInfo::APCS;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (CodeGenOpts.FloatABI != "soft" &&
              Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
      Kind = ARMABIInfo::AAPCS_VFP;

    return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::ppc:
    return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;

      return *(TheTargetCodeGenInfo =
                   new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
    } else
      return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;

    return *(TheTargetCodeGenInfo =
                 new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::systemz:
    return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));

  case llvm::Triple::tce:
    return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool IsSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return *(TheTargetCodeGenInfo = new WinX86_32TargetCodeGenInfo(
                   Types, IsDarwinVectorABI, IsSmallStructInRegABI,
                   IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return *(TheTargetCodeGenInfo = new X86_32TargetCodeGenInfo(
                   Types, IsDarwinVectorABI, IsSmallStructInRegABI,
                   IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    }
  }

  case llvm::Triple::x86_64: {
    bool HasAVX = getTarget().getABI() == "avx";

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return *(TheTargetCodeGenInfo =
                   new WinX86_64TargetCodeGenInfo(Types, HasAVX));
    case llvm::Triple::PS4:
      return *(TheTargetCodeGenInfo = new PS4TargetCodeGenInfo(Types, HasAVX));
    default:
      return *(TheTargetCodeGenInfo =
                   new X86_64TargetCodeGenInfo(Types, HasAVX));
    }
  }
  case llvm::Triple::hexagon:
    return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
  case llvm::Triple::amdgcn:
    return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types));
  }
}