//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"

#include <algorithm>    // std::sort

using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
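/// For example (illustrative only), a union such as
///
///   typedef union __attribute__((transparent_union)) {
///     int *ip;
///     const int *cip;
///   } int_ptr_t;
///
/// is passed as if it were a plain 'int *'.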
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}

CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  //   AArch64    Linux
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static
  // or dynamic.
  Opt = "-l";
  Opt += Lib;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding.  (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it
/// were expanded into separate arguments. If so, we prefer to do the latter to
/// avoid inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (const auto *FD : RD->fields()) {
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems: we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
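/// In practice this means aggregates are passed and returned indirectly,
/// while scalar types are passed directly (extending small promotable
/// integers).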
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return nullptr;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI.  Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return nullptr;
}

/// \brief Classify argument of given type \p Ty.
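/// Aggregates are passed indirectly, enums are classified as their underlying
/// integer type, and other scalars are passed directly (extending promotable
/// integers).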
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    return ABIArgInfo::getIndirect(0);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE
/// registers in the X86_VectorCall calling convention. Shared between x86_32
/// and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// \brief Similar to llvm::CCState, but for Clang.
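/// Tracks the calling convention in use together with the number of free
/// integer and SSE registers remaining while arguments are classified.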
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned FreeSSERegs;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsWin32StructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType Ty, CCState &State) const;
  bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           unsigned &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                          bool d, bool p, bool w, unsigned r)
      : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
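    // (The stack pointer is register 5 in Darwin's numbering and 4 elsewhere.)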
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x06 << 8) |  //   .+0x08
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

};

}

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     mov $0, $1
///     mov eax, $1
/// The result will be:
///     mov $0, $2
///     mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are returned in a register if all fields would be
  // returned in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    return ABIArgInfo::getIndirectInReg(/*Align=*/0, /*ByVal=*/false);
  }
  return ABIArgInfo::getIndirect(/*Align=*/0, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
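      // We use <2 x i64> here simply as a generic 128-bit vector type,
      // rather than preserving the source element type.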
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(State);

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
    }

    return getIndirectReturnResult(State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (isRecordWithSSEVectorType(Context, I.getType()))
        return true;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
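  // Darwin additionally raises the alignment to 16 for types containing SSE
  // vectors (handled below).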
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return ABIArgInfo::getIndirectInReg(0, false);
    }
    return ABIArgInfo::getIndirect(0, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4, /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
                                   bool &NeedsPadding) const {
  NeedsPadding = false;
  Class C = classify(Ty);
  if (C == Float)
    return false;

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall) {
    if (Size > 32)
      return false;

    if (Ty->isIntegralOrEnumerationType())
      return true;

    if (Ty->isPointerType())
      return true;

    if (Ty->isReferenceType())
      return true;

    if (State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // vectorcall adds the concept of a homogeneous vector aggregate, similar
  // to other targets.
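  // Such an aggregate is passed in SSE registers when enough are free;
  // otherwise it falls back to being passed indirectly.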
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;
      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    if (RT) {
      // Structs are always byval on win32, regardless of what they contain.
      if (IsWin32StructABI)
        return getIndirectResult(Ty, true, State);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, State);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding;
    if (shouldUseInReg(Ty, State, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Expand small (<= 128-bit) record types when we know that the stack
    // layout of those arguments will match the struct. This is important
    // because the LLVM backend isn't smart enough to remove byval, which
    // inhibits many optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall ||
              State.CC == llvm::CallingConv::X86_VectorCall,
          PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory; we handle this by passing
    // them as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }


  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool NeedsPadding;
  bool InReg = shouldUseInReg(Ty, State, NeedsPadding);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI.getCallingConvention());
  if (State.CC == llvm::CallingConv::X86_FastCall)
    State.FreeRegs = 2;
  else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!getCXXABI().classifyReturnType(FI)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs; // The sret parameter consumes a register.
      FI.getReturnInfo().setInReg(true);
    }
  }

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;

  bool UsedInAlloca = false;
  for (auto &I : FI.arguments()) {
    I.info = classifyArgumentType(I.type, State);
    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and
  // rewrite all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}

void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                                   unsigned &StackOffset,
                                   ABIArgInfo &Info, QualType Type) const {
  assert(StackOffset % 4U == 0 && "unaligned inalloca struct");
  Info = ABIArgInfo::getInAlloca(FrameFields.size());
  FrameFields.push_back(CGT.ConvertTypeForMem(Type));
  StackOffset += getContext().getTypeSizeInChars(Type).getQuantity();

  // Insert padding bytes to respect alignment. For x86_32, each argument is
  // 4-byte aligned.
  if (StackOffset % 4U) {
    unsigned OldOffset = StackOffset;
    StackOffset = llvm::RoundUpToAlignment(StackOffset, 4U);
    unsigned NumBytes = StackOffset - OldOffset;
    assert(NumBytes);
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes);
    FrameFields.push_back(Ty);
  }
}

static bool isArgInAlloca(const ABIArgInfo &Info) {
  // Leave ignored and inreg arguments alone.
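  // Everything else is passed in memory and therefore lives in the inalloca
  // argument struct.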
  switch (Info.getKind()) {
  case ABIArgInfo::InAlloca:
    return true;
  case ABIArgInfo::Indirect:
    assert(Info.getIndirectByVal());
    return true;
  case ABIArgInfo::Ignore:
    return false;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
  case ABIArgInfo::Expand:
    if (Info.getInReg())
      return false;
    return true;
  }
  llvm_unreachable("invalid enum");
}

void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  unsigned StackOffset = 0;
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();

  // Put 'this' into the struct before 'sret', if necessary.
  bool IsThisCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
      isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }

  // Put the sret parameter into the inalloca struct if it's in memory.
  if (Ret.isIndirect() && !Ret.getInReg()) {
    CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  if (IsThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {
    if (isArgInAlloca(I->info))
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true));
}

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute if the address needs to be aligned.
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
        llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
      llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
      llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
      Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                        "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

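/// Decide whether small struct returns use registers for the given target,
/// honoring the -fpcc-struct-return / -freg-struct-return overrides.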
bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack:  // -fpcc-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs:  // -freg-struct-return
    return true;
  }

  if (Triple.isOSDarwin())
    return true;

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Bitrig:
  case llvm::Triple::Win32:
    return true;
  default:
    return false;
  }
}

void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                        llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                              B));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
                                               CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4).  Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5).  Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//


namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object.  Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// \param isNamedArg - Whether the argument in question is a "named"
  /// argument, as used in AMD64-ABI 3.5.7.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the value will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE,
                                  bool isNamedArg) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers.  In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
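  /// Darwin is the only platform here that keeps the pre-0.98 behavior.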
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  bool HasAVX;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
                                           /*isNamedArg*/true);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs,
                      bool IsReturnType) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
  bool HasAVX;
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)), HasAVX(HasAVX) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior.  However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
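    // (A call through an unprototyped C declaration such as 'void f();' is
    // the case being handled here.)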
    if (fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x0a << 8) |  //   .+0x0c
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return HasAVX ? 32 : 16;
  }
};

static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
  // If the argument does not end in .lib, automatically add the suffix. This
  // matches the behavior of MSVC.
  std::string ArgStr = Lib;
  if (!Lib.endswith_lower(".lib"))
    ArgStr += ".lib";
  return ArgStr;
}

class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                             bool d, bool p, bool w, unsigned RegParms)
    : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
  bool HasAVX;
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)), HasAVX(HasAVX) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }

  unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return HasAVX ? 32 : 16;
  }
};

}

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
1681 // 1682 // (c) If the size of the aggregate exceeds two eightbytes and the first 1683 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole 1684 // argument is passed in memory. NOTE: This is necessary to keep the 1685 // ABI working for processors that don't support the __m256 type. 1686 // 1687 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. 1688 // 1689 // Some of these are enforced by the merging logic. Others can arise 1690 // only with unions; for example: 1691 // union { _Complex double; unsigned; } 1692 // 1693 // Note that clauses (b) and (c) were added in 0.98. 1694 // 1695 if (Hi == Memory) 1696 Lo = Memory; 1697 if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) 1698 Lo = Memory; 1699 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) 1700 Lo = Memory; 1701 if (Hi == SSEUp && Lo != SSE) 1702 Hi = SSE; 1703 } 1704 1705 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { 1706 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 1707 // classified recursively so that always two fields are 1708 // considered. The resulting class is calculated according to 1709 // the classes of the fields in the eightbyte: 1710 // 1711 // (a) If both classes are equal, this is the resulting class. 1712 // 1713 // (b) If one of the classes is NO_CLASS, the resulting class is 1714 // the other class. 1715 // 1716 // (c) If one of the classes is MEMORY, the result is the MEMORY 1717 // class. 1718 // 1719 // (d) If one of the classes is INTEGER, the result is the 1720 // INTEGER. 1721 // 1722 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 1723 // MEMORY is used as class. 1724 // 1725 // (f) Otherwise class SSE is used. 1726 1727 // Accum should never be memory (we should have returned) or 1728 // ComplexX87 (because this cannot be passed in a structure). 1729 assert((Accum != Memory && Accum != ComplexX87) && 1730 "Invalid accumulated classification during merge."); 1731 if (Accum == Field || Field == NoClass) 1732 return Accum; 1733 if (Field == Memory) 1734 return Memory; 1735 if (Accum == NoClass) 1736 return Field; 1737 if (Accum == Integer || Field == Integer) 1738 return Integer; 1739 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 1740 Accum == X87 || Accum == X87Up) 1741 return Memory; 1742 return SSE; 1743 } 1744 1745 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, 1746 Class &Lo, Class &Hi, bool isNamedArg) const { 1747 // FIXME: This code can be simplified by introducing a simple value class for 1748 // Class pairs with appropriate constructor methods for the various 1749 // situations. 1750 1751 // FIXME: Some of the split computations are wrong; unaligned vectors 1752 // shouldn't be passed in registers for example, so there is no chance they 1753 // can straddle an eightbyte. Verify & simplify. 1754 1755 Lo = Hi = NoClass; 1756 1757 Class &Current = OffsetBase < 64 ? 
Lo : Hi; 1758 Current = Memory; 1759 1760 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 1761 BuiltinType::Kind k = BT->getKind(); 1762 1763 if (k == BuiltinType::Void) { 1764 Current = NoClass; 1765 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 1766 Lo = Integer; 1767 Hi = Integer; 1768 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 1769 Current = Integer; 1770 } else if ((k == BuiltinType::Float || k == BuiltinType::Double) || 1771 (k == BuiltinType::LongDouble && 1772 getTarget().getTriple().isOSNaCl())) { 1773 Current = SSE; 1774 } else if (k == BuiltinType::LongDouble) { 1775 Lo = X87; 1776 Hi = X87Up; 1777 } 1778 // FIXME: _Decimal32 and _Decimal64 are SSE. 1779 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 1780 return; 1781 } 1782 1783 if (const EnumType *ET = Ty->getAs<EnumType>()) { 1784 // Classify the underlying integer type. 1785 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg); 1786 return; 1787 } 1788 1789 if (Ty->hasPointerRepresentation()) { 1790 Current = Integer; 1791 return; 1792 } 1793 1794 if (Ty->isMemberPointerType()) { 1795 if (Ty->isMemberFunctionPointerType()) { 1796 if (Has64BitPointers) { 1797 // If Has64BitPointers, this is an {i64, i64}, so classify both 1798 // Lo and Hi now. 1799 Lo = Hi = Integer; 1800 } else { 1801 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that 1802 // straddles an eightbyte boundary, Hi should be classified as well. 1803 uint64_t EB_FuncPtr = (OffsetBase) / 64; 1804 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64; 1805 if (EB_FuncPtr != EB_ThisAdj) { 1806 Lo = Hi = Integer; 1807 } else { 1808 Current = Integer; 1809 } 1810 } 1811 } else { 1812 Current = Integer; 1813 } 1814 return; 1815 } 1816 1817 if (const VectorType *VT = Ty->getAs<VectorType>()) { 1818 uint64_t Size = getContext().getTypeSize(VT); 1819 if (Size == 32) { 1820 // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x 1821 // float> as integer. 1822 Current = Integer; 1823 1824 // If this type crosses an eightbyte boundary, it should be 1825 // split. 1826 uint64_t EB_Real = (OffsetBase) / 64; 1827 uint64_t EB_Imag = (OffsetBase + Size - 1) / 64; 1828 if (EB_Real != EB_Imag) 1829 Hi = Lo; 1830 } else if (Size == 64) { 1831 // gcc passes <1 x double> in memory. :( 1832 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) 1833 return; 1834 1835 // gcc passes <1 x long long> as INTEGER. 1836 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) || 1837 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) || 1838 VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) || 1839 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong)) 1840 Current = Integer; 1841 else 1842 Current = SSE; 1843 1844 // If this type crosses an eightbyte boundary, it should be 1845 // split. 1846 if (OffsetBase && OffsetBase != 64) 1847 Hi = Lo; 1848 } else if (Size == 128 || (HasAVX && isNamedArg && Size == 256)) { 1849 // Arguments of 256-bits are split into four eightbyte chunks. The 1850 // least significant one belongs to class SSE and all the others to class 1851 // SSEUP. The original Lo and Hi design considers that types can't be 1852 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. 1853 // This design isn't correct for 256-bits, but since there're no cases 1854 // where the upper parts would need to be inspected, avoid adding 1855 // complexity and just consider Hi to match the 64-256 part. 
1856 // 1857 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in 1858 // registers if they are "named", i.e. not part of the "..." of a 1859 // variadic function. 1860 Lo = SSE; 1861 Hi = SSEUp; 1862 } 1863 return; 1864 } 1865 1866 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 1867 QualType ET = getContext().getCanonicalType(CT->getElementType()); 1868 1869 uint64_t Size = getContext().getTypeSize(Ty); 1870 if (ET->isIntegralOrEnumerationType()) { 1871 if (Size <= 64) 1872 Current = Integer; 1873 else if (Size <= 128) 1874 Lo = Hi = Integer; 1875 } else if (ET == getContext().FloatTy) 1876 Current = SSE; 1877 else if (ET == getContext().DoubleTy || 1878 (ET == getContext().LongDoubleTy && 1879 getTarget().getTriple().isOSNaCl())) 1880 Lo = Hi = SSE; 1881 else if (ET == getContext().LongDoubleTy) 1882 Current = ComplexX87; 1883 1884 // If this complex type crosses an eightbyte boundary then it 1885 // should be split. 1886 uint64_t EB_Real = (OffsetBase) / 64; 1887 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; 1888 if (Hi == NoClass && EB_Real != EB_Imag) 1889 Hi = Lo; 1890 1891 return; 1892 } 1893 1894 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 1895 // Arrays are treated like structures. 1896 1897 uint64_t Size = getContext().getTypeSize(Ty); 1898 1899 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 1900 // than four eightbytes, ..., it has class MEMORY. 1901 if (Size > 256) 1902 return; 1903 1904 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 1905 // fields, it has class MEMORY. 1906 // 1907 // Only need to check alignment of array base. 1908 if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) 1909 return; 1910 1911 // Otherwise implement simplified merge. We could be smarter about 1912 // this, but it isn't worth it and would be harder to verify. 1913 Current = NoClass; 1914 uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); 1915 uint64_t ArraySize = AT->getSize().getZExtValue(); 1916 1917 // The only case a 256-bit wide vector could be used is when the array 1918 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 1919 // to work for sizes wider than 128, early check and fallback to memory. 1920 if (Size > 128 && EltSize != 256) 1921 return; 1922 1923 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 1924 Class FieldLo, FieldHi; 1925 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg); 1926 Lo = merge(Lo, FieldLo); 1927 Hi = merge(Hi, FieldHi); 1928 if (Lo == Memory || Hi == Memory) 1929 break; 1930 } 1931 1932 postMerge(Size, Lo, Hi); 1933 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 1934 return; 1935 } 1936 1937 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1938 uint64_t Size = getContext().getTypeSize(Ty); 1939 1940 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 1941 // than four eightbytes, ..., it has class MEMORY. 1942 if (Size > 256) 1943 return; 1944 1945 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial 1946 // copy constructor or a non-trivial destructor, it is passed by invisible 1947 // reference. 1948 if (getRecordArgABI(RT, getCXXABI())) 1949 return; 1950 1951 const RecordDecl *RD = RT->getDecl(); 1952 1953 // Assume variable sized types are passed in memory. 
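// For instance, a record such as
//   struct S { int n; char data[]; };
// takes the early return below and keeps the MEMORY classification assigned
// at the top of classify(), so it is passed and returned through memory.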
1954 if (RD->hasFlexibleArrayMember()) 1955 return; 1956 1957 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 1958 1959 // Reset Lo class, this will be recomputed. 1960 Current = NoClass; 1961 1962 // If this is a C++ record, classify the bases first. 1963 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1964 for (const auto &I : CXXRD->bases()) { 1965 assert(!I.isVirtual() && !I.getType()->isDependentType() && 1966 "Unexpected base class!"); 1967 const CXXRecordDecl *Base = 1968 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 1969 1970 // Classify this field. 1971 // 1972 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 1973 // single eightbyte, each is classified separately. Each eightbyte gets 1974 // initialized to class NO_CLASS. 1975 Class FieldLo, FieldHi; 1976 uint64_t Offset = 1977 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); 1978 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg); 1979 Lo = merge(Lo, FieldLo); 1980 Hi = merge(Hi, FieldHi); 1981 if (Lo == Memory || Hi == Memory) 1982 break; 1983 } 1984 } 1985 1986 // Classify the fields one at a time, merging the results. 1987 unsigned idx = 0; 1988 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1989 i != e; ++i, ++idx) { 1990 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 1991 bool BitField = i->isBitField(); 1992 1993 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than 1994 // four eightbytes, or it contains unaligned fields, it has class MEMORY. 1995 // 1996 // The only case a 256-bit wide vector could be used is when the struct 1997 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 1998 // to work for sizes wider than 128, early check and fallback to memory. 1999 // 2000 if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) { 2001 Lo = Memory; 2002 return; 2003 } 2004 // Note, skip this test for bit-fields, see below. 2005 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { 2006 Lo = Memory; 2007 return; 2008 } 2009 2010 // Classify this field. 2011 // 2012 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate 2013 // exceeds a single eightbyte, each is classified 2014 // separately. Each eightbyte gets initialized to class 2015 // NO_CLASS. 2016 Class FieldLo, FieldHi; 2017 2018 // Bit-fields require special handling, they do not force the 2019 // structure to be passed in memory even if unaligned, and 2020 // therefore they can straddle an eightbyte. 2021 if (BitField) { 2022 // Ignore padding bit-fields. 2023 if (i->isUnnamedBitfield()) 2024 continue; 2025 2026 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 2027 uint64_t Size = i->getBitWidthValue(getContext()); 2028 2029 uint64_t EB_Lo = Offset / 64; 2030 uint64_t EB_Hi = (Offset + Size - 1) / 64; 2031 2032 if (EB_Lo) { 2033 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 2034 FieldLo = NoClass; 2035 FieldHi = Integer; 2036 } else { 2037 FieldLo = Integer; 2038 FieldHi = EB_Hi ? 
Integer : NoClass; 2039 } 2040 } else 2041 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg); 2042 Lo = merge(Lo, FieldLo); 2043 Hi = merge(Hi, FieldHi); 2044 if (Lo == Memory || Hi == Memory) 2045 break; 2046 } 2047 2048 postMerge(Size, Lo, Hi); 2049 } 2050 } 2051 2052 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { 2053 // If this is a scalar LLVM value then assume LLVM will pass it in the right 2054 // place naturally. 2055 if (!isAggregateTypeForABI(Ty)) { 2056 // Treat an enum type as its underlying type. 2057 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2058 Ty = EnumTy->getDecl()->getIntegerType(); 2059 2060 return (Ty->isPromotableIntegerType() ? 2061 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2062 } 2063 2064 return ABIArgInfo::getIndirect(0); 2065 } 2066 2067 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { 2068 if (const VectorType *VecTy = Ty->getAs<VectorType>()) { 2069 uint64_t Size = getContext().getTypeSize(VecTy); 2070 unsigned LargestVector = HasAVX ? 256 : 128; 2071 if (Size <= 64 || Size > LargestVector) 2072 return true; 2073 } 2074 2075 return false; 2076 } 2077 2078 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, 2079 unsigned freeIntRegs) const { 2080 // If this is a scalar LLVM value then assume LLVM will pass it in the right 2081 // place naturally. 2082 // 2083 // This assumption is optimistic, as there could be free registers available 2084 // when we need to pass this argument in memory, and LLVM could try to pass 2085 // the argument in the free register. This does not seem to happen currently, 2086 // but this code would be much safer if we could mark the argument with 2087 // 'onstack'. See PR12193. 2088 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) { 2089 // Treat an enum type as its underlying type. 2090 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2091 Ty = EnumTy->getDecl()->getIntegerType(); 2092 2093 return (Ty->isPromotableIntegerType() ? 2094 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2095 } 2096 2097 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 2098 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 2099 2100 // Compute the byval alignment. We specify the alignment of the byval in all 2101 // cases so that the mid-level optimizer knows the alignment of the byval. 2102 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); 2103 2104 // Attempt to avoid passing indirect results using byval when possible. This 2105 // is important for good codegen. 2106 // 2107 // We do this by coercing the value into a scalar type which the backend can 2108 // handle naturally (i.e., without using byval). 2109 // 2110 // For simplicity, we currently only do this when we have exhausted all of the 2111 // free integer registers. Doing this when there are free integer registers 2112 // would require more care, as we would have to ensure that the coerced value 2113 // did not claim the unused register. That would require either reording the 2114 // arguments to the function (so that any subsequent inreg values came first), 2115 // or only doing this optimization when there were no following arguments that 2116 // might be inreg. 
2117 //
2118 // We currently expect it to be rare (particularly in well written code) for
2119 // arguments to be passed on the stack when there are still free integer
2120 // registers available (this would typically imply large structs being passed
2121 // by value), so this seems like a fair tradeoff for now.
2122 //
2123 // We can revisit this if the backend grows support for 'onstack' parameter
2124 // attributes. See PR12193.
2125 if (freeIntRegs == 0) {
2126 uint64_t Size = getContext().getTypeSize(Ty);
2127
2128 // If this type fits in an eightbyte, coerce it into the matching integral
2129 // type, which will end up on the stack (with alignment 8).
2130 if (Align == 8 && Size <= 64)
2131 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2132 Size));
2133 }
2134
2135 return ABIArgInfo::getIndirect(Align);
2136 }
2137
2138 /// GetByteVectorType - The ABI specifies that a value should be passed in a
2139 /// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a
2140 /// vector register.
2141 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
2142 llvm::Type *IRType = CGT.ConvertType(Ty);
2143
2144 // Wrapper structs that just contain vectors are passed just like vectors,
2145 // strip them off if present.
2146 llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
2147 while (STy && STy->getNumElements() == 1) {
2148 IRType = STy->getElementType(0);
2149 STy = dyn_cast<llvm::StructType>(IRType);
2150 }
2151
2152 // If the preferred type is a 16-byte vector, prefer to pass it.
2153 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
2154 llvm::Type *EltTy = VT->getElementType();
2155 unsigned BitWidth = VT->getBitWidth();
2156 if ((BitWidth >= 128 && BitWidth <= 256) &&
2157 (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
2158 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
2159 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
2160 EltTy->isIntegerTy(128)))
2161 return VT;
2162 }
2163
2164 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
2165 }
2166
2167 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
2168 /// is known to be either off the end of the specified type or in
2169 /// alignment padding. The user type specified is known to be at most 128 bits
2170 /// in size, and to have passed through X86_64ABIInfo::classify with a successful
2171 /// classification that put one of the two halves in the INTEGER class.
2172 ///
2173 /// It is conservatively correct to return false.
2174 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
2175 unsigned EndBit, ASTContext &Context) {
2176 // If the bytes being queried are off the end of the type, there is no user
2177 // data hiding here. This handles analysis of builtins, vectors and other
2178 // types that don't contain interesting padding.
2179 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
2180 if (TySize <= StartBit)
2181 return true;
2182
2183 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
2184 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
2185 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
2186
2187 // Check each element to see if the element overlaps with the queried range.
2188 for (unsigned i = 0; i != NumElts; ++i) {
2189 // If the element is after the span we care about, then we're done.
2190 unsigned EltOffset = i*EltSize;
2191 if (EltOffset >= EndBit) break;
2192
2193 unsigned EltStart = EltOffset < StartBit ?
StartBit-EltOffset :0; 2194 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 2195 EndBit-EltOffset, Context)) 2196 return false; 2197 } 2198 // If it overlaps no elements, then it is safe to process as padding. 2199 return true; 2200 } 2201 2202 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2203 const RecordDecl *RD = RT->getDecl(); 2204 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 2205 2206 // If this is a C++ record, check the bases first. 2207 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 2208 for (const auto &I : CXXRD->bases()) { 2209 assert(!I.isVirtual() && !I.getType()->isDependentType() && 2210 "Unexpected base class!"); 2211 const CXXRecordDecl *Base = 2212 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 2213 2214 // If the base is after the span we care about, ignore it. 2215 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 2216 if (BaseOffset >= EndBit) continue; 2217 2218 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 2219 if (!BitsContainNoUserData(I.getType(), BaseStart, 2220 EndBit-BaseOffset, Context)) 2221 return false; 2222 } 2223 } 2224 2225 // Verify that no field has data that overlaps the region of interest. Yes 2226 // this could be sped up a lot by being smarter about queried fields, 2227 // however we're only looking at structs up to 16 bytes, so we don't care 2228 // much. 2229 unsigned idx = 0; 2230 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2231 i != e; ++i, ++idx) { 2232 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 2233 2234 // If we found a field after the region we care about, then we're done. 2235 if (FieldOffset >= EndBit) break; 2236 2237 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 2238 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 2239 Context)) 2240 return false; 2241 } 2242 2243 // If nothing in this record overlapped the area of interest, then we're 2244 // clean. 2245 return true; 2246 } 2247 2248 return false; 2249 } 2250 2251 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 2252 /// float member at the specified offset. For example, {int,{float}} has a 2253 /// float at offset 4. It is conservatively correct for this routine to return 2254 /// false. 2255 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 2256 const llvm::DataLayout &TD) { 2257 // Base case if we find a float. 2258 if (IROffset == 0 && IRType->isFloatTy()) 2259 return true; 2260 2261 // If this is a struct, recurse into the field at the specified offset. 2262 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 2263 const llvm::StructLayout *SL = TD.getStructLayout(STy); 2264 unsigned Elt = SL->getElementContainingOffset(IROffset); 2265 IROffset -= SL->getElementOffset(Elt); 2266 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 2267 } 2268 2269 // If this is an array, recurse into the field at the specified offset. 2270 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 2271 llvm::Type *EltTy = ATy->getElementType(); 2272 unsigned EltSize = TD.getTypeAllocSize(EltTy); 2273 IROffset -= IROffset/EltSize*EltSize; 2274 return ContainsFloatAtOffset(EltTy, IROffset, TD); 2275 } 2276 2277 return false; 2278 } 2279 2280 2281 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 2282 /// low 8 bytes of an XMM register, corresponding to the SSE class. 
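/// For example, the single eightbyte of struct { float a, b; } comes back as
/// <2 x float>, struct { float a; } comes back as float (the upper four bytes
/// are just padding), and struct { double d; } comes back as double.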
2283 llvm::Type *X86_64ABIInfo:: 2284 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 2285 QualType SourceTy, unsigned SourceOffset) const { 2286 // The only three choices we have are either double, <2 x float>, or float. We 2287 // pass as float if the last 4 bytes is just padding. This happens for 2288 // structs that contain 3 floats. 2289 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 2290 SourceOffset*8+64, getContext())) 2291 return llvm::Type::getFloatTy(getVMContext()); 2292 2293 // We want to pass as <2 x float> if the LLVM IR type contains a float at 2294 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 2295 // case. 2296 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && 2297 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) 2298 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 2299 2300 return llvm::Type::getDoubleTy(getVMContext()); 2301 } 2302 2303 2304 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 2305 /// an 8-byte GPR. This means that we either have a scalar or we are talking 2306 /// about the high or low part of an up-to-16-byte struct. This routine picks 2307 /// the best LLVM IR type to represent this, which may be i64 or may be anything 2308 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 2309 /// etc). 2310 /// 2311 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 2312 /// the source type. IROffset is an offset in bytes into the LLVM IR type that 2313 /// the 8-byte value references. PrefType may be null. 2314 /// 2315 /// SourceTy is the source-level type for the entire argument. SourceOffset is 2316 /// an offset into this that we're processing (which is always either 0 or 8). 2317 /// 2318 llvm::Type *X86_64ABIInfo:: 2319 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 2320 QualType SourceTy, unsigned SourceOffset) const { 2321 // If we're dealing with an un-offset LLVM IR type, then it means that we're 2322 // returning an 8-byte unit starting with it. See if we can safely use it. 2323 if (IROffset == 0) { 2324 // Pointers and int64's always fill the 8-byte unit. 2325 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || 2326 IRType->isIntegerTy(64)) 2327 return IRType; 2328 2329 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 2330 // goodness in the source type is just tail padding. This is allowed to 2331 // kick in for struct {double,int} on the int, but not on 2332 // struct{double,int,int} because we wouldn't return the second int. We 2333 // have to do this analysis on the source type because we can't depend on 2334 // unions being lowered a specific way etc. 2335 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 2336 IRType->isIntegerTy(32) || 2337 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) { 2338 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 : 2339 cast<llvm::IntegerType>(IRType)->getBitWidth(); 2340 2341 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 2342 SourceOffset*8+64, getContext())) 2343 return IRType; 2344 } 2345 } 2346 2347 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 2348 // If this is a struct, recurse into the field at the specified offset. 
2349 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2350 if (IROffset < SL->getSizeInBytes()) {
2351 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2352 IROffset -= SL->getElementOffset(FieldIdx);
2353
2354 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2355 SourceTy, SourceOffset);
2356 }
2357 }
2358
2359 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2360 llvm::Type *EltTy = ATy->getElementType();
2361 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2362 unsigned EltOffset = IROffset/EltSize*EltSize;
2363 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2364 SourceOffset);
2365 }
2366
2367 // Okay, we don't have any better idea of what to pass, so we pass this in an
2368 // integer register that isn't too big to fit the rest of the struct.
2369 unsigned TySizeInBytes =
2370 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2371
2372 assert(TySizeInBytes != SourceOffset && "Empty field?");
2373
2374 // It is always safe to classify this as an integer type up to i64 that
2375 // isn't larger than the structure.
2376 return llvm::IntegerType::get(getVMContext(),
2377 std::min(TySizeInBytes-SourceOffset, 8U)*8);
2378 }
2379
2380
2381 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
2382 /// be used as elements of a two register pair to pass or return, return a
2383 /// first class aggregate to represent them. For example, if the low part of
2384 /// a by-value argument should be passed as i32* and the high part as float,
2385 /// return {i32*, float}.
2386 static llvm::Type *
2387 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
2388 const llvm::DataLayout &TD) {
2389 // In order to correctly satisfy the ABI, we need the high part to start
2390 // at offset 8. If the high and low parts we inferred are both 4-byte types
2391 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
2392 // the second element at offset 8. Check for this:
2393 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
2394 unsigned HiAlign = TD.getABITypeAlignment(Hi);
2395 unsigned HiStart = llvm::RoundUpToAlignment(LoSize, HiAlign);
2396 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
2397
2398 // To handle this, we have to increase the size of the low part so that the
2399 // second element will start at an 8 byte offset. We can't increase the size
2400 // of the second element because it might make us access off the end of the
2401 // struct.
2402 if (HiStart != 8) {
2403 // There are only two sorts of types the ABI generation code can produce for
2404 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
2405 // Promote these to a larger type.
2406 if (Lo->isFloatTy())
2407 Lo = llvm::Type::getDoubleTy(Lo->getContext());
2408 else {
2409 assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
2410 Lo = llvm::Type::getInt64Ty(Lo->getContext());
2411 }
2412 }
2413
2414 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr);
2415
2416
2417 // Verify that the second element is at an 8-byte offset.
2418 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
2419 "Invalid x86-64 argument pair!");
2420 return Result;
2421 }
2422
2423 ABIArgInfo X86_64ABIInfo::
2424 classifyReturnType(QualType RetTy) const {
2425 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
2426 // classification algorithm.
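// For example, struct { long a; double b; } classifies as (INTEGER, SSE): the
// integer eightbyte is returned in %rax and the double in %xmm0.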
2427 X86_64ABIInfo::Class Lo, Hi; 2428 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true); 2429 2430 // Check some invariants. 2431 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2432 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2433 2434 llvm::Type *ResType = nullptr; 2435 switch (Lo) { 2436 case NoClass: 2437 if (Hi == NoClass) 2438 return ABIArgInfo::getIgnore(); 2439 // If the low part is just padding, it takes no register, leave ResType 2440 // null. 2441 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2442 "Unknown missing lo part"); 2443 break; 2444 2445 case SSEUp: 2446 case X87Up: 2447 llvm_unreachable("Invalid classification for lo word."); 2448 2449 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via 2450 // hidden argument. 2451 case Memory: 2452 return getIndirectReturnResult(RetTy); 2453 2454 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next 2455 // available register of the sequence %rax, %rdx is used. 2456 case Integer: 2457 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 2458 2459 // If we have a sign or zero extended integer, make sure to return Extend 2460 // so that the parameter gets the right LLVM IR attributes. 2461 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2462 // Treat an enum type as its underlying type. 2463 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 2464 RetTy = EnumTy->getDecl()->getIntegerType(); 2465 2466 if (RetTy->isIntegralOrEnumerationType() && 2467 RetTy->isPromotableIntegerType()) 2468 return ABIArgInfo::getExtend(); 2469 } 2470 break; 2471 2472 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 2473 // available SSE register of the sequence %xmm0, %xmm1 is used. 2474 case SSE: 2475 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 2476 break; 2477 2478 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 2479 // returned on the X87 stack in %st0 as 80-bit x87 number. 2480 case X87: 2481 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 2482 break; 2483 2484 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 2485 // part of the value is returned in %st0 and the imaginary part in 2486 // %st1. 2487 case ComplexX87: 2488 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 2489 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), 2490 llvm::Type::getX86_FP80Ty(getVMContext()), 2491 nullptr); 2492 break; 2493 } 2494 2495 llvm::Type *HighPart = nullptr; 2496 switch (Hi) { 2497 // Memory was handled previously and X87 should 2498 // never occur as a hi class. 2499 case Memory: 2500 case X87: 2501 llvm_unreachable("Invalid classification for hi word."); 2502 2503 case ComplexX87: // Previously handled. 2504 case NoClass: 2505 break; 2506 2507 case Integer: 2508 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2509 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2510 return ABIArgInfo::getDirect(HighPart, 8); 2511 break; 2512 case SSE: 2513 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2514 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2515 return ABIArgInfo::getDirect(HighPart, 8); 2516 break; 2517 2518 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 2519 // is passed in the next available eightbyte chunk if the last used 2520 // vector register. 2521 // 2522 // SSEUP should always be preceded by SSE, just widen. 
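// For example, with AVX available a __m256 return classifies as (SSE, SSEUp)
// and is widened back to a single 256-bit vector here, so it comes back in
// %ymm0.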
2523 case SSEUp: 2524 assert(Lo == SSE && "Unexpected SSEUp classification."); 2525 ResType = GetByteVectorType(RetTy); 2526 break; 2527 2528 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 2529 // returned together with the previous X87 value in %st0. 2530 case X87Up: 2531 // If X87Up is preceded by X87, we don't need to do 2532 // anything. However, in some cases with unions it may not be 2533 // preceded by X87. In such situations we follow gcc and pass the 2534 // extra bits in an SSE reg. 2535 if (Lo != X87) { 2536 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2537 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2538 return ABIArgInfo::getDirect(HighPart, 8); 2539 } 2540 break; 2541 } 2542 2543 // If a high part was specified, merge it together with the low part. It is 2544 // known to pass in the high eightbyte of the result. We do this by forming a 2545 // first class struct aggregate with the high and low part: {low, high} 2546 if (HighPart) 2547 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2548 2549 return ABIArgInfo::getDirect(ResType); 2550 } 2551 2552 ABIArgInfo X86_64ABIInfo::classifyArgumentType( 2553 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE, 2554 bool isNamedArg) 2555 const 2556 { 2557 Ty = useFirstFieldIfTransparentUnion(Ty); 2558 2559 X86_64ABIInfo::Class Lo, Hi; 2560 classify(Ty, 0, Lo, Hi, isNamedArg); 2561 2562 // Check some invariants. 2563 // FIXME: Enforce these by construction. 2564 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2565 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2566 2567 neededInt = 0; 2568 neededSSE = 0; 2569 llvm::Type *ResType = nullptr; 2570 switch (Lo) { 2571 case NoClass: 2572 if (Hi == NoClass) 2573 return ABIArgInfo::getIgnore(); 2574 // If the low part is just padding, it takes no register, leave ResType 2575 // null. 2576 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2577 "Unknown missing lo part"); 2578 break; 2579 2580 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 2581 // on the stack. 2582 case Memory: 2583 2584 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 2585 // COMPLEX_X87, it is passed in memory. 2586 case X87: 2587 case ComplexX87: 2588 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect) 2589 ++neededInt; 2590 return getIndirectResult(Ty, freeIntRegs); 2591 2592 case SSEUp: 2593 case X87Up: 2594 llvm_unreachable("Invalid classification for lo word."); 2595 2596 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 2597 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 2598 // and %r9 is used. 2599 case Integer: 2600 ++neededInt; 2601 2602 // Pick an 8-byte type based on the preferred type. 2603 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 2604 2605 // If we have a sign or zero extended integer, make sure to return Extend 2606 // so that the parameter gets the right LLVM IR attributes. 2607 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2608 // Treat an enum type as its underlying type. 2609 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2610 Ty = EnumTy->getDecl()->getIntegerType(); 2611 2612 if (Ty->isIntegralOrEnumerationType() && 2613 Ty->isPromotableIntegerType()) 2614 return ABIArgInfo::getExtend(); 2615 } 2616 2617 break; 2618 2619 // AMD64-ABI 3.2.3p3: Rule 3. 
If the class is SSE, the next 2620 // available SSE register is used, the registers are taken in the 2621 // order from %xmm0 to %xmm7. 2622 case SSE: { 2623 llvm::Type *IRType = CGT.ConvertType(Ty); 2624 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 2625 ++neededSSE; 2626 break; 2627 } 2628 } 2629 2630 llvm::Type *HighPart = nullptr; 2631 switch (Hi) { 2632 // Memory was handled previously, ComplexX87 and X87 should 2633 // never occur as hi classes, and X87Up must be preceded by X87, 2634 // which is passed in memory. 2635 case Memory: 2636 case X87: 2637 case ComplexX87: 2638 llvm_unreachable("Invalid classification for hi word."); 2639 2640 case NoClass: break; 2641 2642 case Integer: 2643 ++neededInt; 2644 // Pick an 8-byte type based on the preferred type. 2645 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2646 2647 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2648 return ABIArgInfo::getDirect(HighPart, 8); 2649 break; 2650 2651 // X87Up generally doesn't occur here (long double is passed in 2652 // memory), except in situations involving unions. 2653 case X87Up: 2654 case SSE: 2655 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2656 2657 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2658 return ABIArgInfo::getDirect(HighPart, 8); 2659 2660 ++neededSSE; 2661 break; 2662 2663 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 2664 // eightbyte is passed in the upper half of the last used SSE 2665 // register. This only happens when 128-bit vectors are passed. 2666 case SSEUp: 2667 assert(Lo == SSE && "Unexpected SSEUp classification"); 2668 ResType = GetByteVectorType(Ty); 2669 break; 2670 } 2671 2672 // If a high part was specified, merge it together with the low part. It is 2673 // known to pass in the high eightbyte of the result. We do this by forming a 2674 // first class struct aggregate with the high and low part: {low, high} 2675 if (HighPart) 2676 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2677 2678 return ABIArgInfo::getDirect(ResType); 2679 } 2680 2681 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 2682 2683 if (!getCXXABI().classifyReturnType(FI)) 2684 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 2685 2686 // Keep track of the number of assigned registers. 2687 unsigned freeIntRegs = 6, freeSSERegs = 8; 2688 2689 // If the return value is indirect, then the hidden argument is consuming one 2690 // integer register. 2691 if (FI.getReturnInfo().isIndirect()) 2692 --freeIntRegs; 2693 2694 // The chain argument effectively gives us another free register. 2695 if (FI.isChainCall()) 2696 ++freeIntRegs; 2697 2698 unsigned NumRequiredArgs = FI.getNumRequiredArgs(); 2699 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 2700 // get assigned (in left-to-right order) for passing as follows... 2701 unsigned ArgNo = 0; 2702 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 2703 it != ie; ++it, ++ArgNo) { 2704 bool IsNamedArg = ArgNo < NumRequiredArgs; 2705 2706 unsigned neededInt, neededSSE; 2707 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt, 2708 neededSSE, IsNamedArg); 2709 2710 // AMD64-ABI 3.2.3p3: If there are no registers available for any 2711 // eightbyte of an argument, the whole argument is passed on the 2712 // stack. If registers have already been assigned for some 2713 // eightbytes of such an argument, the assignments get reverted. 
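// For example, if an argument needs two GPRs but only one remains free, it is
// passed on the stack instead, and that last GPR stays available for any
// later arguments.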
2714 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 2715 freeIntRegs -= neededInt; 2716 freeSSERegs -= neededSSE; 2717 } else { 2718 it->info = getIndirectResult(it->type, freeIntRegs); 2719 } 2720 } 2721 } 2722 2723 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 2724 QualType Ty, 2725 CodeGenFunction &CGF) { 2726 llvm::Value *overflow_arg_area_p = 2727 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 2728 llvm::Value *overflow_arg_area = 2729 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 2730 2731 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 2732 // byte boundary if alignment needed by type exceeds 8 byte boundary. 2733 // It isn't stated explicitly in the standard, but in practice we use 2734 // alignment greater than 16 where necessary. 2735 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 2736 if (Align > 8) { 2737 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 2738 llvm::Value *Offset = 2739 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 2740 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 2741 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 2742 CGF.Int64Ty); 2743 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); 2744 overflow_arg_area = 2745 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 2746 overflow_arg_area->getType(), 2747 "overflow_arg_area.align"); 2748 } 2749 2750 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 2751 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2752 llvm::Value *Res = 2753 CGF.Builder.CreateBitCast(overflow_arg_area, 2754 llvm::PointerType::getUnqual(LTy)); 2755 2756 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 2757 // l->overflow_arg_area + sizeof(type). 2758 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 2759 // an 8 byte boundary. 2760 2761 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 2762 llvm::Value *Offset = 2763 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 2764 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 2765 "overflow_arg_area.next"); 2766 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 2767 2768 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 2769 return Res; 2770 } 2771 2772 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2773 CodeGenFunction &CGF) const { 2774 // Assume that va_list type is correct; should be pointer to LLVM type: 2775 // struct { 2776 // i32 gp_offset; 2777 // i32 fp_offset; 2778 // i8* overflow_arg_area; 2779 // i8* reg_save_area; 2780 // }; 2781 unsigned neededInt, neededSSE; 2782 2783 Ty = CGF.getContext().getCanonicalType(Ty); 2784 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE, 2785 /*isNamedArg*/false); 2786 2787 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 2788 // in the registers. If not go to step 7. 2789 if (!neededInt && !neededSSE) 2790 return EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2791 2792 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 2793 // general purpose registers needed to pass type and num_fp to hold 2794 // the number of floating point registers needed. 2795 2796 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 2797 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 2798 // l->fp_offset > 304 - num_fp * 16 go to step 7. 
2799 // 2800 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of 2801 // register save space). 2802 2803 llvm::Value *InRegs = nullptr; 2804 llvm::Value *gp_offset_p = nullptr, *gp_offset = nullptr; 2805 llvm::Value *fp_offset_p = nullptr, *fp_offset = nullptr; 2806 if (neededInt) { 2807 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); 2808 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); 2809 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); 2810 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); 2811 } 2812 2813 if (neededSSE) { 2814 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); 2815 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); 2816 llvm::Value *FitsInFP = 2817 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); 2818 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); 2819 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; 2820 } 2821 2822 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 2823 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 2824 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 2825 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 2826 2827 // Emit code to load the value if it was passed in registers. 2828 2829 CGF.EmitBlock(InRegBlock); 2830 2831 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with 2832 // an offset of l->gp_offset and/or l->fp_offset. This may require 2833 // copying to a temporary location in case the parameter is passed 2834 // in different register classes or requires an alignment greater 2835 // than 8 for general purpose registers and 16 for XMM registers. 2836 // 2837 // FIXME: This really results in shameful code when we end up needing to 2838 // collect arguments from different places; often what should result in a 2839 // simple assembling of a structure from scattered addresses has many more 2840 // loads than necessary. Can we clean this up? 2841 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2842 llvm::Value *RegAddr = 2843 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3), 2844 "reg_save_area"); 2845 if (neededInt && neededSSE) { 2846 // FIXME: Cleanup. 2847 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 2848 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 2849 llvm::Value *Tmp = CGF.CreateMemTemp(Ty); 2850 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo()); 2851 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 2852 llvm::Type *TyLo = ST->getElementType(0); 2853 llvm::Type *TyHi = ST->getElementType(1); 2854 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 2855 "Unexpected ABI info for mixed regs"); 2856 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 2857 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 2858 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2859 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2860 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr; 2861 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? 
GPAddr : FPAddr; 2862 llvm::Value *V = 2863 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 2864 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2865 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 2866 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2867 2868 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2869 llvm::PointerType::getUnqual(LTy)); 2870 } else if (neededInt) { 2871 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2872 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2873 llvm::PointerType::getUnqual(LTy)); 2874 2875 // Copy to a temporary if necessary to ensure the appropriate alignment. 2876 std::pair<CharUnits, CharUnits> SizeAlign = 2877 CGF.getContext().getTypeInfoInChars(Ty); 2878 uint64_t TySize = SizeAlign.first.getQuantity(); 2879 unsigned TyAlign = SizeAlign.second.getQuantity(); 2880 if (TyAlign > 8) { 2881 llvm::Value *Tmp = CGF.CreateMemTemp(Ty); 2882 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false); 2883 RegAddr = Tmp; 2884 } 2885 } else if (neededSSE == 1) { 2886 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2887 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2888 llvm::PointerType::getUnqual(LTy)); 2889 } else { 2890 assert(neededSSE == 2 && "Invalid number of needed registers!"); 2891 // SSE registers are spaced 16 bytes apart in the register save 2892 // area, we need to collect the two eightbytes together. 2893 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2894 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); 2895 llvm::Type *DoubleTy = CGF.DoubleTy; 2896 llvm::Type *DblPtrTy = 2897 llvm::PointerType::getUnqual(DoubleTy); 2898 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr); 2899 llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty); 2900 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo()); 2901 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 2902 DblPtrTy)); 2903 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2904 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 2905 DblPtrTy)); 2906 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2907 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2908 llvm::PointerType::getUnqual(LTy)); 2909 } 2910 2911 // AMD64-ABI 3.5.7p5: Step 5. Set: 2912 // l->gp_offset = l->gp_offset + num_gp * 8 2913 // l->fp_offset = l->fp_offset + num_fp * 16. 2914 if (neededInt) { 2915 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2916 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2917 gp_offset_p); 2918 } 2919 if (neededSSE) { 2920 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2921 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2922 fp_offset_p); 2923 } 2924 CGF.EmitBranch(ContBlock); 2925 2926 // Emit code to load the value if it was passed in memory. 2927 2928 CGF.EmitBlock(InMemBlock); 2929 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2930 2931 // Return the appropriate result. 
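// Both paths produce an address for the argument: the phi below selects
// either the (possibly re-assembled) copy from the register save area or the
// overflow-area slot, and the caller of EmitVAArg loads the value from
// whichever one was taken.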
2932 2933 CGF.EmitBlock(ContBlock); 2934 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2, 2935 "vaarg.addr"); 2936 ResAddr->addIncoming(RegAddr, InRegBlock); 2937 ResAddr->addIncoming(MemAddr, InMemBlock); 2938 return ResAddr; 2939 } 2940 2941 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs, 2942 bool IsReturnType) const { 2943 2944 if (Ty->isVoidType()) 2945 return ABIArgInfo::getIgnore(); 2946 2947 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2948 Ty = EnumTy->getDecl()->getIntegerType(); 2949 2950 TypeInfo Info = getContext().getTypeInfo(Ty); 2951 uint64_t Width = Info.Width; 2952 unsigned Align = getContext().toCharUnitsFromBits(Info.Align).getQuantity(); 2953 2954 const RecordType *RT = Ty->getAs<RecordType>(); 2955 if (RT) { 2956 if (!IsReturnType) { 2957 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI())) 2958 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 2959 } 2960 2961 if (RT->getDecl()->hasFlexibleArrayMember()) 2962 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2963 2964 // FIXME: mingw-w64-gcc emits 128-bit struct as i128 2965 if (Width == 128 && getTarget().getTriple().isWindowsGNUEnvironment()) 2966 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2967 Width)); 2968 } 2969 2970 // vectorcall adds the concept of a homogenous vector aggregate, similar to 2971 // other targets. 2972 const Type *Base = nullptr; 2973 uint64_t NumElts = 0; 2974 if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) { 2975 if (FreeSSERegs >= NumElts) { 2976 FreeSSERegs -= NumElts; 2977 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType()) 2978 return ABIArgInfo::getDirect(); 2979 return ABIArgInfo::getExpand(); 2980 } 2981 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 2982 } 2983 2984 2985 if (Ty->isMemberPointerType()) { 2986 // If the member pointer is represented by an LLVM int or ptr, pass it 2987 // directly. 2988 llvm::Type *LLTy = CGT.ConvertType(Ty); 2989 if (LLTy->isPointerTy() || LLTy->isIntegerTy()) 2990 return ABIArgInfo::getDirect(); 2991 } 2992 2993 if (RT || Ty->isMemberPointerType()) { 2994 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 2995 // not 1, 2, 4, or 8 bytes, must be passed by reference." 2996 if (Width > 64 || !llvm::isPowerOf2_64(Width)) 2997 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 2998 2999 // Otherwise, coerce it to a small integer. 3000 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width)); 3001 } 3002 3003 // Bool type is always extended to the ABI, other builtin types are not 3004 // extended. 3005 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 3006 if (BT && BT->getKind() == BuiltinType::Bool) 3007 return ABIArgInfo::getExtend(); 3008 3009 return ABIArgInfo::getDirect(); 3010 } 3011 3012 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 3013 bool IsVectorCall = 3014 FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall; 3015 3016 // We can use up to 4 SSE return registers with vectorcall. 3017 unsigned FreeSSERegs = IsVectorCall ? 4 : 0; 3018 if (!getCXXABI().classifyReturnType(FI)) 3019 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true); 3020 3021 // We can use up to 6 SSE register parameters with vectorcall. 3022 FreeSSERegs = IsVectorCall ? 
6 : 0; 3023 for (auto &I : FI.arguments()) 3024 I.info = classify(I.type, FreeSSERegs, false); 3025 } 3026 3027 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3028 CodeGenFunction &CGF) const { 3029 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3030 3031 CGBuilderTy &Builder = CGF.Builder; 3032 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 3033 "ap"); 3034 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3035 llvm::Type *PTy = 3036 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3037 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 3038 3039 uint64_t Offset = 3040 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8); 3041 llvm::Value *NextAddr = 3042 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 3043 "ap.next"); 3044 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3045 3046 return AddrTyped; 3047 } 3048 3049 namespace { 3050 3051 class NaClX86_64ABIInfo : public ABIInfo { 3052 public: 3053 NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) 3054 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {} 3055 void computeInfo(CGFunctionInfo &FI) const override; 3056 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3057 CodeGenFunction &CGF) const override; 3058 private: 3059 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv. 3060 X86_64ABIInfo NInfo; // Used for everything else. 3061 }; 3062 3063 class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 3064 bool HasAVX; 3065 public: 3066 NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) 3067 : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)), HasAVX(HasAVX) { 3068 } 3069 unsigned getOpenMPSimdDefaultAlignment(QualType) const override { 3070 return HasAVX ? 32 : 16; 3071 } 3072 }; 3073 3074 } 3075 3076 void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 3077 if (FI.getASTCallingConvention() == CC_PnaclCall) 3078 PInfo.computeInfo(FI); 3079 else 3080 NInfo.computeInfo(FI); 3081 } 3082 3083 llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3084 CodeGenFunction &CGF) const { 3085 // Always use the native convention; calling pnacl-style varargs functions 3086 // is unuspported. 3087 return NInfo.EmitVAArg(VAListAddr, Ty, CGF); 3088 } 3089 3090 3091 // PowerPC-32 3092 namespace { 3093 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information. 3094 class PPC32_SVR4_ABIInfo : public DefaultABIInfo { 3095 public: 3096 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 3097 3098 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3099 CodeGenFunction &CGF) const override; 3100 }; 3101 3102 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo { 3103 public: 3104 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT)) {} 3105 3106 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 3107 // This is recovered from gcc output. 3108 return 1; // r1 is the dedicated stack pointer 3109 } 3110 3111 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3112 llvm::Value *Address) const override; 3113 3114 unsigned getOpenMPSimdDefaultAlignment(QualType) const override { 3115 return 16; // Natural alignment for Altivec vectors. 
3116 } 3117 }; 3118 3119 } 3120 3121 llvm::Value *PPC32_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr, 3122 QualType Ty, 3123 CodeGenFunction &CGF) const { 3124 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 3125 // TODO: Implement this. For now ignore. 3126 (void)CTy; 3127 return nullptr; 3128 } 3129 3130 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64; 3131 bool isInt = Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType(); 3132 llvm::Type *CharPtr = CGF.Int8PtrTy; 3133 llvm::Type *CharPtrPtr = CGF.Int8PtrPtrTy; 3134 3135 CGBuilderTy &Builder = CGF.Builder; 3136 llvm::Value *GPRPtr = Builder.CreateBitCast(VAListAddr, CharPtr, "gprptr"); 3137 llvm::Value *GPRPtrAsInt = Builder.CreatePtrToInt(GPRPtr, CGF.Int32Ty); 3138 llvm::Value *FPRPtrAsInt = Builder.CreateAdd(GPRPtrAsInt, Builder.getInt32(1)); 3139 llvm::Value *FPRPtr = Builder.CreateIntToPtr(FPRPtrAsInt, CharPtr); 3140 llvm::Value *OverflowAreaPtrAsInt = Builder.CreateAdd(FPRPtrAsInt, Builder.getInt32(3)); 3141 llvm::Value *OverflowAreaPtr = Builder.CreateIntToPtr(OverflowAreaPtrAsInt, CharPtrPtr); 3142 llvm::Value *RegsaveAreaPtrAsInt = Builder.CreateAdd(OverflowAreaPtrAsInt, Builder.getInt32(4)); 3143 llvm::Value *RegsaveAreaPtr = Builder.CreateIntToPtr(RegsaveAreaPtrAsInt, CharPtrPtr); 3144 llvm::Value *GPR = Builder.CreateLoad(GPRPtr, false, "gpr"); 3145 // Align GPR when TY is i64. 3146 if (isI64) { 3147 llvm::Value *GPRAnd = Builder.CreateAnd(GPR, Builder.getInt8(1)); 3148 llvm::Value *CC64 = Builder.CreateICmpEQ(GPRAnd, Builder.getInt8(1)); 3149 llvm::Value *GPRPlusOne = Builder.CreateAdd(GPR, Builder.getInt8(1)); 3150 GPR = Builder.CreateSelect(CC64, GPRPlusOne, GPR); 3151 } 3152 llvm::Value *FPR = Builder.CreateLoad(FPRPtr, false, "fpr"); 3153 llvm::Value *OverflowArea = Builder.CreateLoad(OverflowAreaPtr, false, "overflow_area"); 3154 llvm::Value *OverflowAreaAsInt = Builder.CreatePtrToInt(OverflowArea, CGF.Int32Ty); 3155 llvm::Value *RegsaveArea = Builder.CreateLoad(RegsaveAreaPtr, false, "regsave_area"); 3156 llvm::Value *RegsaveAreaAsInt = Builder.CreatePtrToInt(RegsaveArea, CGF.Int32Ty); 3157 3158 llvm::Value *CC = Builder.CreateICmpULT(isInt ? GPR : FPR, 3159 Builder.getInt8(8), "cond"); 3160 3161 llvm::Value *RegConstant = Builder.CreateMul(isInt ? GPR : FPR, 3162 Builder.getInt8(isInt ? 4 : 8)); 3163 3164 llvm::Value *OurReg = Builder.CreateAdd(RegsaveAreaAsInt, Builder.CreateSExt(RegConstant, CGF.Int32Ty)); 3165 3166 if (Ty->isFloatingType()) 3167 OurReg = Builder.CreateAdd(OurReg, Builder.getInt32(32)); 3168 3169 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs"); 3170 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow"); 3171 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 3172 3173 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow); 3174 3175 CGF.EmitBlock(UsingRegs); 3176 3177 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3178 llvm::Value *Result1 = Builder.CreateIntToPtr(OurReg, PTy); 3179 // Increase the GPR/FPR indexes. 3180 if (isInt) { 3181 GPR = Builder.CreateAdd(GPR, Builder.getInt8(isI64 ? 2 : 1)); 3182 Builder.CreateStore(GPR, GPRPtr); 3183 } else { 3184 FPR = Builder.CreateAdd(FPR, Builder.getInt8(1)); 3185 Builder.CreateStore(FPR, FPRPtr); 3186 } 3187 CGF.EmitBranch(Cont); 3188 3189 CGF.EmitBlock(UsingOverflow); 3190 3191 // Increase the overflow area. 
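// (The overflow-area pointer saved in the va_list is then bumped past this
// argument: 4 bytes for integer-class values, 8 bytes for floating point.)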
3192 llvm::Value *Result2 = Builder.CreateIntToPtr(OverflowAreaAsInt, PTy); 3193 OverflowAreaAsInt = Builder.CreateAdd(OverflowAreaAsInt, Builder.getInt32(isInt ? 4 : 8)); 3194 Builder.CreateStore(Builder.CreateIntToPtr(OverflowAreaAsInt, CharPtr), OverflowAreaPtr); 3195 CGF.EmitBranch(Cont); 3196 3197 CGF.EmitBlock(Cont); 3198 3199 llvm::PHINode *Result = CGF.Builder.CreatePHI(PTy, 2, "vaarg.addr"); 3200 Result->addIncoming(Result1, UsingRegs); 3201 Result->addIncoming(Result2, UsingOverflow); 3202 3203 if (Ty->isAggregateType()) { 3204 llvm::Value *AGGPtr = Builder.CreateBitCast(Result, CharPtrPtr, "aggrptr") ; 3205 return Builder.CreateLoad(AGGPtr, false, "aggr"); 3206 } 3207 3208 return Result; 3209 } 3210 3211 bool 3212 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3213 llvm::Value *Address) const { 3214 // This is calculated from the LLVM and GCC tables and verified 3215 // against gcc output. AFAIK all ABIs use the same encoding. 3216 3217 CodeGen::CGBuilderTy &Builder = CGF.Builder; 3218 3219 llvm::IntegerType *i8 = CGF.Int8Ty; 3220 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 3221 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 3222 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 3223 3224 // 0-31: r0-31, the 4-byte general-purpose registers 3225 AssignToArrayRange(Builder, Address, Four8, 0, 31); 3226 3227 // 32-63: fp0-31, the 8-byte floating-point registers 3228 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 3229 3230 // 64-76 are various 4-byte special-purpose registers: 3231 // 64: mq 3232 // 65: lr 3233 // 66: ctr 3234 // 67: ap 3235 // 68-75 cr0-7 3236 // 76: xer 3237 AssignToArrayRange(Builder, Address, Four8, 64, 76); 3238 3239 // 77-108: v0-31, the 16-byte vector registers 3240 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 3241 3242 // 109: vrsave 3243 // 110: vscr 3244 // 111: spe_acc 3245 // 112: spefscr 3246 // 113: sfp 3247 AssignToArrayRange(Builder, Address, Four8, 109, 113); 3248 3249 return false; 3250 } 3251 3252 // PowerPC-64 3253 3254 namespace { 3255 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. 3256 class PPC64_SVR4_ABIInfo : public DefaultABIInfo { 3257 public: 3258 enum ABIKind { 3259 ELFv1 = 0, 3260 ELFv2 3261 }; 3262 3263 private: 3264 static const unsigned GPRBits = 64; 3265 ABIKind Kind; 3266 3267 public: 3268 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind) 3269 : DefaultABIInfo(CGT), Kind(Kind) {} 3270 3271 bool isPromotableTypeForABI(QualType Ty) const; 3272 bool isAlignedParamType(QualType Ty) const; 3273 3274 ABIArgInfo classifyReturnType(QualType RetTy) const; 3275 ABIArgInfo classifyArgumentType(QualType Ty) const; 3276 3277 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 3278 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 3279 uint64_t Members) const override; 3280 3281 // TODO: We can add more logic to computeInfo to improve performance. 3282 // Example: For aggregate arguments that fit in a register, we could 3283 // use getDirectInReg (as is done below for structs containing a single 3284 // floating-point value) to avoid pushing them to memory on function 3285 // entry. This would require changing the logic in PPCISelLowering 3286 // when lowering the parameters in the caller and args in the callee. 
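  // For example, a parameter of type 'struct S { double d; }' is treated
  // below as its single double element and classified as direct-in-register
  // (getDirectInReg) rather than being passed through memory.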
3287 void computeInfo(CGFunctionInfo &FI) const override { 3288 if (!getCXXABI().classifyReturnType(FI)) 3289 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3290 for (auto &I : FI.arguments()) { 3291 // We rely on the default argument classification for the most part. 3292 // One exception: An aggregate containing a single floating-point 3293 // or vector item must be passed in a register if one is available. 3294 const Type *T = isSingleElementStruct(I.type, getContext()); 3295 if (T) { 3296 const BuiltinType *BT = T->getAs<BuiltinType>(); 3297 if ((T->isVectorType() && getContext().getTypeSize(T) == 128) || 3298 (BT && BT->isFloatingPoint())) { 3299 QualType QT(T, 0); 3300 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); 3301 continue; 3302 } 3303 } 3304 I.info = classifyArgumentType(I.type); 3305 } 3306 } 3307 3308 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3309 CodeGenFunction &CGF) const override; 3310 }; 3311 3312 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { 3313 public: 3314 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, 3315 PPC64_SVR4_ABIInfo::ABIKind Kind) 3316 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind)) {} 3317 3318 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 3319 // This is recovered from gcc output. 3320 return 1; // r1 is the dedicated stack pointer 3321 } 3322 3323 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3324 llvm::Value *Address) const override; 3325 3326 unsigned getOpenMPSimdDefaultAlignment(QualType) const override { 3327 return 16; // Natural alignment for Altivec and VSX vectors. 3328 } 3329 }; 3330 3331 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 3332 public: 3333 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 3334 3335 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 3336 // This is recovered from gcc output. 3337 return 1; // r1 is the dedicated stack pointer 3338 } 3339 3340 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3341 llvm::Value *Address) const override; 3342 3343 unsigned getOpenMPSimdDefaultAlignment(QualType) const override { 3344 return 16; // Natural alignment for Altivec vectors. 3345 } 3346 }; 3347 3348 } 3349 3350 // Return true if the ABI requires Ty to be passed sign- or zero- 3351 // extended to 64 bits. 3352 bool 3353 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const { 3354 // Treat an enum type as its underlying type. 3355 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3356 Ty = EnumTy->getDecl()->getIntegerType(); 3357 3358 // Promotable integer types are required to be promoted by the ABI. 3359 if (Ty->isPromotableIntegerType()) 3360 return true; 3361 3362 // In addition to the usual promotable integer types, we also need to 3363 // extend all 32-bit types, since the ABI requires promotion to 64 bits. 3364 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 3365 switch (BT->getKind()) { 3366 case BuiltinType::Int: 3367 case BuiltinType::UInt: 3368 return true; 3369 default: 3370 break; 3371 } 3372 3373 return false; 3374 } 3375 3376 /// isAlignedParamType - Determine whether a type requires 16-byte 3377 /// alignment in the parameter area. 3378 bool 3379 PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty) const { 3380 // Complex types are passed just like their elements. 
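  // (e.g. a '_Complex float' parameter is checked for alignment purposes as
  // a plain 'float' from this point on.)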
3381 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 3382 Ty = CTy->getElementType(); 3383 3384 // Only vector types of size 16 bytes need alignment (larger types are 3385 // passed via reference, smaller types are not aligned). 3386 if (Ty->isVectorType()) 3387 return getContext().getTypeSize(Ty) == 128; 3388 3389 // For single-element float/vector structs, we consider the whole type 3390 // to have the same alignment requirements as its single element. 3391 const Type *AlignAsType = nullptr; 3392 const Type *EltType = isSingleElementStruct(Ty, getContext()); 3393 if (EltType) { 3394 const BuiltinType *BT = EltType->getAs<BuiltinType>(); 3395 if ((EltType->isVectorType() && 3396 getContext().getTypeSize(EltType) == 128) || 3397 (BT && BT->isFloatingPoint())) 3398 AlignAsType = EltType; 3399 } 3400 3401 // Likewise for ELFv2 homogeneous aggregates. 3402 const Type *Base = nullptr; 3403 uint64_t Members = 0; 3404 if (!AlignAsType && Kind == ELFv2 && 3405 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members)) 3406 AlignAsType = Base; 3407 3408 // With special case aggregates, only vector base types need alignment. 3409 if (AlignAsType) 3410 return AlignAsType->isVectorType(); 3411 3412 // Otherwise, we only need alignment for any aggregate type that 3413 // has an alignment requirement of >= 16 bytes. 3414 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) 3415 return true; 3416 3417 return false; 3418 } 3419 3420 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous 3421 /// aggregate. Base is set to the base element type, and Members is set 3422 /// to the number of base elements. 3423 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base, 3424 uint64_t &Members) const { 3425 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 3426 uint64_t NElements = AT->getSize().getZExtValue(); 3427 if (NElements == 0) 3428 return false; 3429 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members)) 3430 return false; 3431 Members *= NElements; 3432 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 3433 const RecordDecl *RD = RT->getDecl(); 3434 if (RD->hasFlexibleArrayMember()) 3435 return false; 3436 3437 Members = 0; 3438 3439 // If this is a C++ record, check the bases first. 3440 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 3441 for (const auto &I : CXXRD->bases()) { 3442 // Ignore empty records. 3443 if (isEmptyRecord(getContext(), I.getType(), true)) 3444 continue; 3445 3446 uint64_t FldMembers; 3447 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers)) 3448 return false; 3449 3450 Members += FldMembers; 3451 } 3452 } 3453 3454 for (const auto *FD : RD->fields()) { 3455 // Ignore (non-zero arrays of) empty records. 3456 QualType FT = FD->getType(); 3457 while (const ConstantArrayType *AT = 3458 getContext().getAsConstantArrayType(FT)) { 3459 if (AT->getSize().getZExtValue() == 0) 3460 return false; 3461 FT = AT->getElementType(); 3462 } 3463 if (isEmptyRecord(getContext(), FT, true)) 3464 continue; 3465 3466 // For compatibility with GCC, ignore empty bitfields in C++ mode. 3467 if (getContext().getLangOpts().CPlusPlus && 3468 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0) 3469 continue; 3470 3471 uint64_t FldMembers; 3472 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers)) 3473 return false; 3474 3475 Members = (RD->isUnion() ? 
3476 std::max(Members, FldMembers) : Members + FldMembers); 3477 } 3478 3479 if (!Base) 3480 return false; 3481 3482 // Ensure there is no padding. 3483 if (getContext().getTypeSize(Base) * Members != 3484 getContext().getTypeSize(Ty)) 3485 return false; 3486 } else { 3487 Members = 1; 3488 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 3489 Members = 2; 3490 Ty = CT->getElementType(); 3491 } 3492 3493 // Most ABIs only support float, double, and some vector type widths. 3494 if (!isHomogeneousAggregateBaseType(Ty)) 3495 return false; 3496 3497 // The base type must be the same for all members. Types that 3498 // agree in both total size and mode (float vs. vector) are 3499 // treated as being equivalent here. 3500 const Type *TyPtr = Ty.getTypePtr(); 3501 if (!Base) 3502 Base = TyPtr; 3503 3504 if (Base->isVectorType() != TyPtr->isVectorType() || 3505 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr)) 3506 return false; 3507 } 3508 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members); 3509 } 3510 3511 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 3512 // Homogeneous aggregates for ELFv2 must have base types of float, 3513 // double, long double, or 128-bit vectors. 3514 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 3515 if (BT->getKind() == BuiltinType::Float || 3516 BT->getKind() == BuiltinType::Double || 3517 BT->getKind() == BuiltinType::LongDouble) 3518 return true; 3519 } 3520 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3521 if (getContext().getTypeSize(VT) == 128) 3522 return true; 3523 } 3524 return false; 3525 } 3526 3527 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough( 3528 const Type *Base, uint64_t Members) const { 3529 // Vector types require one register, floating point types require one 3530 // or two registers depending on their size. 3531 uint32_t NumRegs = 3532 Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64; 3533 3534 // Homogeneous Aggregates may occupy at most 8 registers. 3535 return Members * NumRegs <= 8; 3536 } 3537 3538 ABIArgInfo 3539 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { 3540 Ty = useFirstFieldIfTransparentUnion(Ty); 3541 3542 if (Ty->isAnyComplexType()) 3543 return ABIArgInfo::getDirect(); 3544 3545 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes) 3546 // or via reference (larger than 16 bytes). 3547 if (Ty->isVectorType()) { 3548 uint64_t Size = getContext().getTypeSize(Ty); 3549 if (Size > 128) 3550 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3551 else if (Size < 128) { 3552 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 3553 return ABIArgInfo::getDirect(CoerceTy); 3554 } 3555 } 3556 3557 if (isAggregateTypeForABI(Ty)) { 3558 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 3559 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 3560 3561 uint64_t ABIAlign = isAlignedParamType(Ty)? 16 : 8; 3562 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8; 3563 3564 // ELFv2 homogeneous aggregates are passed as array types. 
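    // For example, 'struct { float x, y, z, w; }' has Base == float and
    // Members == 4, so it is coerced to the LLVM type [4 x float] below.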
3565 const Type *Base = nullptr; 3566 uint64_t Members = 0; 3567 if (Kind == ELFv2 && 3568 isHomogeneousAggregate(Ty, Base, Members)) { 3569 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 3570 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 3571 return ABIArgInfo::getDirect(CoerceTy); 3572 } 3573 3574 // If an aggregate may end up fully in registers, we do not 3575 // use the ByVal method, but pass the aggregate as array. 3576 // This is usually beneficial since we avoid forcing the 3577 // back-end to store the argument to memory. 3578 uint64_t Bits = getContext().getTypeSize(Ty); 3579 if (Bits > 0 && Bits <= 8 * GPRBits) { 3580 llvm::Type *CoerceTy; 3581 3582 // Types up to 8 bytes are passed as integer type (which will be 3583 // properly aligned in the argument save area doubleword). 3584 if (Bits <= GPRBits) 3585 CoerceTy = llvm::IntegerType::get(getVMContext(), 3586 llvm::RoundUpToAlignment(Bits, 8)); 3587 // Larger types are passed as arrays, with the base type selected 3588 // according to the required alignment in the save area. 3589 else { 3590 uint64_t RegBits = ABIAlign * 8; 3591 uint64_t NumRegs = llvm::RoundUpToAlignment(Bits, RegBits) / RegBits; 3592 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits); 3593 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs); 3594 } 3595 3596 return ABIArgInfo::getDirect(CoerceTy); 3597 } 3598 3599 // All other aggregates are passed ByVal. 3600 return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true, 3601 /*Realign=*/TyAlign > ABIAlign); 3602 } 3603 3604 return (isPromotableTypeForABI(Ty) ? 3605 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3606 } 3607 3608 ABIArgInfo 3609 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { 3610 if (RetTy->isVoidType()) 3611 return ABIArgInfo::getIgnore(); 3612 3613 if (RetTy->isAnyComplexType()) 3614 return ABIArgInfo::getDirect(); 3615 3616 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes) 3617 // or via reference (larger than 16 bytes). 3618 if (RetTy->isVectorType()) { 3619 uint64_t Size = getContext().getTypeSize(RetTy); 3620 if (Size > 128) 3621 return ABIArgInfo::getIndirect(0); 3622 else if (Size < 128) { 3623 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 3624 return ABIArgInfo::getDirect(CoerceTy); 3625 } 3626 } 3627 3628 if (isAggregateTypeForABI(RetTy)) { 3629 // ELFv2 homogeneous aggregates are returned as array types. 3630 const Type *Base = nullptr; 3631 uint64_t Members = 0; 3632 if (Kind == ELFv2 && 3633 isHomogeneousAggregate(RetTy, Base, Members)) { 3634 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 3635 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 3636 return ABIArgInfo::getDirect(CoerceTy); 3637 } 3638 3639 // ELFv2 small aggregates are returned in up to two registers. 3640 uint64_t Bits = getContext().getTypeSize(RetTy); 3641 if (Kind == ELFv2 && Bits <= 2 * GPRBits) { 3642 if (Bits == 0) 3643 return ABIArgInfo::getIgnore(); 3644 3645 llvm::Type *CoerceTy; 3646 if (Bits > GPRBits) { 3647 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits); 3648 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr); 3649 } else 3650 CoerceTy = llvm::IntegerType::get(getVMContext(), 3651 llvm::RoundUpToAlignment(Bits, 8)); 3652 return ABIArgInfo::getDirect(CoerceTy); 3653 } 3654 3655 // All other aggregates are returned indirectly. 3656 return ABIArgInfo::getIndirect(0); 3657 } 3658 3659 return (isPromotableTypeForABI(RetTy) ? 
3660 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3661 } 3662 3663 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 3664 llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr, 3665 QualType Ty, 3666 CodeGenFunction &CGF) const { 3667 llvm::Type *BP = CGF.Int8PtrTy; 3668 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3669 3670 CGBuilderTy &Builder = CGF.Builder; 3671 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 3672 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3673 3674 // Handle types that require 16-byte alignment in the parameter save area. 3675 if (isAlignedParamType(Ty)) { 3676 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 3677 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(15)); 3678 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt64(-16)); 3679 Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align"); 3680 } 3681 3682 // Update the va_list pointer. The pointer should be bumped by the 3683 // size of the object. We can trust getTypeSize() except for a complex 3684 // type whose base type is smaller than a doubleword. For these, the 3685 // size of the object is 16 bytes; see below for further explanation. 3686 unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8; 3687 QualType BaseTy; 3688 unsigned CplxBaseSize = 0; 3689 3690 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 3691 BaseTy = CTy->getElementType(); 3692 CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8; 3693 if (CplxBaseSize < 8) 3694 SizeInBytes = 16; 3695 } 3696 3697 unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8); 3698 llvm::Value *NextAddr = 3699 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), 3700 "ap.next"); 3701 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3702 3703 // If we have a complex type and the base type is smaller than 8 bytes, 3704 // the ABI calls for the real and imaginary parts to be right-adjusted 3705 // in separate doublewords. However, Clang expects us to produce a 3706 // pointer to a structure with the two parts packed tightly. So generate 3707 // loads of the real and imaginary parts relative to the va_list pointer, 3708 // and store them to a temporary structure. 3709 if (CplxBaseSize && CplxBaseSize < 8) { 3710 llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 3711 llvm::Value *ImagAddr = RealAddr; 3712 if (CGF.CGM.getDataLayout().isBigEndian()) { 3713 RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize)); 3714 ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize)); 3715 } else { 3716 ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(8)); 3717 } 3718 llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy)); 3719 RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy); 3720 ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy); 3721 llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal"); 3722 llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag"); 3723 llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty), 3724 "vacplx"); 3725 llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real"); 3726 llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag"); 3727 Builder.CreateStore(Real, RealPtr, false); 3728 Builder.CreateStore(Imag, ImagPtr, false); 3729 return Ptr; 3730 } 3731 3732 // If the argument is smaller than 8 bytes, it is right-adjusted in 3733 // its doubleword slot. 
Adjust the pointer to pick it up from the 3734 // correct offset. 3735 if (SizeInBytes < 8 && CGF.CGM.getDataLayout().isBigEndian()) { 3736 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 3737 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes)); 3738 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 3739 } 3740 3741 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 3742 return Builder.CreateBitCast(Addr, PTy); 3743 } 3744 3745 static bool 3746 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3747 llvm::Value *Address) { 3748 // This is calculated from the LLVM and GCC tables and verified 3749 // against gcc output. AFAIK all ABIs use the same encoding. 3750 3751 CodeGen::CGBuilderTy &Builder = CGF.Builder; 3752 3753 llvm::IntegerType *i8 = CGF.Int8Ty; 3754 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 3755 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 3756 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 3757 3758 // 0-31: r0-31, the 8-byte general-purpose registers 3759 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 3760 3761 // 32-63: fp0-31, the 8-byte floating-point registers 3762 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 3763 3764 // 64-76 are various 4-byte special-purpose registers: 3765 // 64: mq 3766 // 65: lr 3767 // 66: ctr 3768 // 67: ap 3769 // 68-75 cr0-7 3770 // 76: xer 3771 AssignToArrayRange(Builder, Address, Four8, 64, 76); 3772 3773 // 77-108: v0-31, the 16-byte vector registers 3774 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 3775 3776 // 109: vrsave 3777 // 110: vscr 3778 // 111: spe_acc 3779 // 112: spefscr 3780 // 113: sfp 3781 AssignToArrayRange(Builder, Address, Four8, 109, 113); 3782 3783 return false; 3784 } 3785 3786 bool 3787 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( 3788 CodeGen::CodeGenFunction &CGF, 3789 llvm::Value *Address) const { 3790 3791 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 3792 } 3793 3794 bool 3795 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3796 llvm::Value *Address) const { 3797 3798 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 3799 } 3800 3801 //===----------------------------------------------------------------------===// 3802 // AArch64 ABI Implementation 3803 //===----------------------------------------------------------------------===// 3804 3805 namespace { 3806 3807 class AArch64ABIInfo : public ABIInfo { 3808 public: 3809 enum ABIKind { 3810 AAPCS = 0, 3811 DarwinPCS 3812 }; 3813 3814 private: 3815 ABIKind Kind; 3816 3817 public: 3818 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {} 3819 3820 private: 3821 ABIKind getABIKind() const { return Kind; } 3822 bool isDarwinPCS() const { return Kind == DarwinPCS; } 3823 3824 ABIArgInfo classifyReturnType(QualType RetTy) const; 3825 ABIArgInfo classifyArgumentType(QualType RetTy) const; 3826 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 3827 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 3828 uint64_t Members) const override; 3829 3830 bool isIllegalVectorType(QualType Ty) const; 3831 3832 void computeInfo(CGFunctionInfo &FI) const override { 3833 if (!getCXXABI().classifyReturnType(FI)) 3834 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3835 3836 for (auto &it : FI.arguments()) 3837 it.info = classifyArgumentType(it.type); 3838 } 3839 3840 llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty, 3841 CodeGenFunction &CGF) const; 
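  // EmitVAArg (below) dispatches to one of these two implementations,
  // depending on whether the Darwin or the AAPCS variant of the ABI is in
  // use.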
3842 3843 llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty, 3844 CodeGenFunction &CGF) const; 3845 3846 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3847 CodeGenFunction &CGF) const override { 3848 return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF) 3849 : EmitAAPCSVAArg(VAListAddr, Ty, CGF); 3850 } 3851 }; 3852 3853 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo { 3854 public: 3855 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind) 3856 : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {} 3857 3858 StringRef getARCRetainAutoreleasedReturnValueMarker() const { 3859 return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue"; 3860 } 3861 3862 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { return 31; } 3863 3864 virtual bool doesReturnSlotInterfereWithArgs() const { return false; } 3865 }; 3866 } 3867 3868 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const { 3869 Ty = useFirstFieldIfTransparentUnion(Ty); 3870 3871 // Handle illegal vector types here. 3872 if (isIllegalVectorType(Ty)) { 3873 uint64_t Size = getContext().getTypeSize(Ty); 3874 if (Size <= 32) { 3875 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext()); 3876 return ABIArgInfo::getDirect(ResType); 3877 } 3878 if (Size == 64) { 3879 llvm::Type *ResType = 3880 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2); 3881 return ABIArgInfo::getDirect(ResType); 3882 } 3883 if (Size == 128) { 3884 llvm::Type *ResType = 3885 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4); 3886 return ABIArgInfo::getDirect(ResType); 3887 } 3888 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3889 } 3890 3891 if (!isAggregateTypeForABI(Ty)) { 3892 // Treat an enum type as its underlying type. 3893 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3894 Ty = EnumTy->getDecl()->getIntegerType(); 3895 3896 return (Ty->isPromotableIntegerType() && isDarwinPCS() 3897 ? ABIArgInfo::getExtend() 3898 : ABIArgInfo::getDirect()); 3899 } 3900 3901 // Structures with either a non-trivial destructor or a non-trivial 3902 // copy constructor are always indirect. 3903 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 3904 return ABIArgInfo::getIndirect(0, /*ByVal=*/RAA == 3905 CGCXXABI::RAA_DirectInMemory); 3906 } 3907 3908 // Empty records are always ignored on Darwin, but actually passed in C++ mode 3909 // elsewhere for GNU compatibility. 3910 if (isEmptyRecord(getContext(), Ty, true)) { 3911 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS()) 3912 return ABIArgInfo::getIgnore(); 3913 3914 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 3915 } 3916 3917 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded. 3918 const Type *Base = nullptr; 3919 uint64_t Members = 0; 3920 if (isHomogeneousAggregate(Ty, Base, Members)) { 3921 return ABIArgInfo::getDirect( 3922 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members)); 3923 } 3924 3925 // Aggregates <= 16 bytes are passed directly in registers or on the stack. 3926 uint64_t Size = getContext().getTypeSize(Ty); 3927 if (Size <= 128) { 3928 unsigned Alignment = getContext().getTypeAlign(Ty); 3929 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes 3930 3931 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. 3932 // For aggregates with 16-byte alignment, we use i128. 
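    // (e.g. a 12-byte struct with 4-byte alignment is rounded up to 128 bits
    // and passed as [2 x i64]; a 16-byte aggregate with 16-byte alignment
    // falls through below and is passed as a single i128.)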
3933 if (Alignment < 128 && Size == 128) { 3934 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); 3935 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); 3936 } 3937 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 3938 } 3939 3940 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3941 } 3942 3943 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const { 3944 if (RetTy->isVoidType()) 3945 return ABIArgInfo::getIgnore(); 3946 3947 // Large vector types should be returned via memory. 3948 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 3949 return ABIArgInfo::getIndirect(0); 3950 3951 if (!isAggregateTypeForABI(RetTy)) { 3952 // Treat an enum type as its underlying type. 3953 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3954 RetTy = EnumTy->getDecl()->getIntegerType(); 3955 3956 return (RetTy->isPromotableIntegerType() && isDarwinPCS() 3957 ? ABIArgInfo::getExtend() 3958 : ABIArgInfo::getDirect()); 3959 } 3960 3961 if (isEmptyRecord(getContext(), RetTy, true)) 3962 return ABIArgInfo::getIgnore(); 3963 3964 const Type *Base = nullptr; 3965 uint64_t Members = 0; 3966 if (isHomogeneousAggregate(RetTy, Base, Members)) 3967 // Homogeneous Floating-point Aggregates (HFAs) are returned directly. 3968 return ABIArgInfo::getDirect(); 3969 3970 // Aggregates <= 16 bytes are returned directly in registers or on the stack. 3971 uint64_t Size = getContext().getTypeSize(RetTy); 3972 if (Size <= 128) { 3973 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes 3974 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 3975 } 3976 3977 return ABIArgInfo::getIndirect(0); 3978 } 3979 3980 /// isIllegalVectorType - check whether the vector type is legal for AArch64. 3981 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const { 3982 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3983 // Check whether VT is legal. 3984 unsigned NumElements = VT->getNumElements(); 3985 uint64_t Size = getContext().getTypeSize(VT); 3986 // NumElements should be power of 2 between 1 and 16. 3987 if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16) 3988 return true; 3989 return Size != 64 && (Size != 128 || NumElements == 1); 3990 } 3991 return false; 3992 } 3993 3994 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 3995 // Homogeneous aggregates for AAPCS64 must have base types of a floating 3996 // point type or a short-vector type. This is the same as the 32-bit ABI, 3997 // but with the difference that any floating-point type is allowed, 3998 // including __fp16. 
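  // (For example, 'struct { __fp16 r, i; }' therefore qualifies as a
  // homogeneous aggregate here, which it would not under the 32-bit ABI.)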
3999 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 4000 if (BT->isFloatingPoint()) 4001 return true; 4002 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 4003 unsigned VecSize = getContext().getTypeSize(VT); 4004 if (VecSize == 64 || VecSize == 128) 4005 return true; 4006 } 4007 return false; 4008 } 4009 4010 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 4011 uint64_t Members) const { 4012 return Members <= 4; 4013 } 4014 4015 llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, 4016 QualType Ty, 4017 CodeGenFunction &CGF) const { 4018 ABIArgInfo AI = classifyArgumentType(Ty); 4019 bool IsIndirect = AI.isIndirect(); 4020 4021 llvm::Type *BaseTy = CGF.ConvertType(Ty); 4022 if (IsIndirect) 4023 BaseTy = llvm::PointerType::getUnqual(BaseTy); 4024 else if (AI.getCoerceToType()) 4025 BaseTy = AI.getCoerceToType(); 4026 4027 unsigned NumRegs = 1; 4028 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) { 4029 BaseTy = ArrTy->getElementType(); 4030 NumRegs = ArrTy->getNumElements(); 4031 } 4032 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy(); 4033 4034 // The AArch64 va_list type and handling is specified in the Procedure Call 4035 // Standard, section B.4: 4036 // 4037 // struct { 4038 // void *__stack; 4039 // void *__gr_top; 4040 // void *__vr_top; 4041 // int __gr_offs; 4042 // int __vr_offs; 4043 // }; 4044 4045 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg"); 4046 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 4047 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack"); 4048 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 4049 auto &Ctx = CGF.getContext(); 4050 4051 llvm::Value *reg_offs_p = nullptr, *reg_offs = nullptr; 4052 int reg_top_index; 4053 int RegSize = IsIndirect ? 8 : getContext().getTypeSize(Ty) / 8; 4054 if (!IsFPR) { 4055 // 3 is the field number of __gr_offs 4056 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p"); 4057 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs"); 4058 reg_top_index = 1; // field number for __gr_top 4059 RegSize = llvm::RoundUpToAlignment(RegSize, 8); 4060 } else { 4061 // 4 is the field number of __vr_offs. 4062 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p"); 4063 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs"); 4064 reg_top_index = 2; // field number for __vr_top 4065 RegSize = 16 * NumRegs; 4066 } 4067 4068 //======================================= 4069 // Find out where argument was passed 4070 //======================================= 4071 4072 // If reg_offs >= 0 we're already using the stack for this type of 4073 // argument. We don't want to keep updating reg_offs (in case it overflows, 4074 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves 4075 // whatever they get). 4076 llvm::Value *UsingStack = nullptr; 4077 UsingStack = CGF.Builder.CreateICmpSGE( 4078 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0)); 4079 4080 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock); 4081 4082 // Otherwise, at least some kind of argument could go in these registers, the 4083 // question is whether this particular type is too big. 4084 CGF.EmitBlock(MaybeRegBlock); 4085 4086 // Integer arguments may need to correct register alignment (for example a 4087 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we 4088 // align __gr_offs to calculate the potential address. 
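  // (Illustration: for a type with 16-byte alignment, Align is 16 below, so
  // __gr_offs is rounded up to the next multiple of 16 before it is used.)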
4089 if (!IsFPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) { 4090 int Align = Ctx.getTypeAlign(Ty) / 8; 4091 4092 reg_offs = CGF.Builder.CreateAdd( 4093 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1), 4094 "align_regoffs"); 4095 reg_offs = CGF.Builder.CreateAnd( 4096 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align), 4097 "aligned_regoffs"); 4098 } 4099 4100 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list. 4101 llvm::Value *NewOffset = nullptr; 4102 NewOffset = CGF.Builder.CreateAdd( 4103 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs"); 4104 CGF.Builder.CreateStore(NewOffset, reg_offs_p); 4105 4106 // Now we're in a position to decide whether this argument really was in 4107 // registers or not. 4108 llvm::Value *InRegs = nullptr; 4109 InRegs = CGF.Builder.CreateICmpSLE( 4110 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg"); 4111 4112 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock); 4113 4114 //======================================= 4115 // Argument was in registers 4116 //======================================= 4117 4118 // Now we emit the code for if the argument was originally passed in 4119 // registers. First start the appropriate block: 4120 CGF.EmitBlock(InRegBlock); 4121 4122 llvm::Value *reg_top_p = nullptr, *reg_top = nullptr; 4123 reg_top_p = 4124 CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p"); 4125 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top"); 4126 llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs); 4127 llvm::Value *RegAddr = nullptr; 4128 llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty)); 4129 4130 if (IsIndirect) { 4131 // If it's been passed indirectly (actually a struct), whatever we find from 4132 // stored registers or on the stack will actually be a struct **. 4133 MemTy = llvm::PointerType::getUnqual(MemTy); 4134 } 4135 4136 const Type *Base = nullptr; 4137 uint64_t NumMembers = 0; 4138 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers); 4139 if (IsHFA && NumMembers > 1) { 4140 // Homogeneous aggregates passed in registers will have their elements split 4141 // and stored 16-bytes apart regardless of size (they're notionally in qN, 4142 // qN+1, ...). We reload and store into a temporary local variable 4143 // contiguously. 4144 assert(!IsIndirect && "Homogeneous aggregates should be passed directly"); 4145 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0)); 4146 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers); 4147 llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy); 4148 int Offset = 0; 4149 4150 if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128) 4151 Offset = 16 - Ctx.getTypeSize(Base) / 8; 4152 for (unsigned i = 0; i < NumMembers; ++i) { 4153 llvm::Value *BaseOffset = 4154 llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset); 4155 llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset); 4156 LoadAddr = CGF.Builder.CreateBitCast( 4157 LoadAddr, llvm::PointerType::getUnqual(BaseTy)); 4158 llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i); 4159 4160 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr); 4161 CGF.Builder.CreateStore(Elem, StoreAddr); 4162 } 4163 4164 RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy); 4165 } else { 4166 // Otherwise the object is contiguous in memory 4167 unsigned BeAlign = reg_top_index == 2 ? 
16 : 8;
4168     if (CGF.CGM.getDataLayout().isBigEndian() &&
4169         (IsHFA || !isAggregateTypeForABI(Ty)) &&
4170         Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
4171       int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;
4172       BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);
4173
4174       BaseAddr = CGF.Builder.CreateAdd(
4175           BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
4176
4177       BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
4178     }
4179
4180     RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
4181   }
4182
4183   CGF.EmitBranch(ContBlock);
4184
4185   //=======================================
4186   // Argument was on the stack
4187   //=======================================
4188   CGF.EmitBlock(OnStackBlock);
4189
4190   llvm::Value *stack_p = nullptr, *OnStackAddr = nullptr;
4191   stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
4192   OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
4193
4194   // Again, stack arguments may need realignment. In this case both integer and
4195   // floating-point ones might be affected.
4196   if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
4197     int Align = Ctx.getTypeAlign(Ty) / 8;
4198
4199     OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
4200
4201     OnStackAddr = CGF.Builder.CreateAdd(
4202         OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
4203         "align_stack");
4204     OnStackAddr = CGF.Builder.CreateAnd(
4205         OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
4206         "align_stack");
4207
4208     OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
4209   }
4210
4211   uint64_t StackSize;
4212   if (IsIndirect)
4213     StackSize = 8;
4214   else
4215     StackSize = Ctx.getTypeSize(Ty) / 8;
4216
4217   // All stack slots are 8 bytes
4218   StackSize = llvm::RoundUpToAlignment(StackSize, 8);
4219
4220   llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
4221   llvm::Value *NewStack =
4222       CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack");
4223
4224   // Write the new value of __stack for the next call to va_arg
4225   CGF.Builder.CreateStore(NewStack, stack_p);
4226
4227   if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
4228       Ctx.getTypeSize(Ty) < 64) {
4229     int Offset = 8 - Ctx.getTypeSize(Ty) / 8;
4230     OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
4231
4232     OnStackAddr = CGF.Builder.CreateAdd(
4233         OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
4234
4235     OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
4236   }
4237
4238   OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);
4239
4240   CGF.EmitBranch(ContBlock);
4241
4242   //=======================================
4243   // Tidy up
4244   //=======================================
4245   CGF.EmitBlock(ContBlock);
4246
4247   llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
4248   ResAddr->addIncoming(RegAddr, InRegBlock);
4249   ResAddr->addIncoming(OnStackAddr, OnStackBlock);
4250
4251   if (IsIndirect)
4252     return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
4253
4254   return ResAddr;
4255 }
4256
4257 llvm::Value *AArch64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
4258                                              CodeGenFunction &CGF) const {
4259   // We do not support va_arg for aggregates or illegal vector types.
4260   // Lower VAArg here for these cases and use the LLVM va_arg instruction for
4261   // other cases.
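  // (Returning nullptr below signals the caller to fall back to emitting a
  // plain LLVM va_arg instruction for those other cases.)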
4262 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty)) 4263 return nullptr; 4264 4265 uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8; 4266 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 4267 4268 const Type *Base = nullptr; 4269 uint64_t Members = 0; 4270 bool isHA = isHomogeneousAggregate(Ty, Base, Members); 4271 4272 bool isIndirect = false; 4273 // Arguments bigger than 16 bytes which aren't homogeneous aggregates should 4274 // be passed indirectly. 4275 if (Size > 16 && !isHA) { 4276 isIndirect = true; 4277 Size = 8; 4278 Align = 8; 4279 } 4280 4281 llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); 4282 llvm::Type *BPP = llvm::PointerType::getUnqual(BP); 4283 4284 CGBuilderTy &Builder = CGF.Builder; 4285 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 4286 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 4287 4288 if (isEmptyRecord(getContext(), Ty, true)) { 4289 // These are ignored for parameter passing purposes. 4290 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 4291 return Builder.CreateBitCast(Addr, PTy); 4292 } 4293 4294 const uint64_t MinABIAlign = 8; 4295 if (Align > MinABIAlign) { 4296 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1); 4297 Addr = Builder.CreateGEP(Addr, Offset); 4298 llvm::Value *AsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 4299 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~(Align - 1)); 4300 llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask); 4301 Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align"); 4302 } 4303 4304 uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign); 4305 llvm::Value *NextAddr = Builder.CreateGEP( 4306 Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next"); 4307 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 4308 4309 if (isIndirect) 4310 Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP)); 4311 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 4312 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 4313 4314 return AddrTyped; 4315 } 4316 4317 //===----------------------------------------------------------------------===// 4318 // ARM ABI Implementation 4319 //===----------------------------------------------------------------------===// 4320 4321 namespace { 4322 4323 class ARMABIInfo : public ABIInfo { 4324 public: 4325 enum ABIKind { 4326 APCS = 0, 4327 AAPCS = 1, 4328 AAPCS_VFP 4329 }; 4330 4331 private: 4332 ABIKind Kind; 4333 mutable int VFPRegs[16]; 4334 const unsigned NumVFPs; 4335 const unsigned NumGPRs; 4336 mutable unsigned AllocatedGPRs; 4337 mutable unsigned AllocatedVFPs; 4338 4339 public: 4340 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind), 4341 NumVFPs(16), NumGPRs(4) { 4342 setCCs(); 4343 resetAllocatedRegs(); 4344 } 4345 4346 bool isEABI() const { 4347 switch (getTarget().getTriple().getEnvironment()) { 4348 case llvm::Triple::Android: 4349 case llvm::Triple::EABI: 4350 case llvm::Triple::EABIHF: 4351 case llvm::Triple::GNUEABI: 4352 case llvm::Triple::GNUEABIHF: 4353 return true; 4354 default: 4355 return false; 4356 } 4357 } 4358 4359 bool isEABIHF() const { 4360 switch (getTarget().getTriple().getEnvironment()) { 4361 case llvm::Triple::EABIHF: 4362 case llvm::Triple::GNUEABIHF: 4363 return true; 4364 default: 4365 return false; 4366 } 4367 } 4368 4369 ABIKind getABIKind() const { return Kind; } 4370 4371 private: 4372 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const; 4373 
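  // IsCPRC is set to true when the argument is classified as a VFP
  // co-processor register candidate (see the CPRC rules cited in
  // classifyArgumentType below).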
ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic, 4374 bool &IsCPRC) const; 4375 bool isIllegalVectorType(QualType Ty) const; 4376 4377 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 4378 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 4379 uint64_t Members) const override; 4380 4381 void computeInfo(CGFunctionInfo &FI) const override; 4382 4383 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 4384 CodeGenFunction &CGF) const override; 4385 4386 llvm::CallingConv::ID getLLVMDefaultCC() const; 4387 llvm::CallingConv::ID getABIDefaultCC() const; 4388 void setCCs(); 4389 4390 void markAllocatedGPRs(unsigned Alignment, unsigned NumRequired) const; 4391 void markAllocatedVFPs(unsigned Alignment, unsigned NumRequired) const; 4392 void resetAllocatedRegs(void) const; 4393 }; 4394 4395 class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 4396 public: 4397 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 4398 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 4399 4400 const ARMABIInfo &getABIInfo() const { 4401 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 4402 } 4403 4404 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4405 return 13; 4406 } 4407 4408 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 4409 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; 4410 } 4411 4412 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4413 llvm::Value *Address) const override { 4414 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 4415 4416 // 0-15 are the 16 integer registers. 4417 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); 4418 return false; 4419 } 4420 4421 unsigned getSizeOfUnwindException() const override { 4422 if (getABIInfo().isEABI()) return 88; 4423 return TargetCodeGenInfo::getSizeOfUnwindException(); 4424 } 4425 4426 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4427 CodeGen::CodeGenModule &CGM) const override { 4428 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 4429 if (!FD) 4430 return; 4431 4432 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>(); 4433 if (!Attr) 4434 return; 4435 4436 const char *Kind; 4437 switch (Attr->getInterrupt()) { 4438 case ARMInterruptAttr::Generic: Kind = ""; break; 4439 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break; 4440 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break; 4441 case ARMInterruptAttr::SWI: Kind = "SWI"; break; 4442 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break; 4443 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break; 4444 } 4445 4446 llvm::Function *Fn = cast<llvm::Function>(GV); 4447 4448 Fn->addFnAttr("interrupt", Kind); 4449 4450 if (cast<ARMABIInfo>(getABIInfo()).getABIKind() == ARMABIInfo::APCS) 4451 return; 4452 4453 // AAPCS guarantees that sp will be 8-byte aligned on any public interface, 4454 // however this is not necessarily true on taking any interrupt. Instruct 4455 // the backend to perform a realignment as part of the function prologue. 4456 llvm::AttrBuilder B; 4457 B.addStackAlignmentAttr(8); 4458 Fn->addAttributes(llvm::AttributeSet::FunctionIndex, 4459 llvm::AttributeSet::get(CGM.getLLVMContext(), 4460 llvm::AttributeSet::FunctionIndex, 4461 B)); 4462 } 4463 4464 }; 4465 4466 } 4467 4468 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 4469 // To correctly handle Homogeneous Aggregate, we need to keep track of the 4470 // VFP registers allocated so far. 
4471 // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive 4472 // VFP registers of the appropriate type unallocated then the argument is 4473 // allocated to the lowest-numbered sequence of such registers. 4474 // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are 4475 // unallocated are marked as unavailable. 4476 resetAllocatedRegs(); 4477 4478 if (getCXXABI().classifyReturnType(FI)) { 4479 if (FI.getReturnInfo().isIndirect()) 4480 markAllocatedGPRs(1, 1); 4481 } else { 4482 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic()); 4483 } 4484 for (auto &I : FI.arguments()) { 4485 unsigned PreAllocationVFPs = AllocatedVFPs; 4486 unsigned PreAllocationGPRs = AllocatedGPRs; 4487 bool IsCPRC = false; 4488 // 6.1.2.3 There is one VFP co-processor register class using registers 4489 // s0-s15 (d0-d7) for passing arguments. 4490 I.info = classifyArgumentType(I.type, FI.isVariadic(), IsCPRC); 4491 4492 // If we have allocated some arguments onto the stack (due to running 4493 // out of VFP registers), we cannot split an argument between GPRs and 4494 // the stack. If this situation occurs, we add padding to prevent the 4495 // GPRs from being used. In this situation, the current argument could 4496 // only be allocated by rule C.8, so rule C.6 would mark these GPRs as 4497 // unusable anyway. 4498 // We do not have to do this if the argument is being passed ByVal, as the 4499 // backend can handle that situation correctly. 4500 const bool StackUsed = PreAllocationGPRs > NumGPRs || PreAllocationVFPs > NumVFPs; 4501 const bool IsByVal = I.info.isIndirect() && I.info.getIndirectByVal(); 4502 if (!IsCPRC && PreAllocationGPRs < NumGPRs && AllocatedGPRs > NumGPRs && 4503 StackUsed && !IsByVal) { 4504 llvm::Type *PaddingTy = llvm::ArrayType::get( 4505 llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreAllocationGPRs); 4506 if (I.info.canHaveCoerceToType()) { 4507 I.info = ABIArgInfo::getDirect(I.info.getCoerceToType() /* type */, 4508 0 /* offset */, PaddingTy, true); 4509 } else { 4510 I.info = ABIArgInfo::getDirect(nullptr /* type */, 0 /* offset */, 4511 PaddingTy, true); 4512 } 4513 } 4514 } 4515 4516 // Always honor user-specified calling convention. 4517 if (FI.getCallingConvention() != llvm::CallingConv::C) 4518 return; 4519 4520 llvm::CallingConv::ID cc = getRuntimeCC(); 4521 if (cc != llvm::CallingConv::C) 4522 FI.setEffectiveCallingConvention(cc); 4523 } 4524 4525 /// Return the default calling convention that LLVM will use. 4526 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const { 4527 // The default calling convention that LLVM will infer. 4528 if (isEABIHF()) 4529 return llvm::CallingConv::ARM_AAPCS_VFP; 4530 else if (isEABI()) 4531 return llvm::CallingConv::ARM_AAPCS; 4532 else 4533 return llvm::CallingConv::ARM_APCS; 4534 } 4535 4536 /// Return the calling convention that our ABI would like us to use 4537 /// as the C calling convention. 4538 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const { 4539 switch (getABIKind()) { 4540 case APCS: return llvm::CallingConv::ARM_APCS; 4541 case AAPCS: return llvm::CallingConv::ARM_AAPCS; 4542 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 4543 } 4544 llvm_unreachable("bad ABI kind"); 4545 } 4546 4547 void ARMABIInfo::setCCs() { 4548 assert(getRuntimeCC() == llvm::CallingConv::C); 4549 4550 // Don't muddy up the IR with a ton of explicit annotations if 4551 // they'd just match what LLVM will infer from the triple. 
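  // (For example, when the ABI default and the triple-inferred default are
  // both ARM_AAPCS_VFP, RuntimeCC keeps the plain C convention and no
  // explicit annotation is emitted.)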
4552 llvm::CallingConv::ID abiCC = getABIDefaultCC(); 4553 if (abiCC != getLLVMDefaultCC()) 4554 RuntimeCC = abiCC; 4555 4556 BuiltinCC = (getABIKind() == APCS ? 4557 llvm::CallingConv::ARM_APCS : llvm::CallingConv::ARM_AAPCS); 4558 } 4559 4560 /// markAllocatedVFPs - update VFPRegs according to the alignment and 4561 /// number of VFP registers (unit is S register) requested. 4562 void ARMABIInfo::markAllocatedVFPs(unsigned Alignment, 4563 unsigned NumRequired) const { 4564 // Early Exit. 4565 if (AllocatedVFPs >= 16) { 4566 // We use AllocatedVFP > 16 to signal that some CPRCs were allocated on 4567 // the stack. 4568 AllocatedVFPs = 17; 4569 return; 4570 } 4571 // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive 4572 // VFP registers of the appropriate type unallocated then the argument is 4573 // allocated to the lowest-numbered sequence of such registers. 4574 for (unsigned I = 0; I < 16; I += Alignment) { 4575 bool FoundSlot = true; 4576 for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++) 4577 if (J >= 16 || VFPRegs[J]) { 4578 FoundSlot = false; 4579 break; 4580 } 4581 if (FoundSlot) { 4582 for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++) 4583 VFPRegs[J] = 1; 4584 AllocatedVFPs += NumRequired; 4585 return; 4586 } 4587 } 4588 // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are 4589 // unallocated are marked as unavailable. 4590 for (unsigned I = 0; I < 16; I++) 4591 VFPRegs[I] = 1; 4592 AllocatedVFPs = 17; // We do not have enough VFP registers. 4593 } 4594 4595 /// Update AllocatedGPRs to record the number of general purpose registers 4596 /// which have been allocated. It is valid for AllocatedGPRs to go above 4, 4597 /// this represents arguments being stored on the stack. 4598 void ARMABIInfo::markAllocatedGPRs(unsigned Alignment, 4599 unsigned NumRequired) const { 4600 assert((Alignment == 1 || Alignment == 2) && "Alignment must be 4 or 8 bytes"); 4601 4602 if (Alignment == 2 && AllocatedGPRs & 0x1) 4603 AllocatedGPRs += 1; 4604 4605 AllocatedGPRs += NumRequired; 4606 } 4607 4608 void ARMABIInfo::resetAllocatedRegs(void) const { 4609 AllocatedGPRs = 0; 4610 AllocatedVFPs = 0; 4611 for (unsigned i = 0; i < NumVFPs; ++i) 4612 VFPRegs[i] = 0; 4613 } 4614 4615 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic, 4616 bool &IsCPRC) const { 4617 // We update number of allocated VFPs according to 4618 // 6.1.2.1 The following argument types are VFP CPRCs: 4619 // A single-precision floating-point type (including promoted 4620 // half-precision types); A double-precision floating-point type; 4621 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate 4622 // with a Base Type of a single- or double-precision floating-point type, 4623 // 64-bit containerized vectors or 128-bit containerized vectors with one 4624 // to four Elements. 4625 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic; 4626 4627 Ty = useFirstFieldIfTransparentUnion(Ty); 4628 4629 // Handle illegal vector types here. 
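  // Illegal vectors are coerced below to i32, <2 x i32>, or <4 x i32>
  // depending on their size, or passed indirectly otherwise.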
4630 if (isIllegalVectorType(Ty)) { 4631 uint64_t Size = getContext().getTypeSize(Ty); 4632 if (Size <= 32) { 4633 llvm::Type *ResType = 4634 llvm::Type::getInt32Ty(getVMContext()); 4635 markAllocatedGPRs(1, 1); 4636 return ABIArgInfo::getDirect(ResType); 4637 } 4638 if (Size == 64) { 4639 llvm::Type *ResType = llvm::VectorType::get( 4640 llvm::Type::getInt32Ty(getVMContext()), 2); 4641 if (getABIKind() == ARMABIInfo::AAPCS || isVariadic){ 4642 markAllocatedGPRs(2, 2); 4643 } else { 4644 markAllocatedVFPs(2, 2); 4645 IsCPRC = true; 4646 } 4647 return ABIArgInfo::getDirect(ResType); 4648 } 4649 if (Size == 128) { 4650 llvm::Type *ResType = llvm::VectorType::get( 4651 llvm::Type::getInt32Ty(getVMContext()), 4); 4652 if (getABIKind() == ARMABIInfo::AAPCS || isVariadic) { 4653 markAllocatedGPRs(2, 4); 4654 } else { 4655 markAllocatedVFPs(4, 4); 4656 IsCPRC = true; 4657 } 4658 return ABIArgInfo::getDirect(ResType); 4659 } 4660 markAllocatedGPRs(1, 1); 4661 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 4662 } 4663 // Update VFPRegs for legal vector types. 4664 if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) { 4665 if (const VectorType *VT = Ty->getAs<VectorType>()) { 4666 uint64_t Size = getContext().getTypeSize(VT); 4667 // Size of a legal vector should be power of 2 and above 64. 4668 markAllocatedVFPs(Size >= 128 ? 4 : 2, Size / 32); 4669 IsCPRC = true; 4670 } 4671 } 4672 // Update VFPRegs for floating point types. 4673 if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) { 4674 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 4675 if (BT->getKind() == BuiltinType::Half || 4676 BT->getKind() == BuiltinType::Float) { 4677 markAllocatedVFPs(1, 1); 4678 IsCPRC = true; 4679 } 4680 if (BT->getKind() == BuiltinType::Double || 4681 BT->getKind() == BuiltinType::LongDouble) { 4682 markAllocatedVFPs(2, 2); 4683 IsCPRC = true; 4684 } 4685 } 4686 } 4687 4688 if (!isAggregateTypeForABI(Ty)) { 4689 // Treat an enum type as its underlying type. 4690 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { 4691 Ty = EnumTy->getDecl()->getIntegerType(); 4692 } 4693 4694 unsigned Size = getContext().getTypeSize(Ty); 4695 if (!IsCPRC) 4696 markAllocatedGPRs(Size > 32 ? 2 : 1, (Size + 31) / 32); 4697 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend() 4698 : ABIArgInfo::getDirect()); 4699 } 4700 4701 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 4702 markAllocatedGPRs(1, 1); 4703 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 4704 } 4705 4706 // Ignore empty records. 4707 if (isEmptyRecord(getContext(), Ty, true)) 4708 return ABIArgInfo::getIgnore(); 4709 4710 if (IsEffectivelyAAPCS_VFP) { 4711 // Homogeneous Aggregates need to be expanded when we can fit the aggregate 4712 // into VFP registers. 4713 const Type *Base = nullptr; 4714 uint64_t Members = 0; 4715 if (isHomogeneousAggregate(Ty, Base, Members)) { 4716 assert(Base && "Base class should be set for homogeneous aggregate"); 4717 // Base can be a floating-point or a vector. 4718 if (Base->isVectorType()) { 4719 // ElementSize is in number of floats. 4720 unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 
2 : 4; 4721 markAllocatedVFPs(ElementSize, 4722 Members * ElementSize); 4723 } else if (Base->isSpecificBuiltinType(BuiltinType::Float)) 4724 markAllocatedVFPs(1, Members); 4725 else { 4726 assert(Base->isSpecificBuiltinType(BuiltinType::Double) || 4727 Base->isSpecificBuiltinType(BuiltinType::LongDouble)); 4728 markAllocatedVFPs(2, Members * 2); 4729 } 4730 IsCPRC = true; 4731 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); 4732 } 4733 } 4734 4735 // Support byval for ARM. 4736 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at 4737 // most 8-byte. We realign the indirect argument if type alignment is bigger 4738 // than ABI alignment. 4739 uint64_t ABIAlign = 4; 4740 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8; 4741 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 4742 getABIKind() == ARMABIInfo::AAPCS) 4743 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 4744 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) { 4745 // Update Allocated GPRs. Since this is only used when the size of the 4746 // argument is greater than 64 bytes, this will always use up any available 4747 // registers (of which there are 4). We also don't care about getting the 4748 // alignment right, because general-purpose registers cannot be back-filled. 4749 markAllocatedGPRs(1, 4); 4750 return ABIArgInfo::getIndirect(TyAlign, /*ByVal=*/true, 4751 /*Realign=*/TyAlign > ABIAlign); 4752 } 4753 4754 // Otherwise, pass by coercing to a structure of the appropriate size. 4755 llvm::Type* ElemTy; 4756 unsigned SizeRegs; 4757 // FIXME: Try to match the types of the arguments more accurately where 4758 // we can. 4759 if (getContext().getTypeAlign(Ty) <= 32) { 4760 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 4761 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 4762 markAllocatedGPRs(1, SizeRegs); 4763 } else { 4764 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 4765 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 4766 markAllocatedGPRs(2, SizeRegs * 2); 4767 } 4768 4769 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs)); 4770 } 4771 4772 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 4773 llvm::LLVMContext &VMContext) { 4774 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 4775 // is called integer-like if its size is less than or equal to one word, and 4776 // the offset of each of its addressable sub-fields is zero. 4777 4778 uint64_t Size = Context.getTypeSize(Ty); 4779 4780 // Check that the type fits in a word. 4781 if (Size > 32) 4782 return false; 4783 4784 // FIXME: Handle vector types! 4785 if (Ty->isVectorType()) 4786 return false; 4787 4788 // Float types are never treated as "integer like". 4789 if (Ty->isRealFloatingType()) 4790 return false; 4791 4792 // If this is a builtin or pointer type then it is ok. 4793 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 4794 return true; 4795 4796 // Small complex integer types are "integer like". 4797 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 4798 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 4799 4800 // Single element and zero sized arrays should be allowed, by the definition 4801 // above, but they are not. 4802 4803 // Otherwise, it must be a record type. 4804 const RecordType *RT = Ty->getAs<RecordType>(); 4805 if (!RT) return false; 4806 4807 // Ignore records with flexible arrays. 
4808 const RecordDecl *RD = RT->getDecl(); 4809 if (RD->hasFlexibleArrayMember()) 4810 return false; 4811 4812 // Check that all sub-fields are at offset 0, and are themselves "integer 4813 // like". 4814 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 4815 4816 bool HadField = false; 4817 unsigned idx = 0; 4818 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 4819 i != e; ++i, ++idx) { 4820 const FieldDecl *FD = *i; 4821 4822 // Bit-fields are not addressable, we only need to verify they are "integer 4823 // like". We still have to disallow a subsequent non-bitfield, for example: 4824 // struct { int : 0; int x } 4825 // is non-integer like according to gcc. 4826 if (FD->isBitField()) { 4827 if (!RD->isUnion()) 4828 HadField = true; 4829 4830 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 4831 return false; 4832 4833 continue; 4834 } 4835 4836 // Check if this field is at offset 0. 4837 if (Layout.getFieldOffset(idx) != 0) 4838 return false; 4839 4840 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 4841 return false; 4842 4843 // Only allow at most one field in a structure. This doesn't match the 4844 // wording above, but follows gcc in situations with a field following an 4845 // empty structure. 4846 if (!RD->isUnion()) { 4847 if (HadField) 4848 return false; 4849 4850 HadField = true; 4851 } 4852 } 4853 4854 return true; 4855 } 4856 4857 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, 4858 bool isVariadic) const { 4859 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic; 4860 4861 if (RetTy->isVoidType()) 4862 return ABIArgInfo::getIgnore(); 4863 4864 // Large vector types should be returned via memory. 4865 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) { 4866 markAllocatedGPRs(1, 1); 4867 return ABIArgInfo::getIndirect(0); 4868 } 4869 4870 if (!isAggregateTypeForABI(RetTy)) { 4871 // Treat an enum type as its underlying type. 4872 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 4873 RetTy = EnumTy->getDecl()->getIntegerType(); 4874 4875 return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend() 4876 : ABIArgInfo::getDirect(); 4877 } 4878 4879 // Are we following APCS? 4880 if (getABIKind() == APCS) { 4881 if (isEmptyRecord(getContext(), RetTy, false)) 4882 return ABIArgInfo::getIgnore(); 4883 4884 // Complex types are all returned as packed integers. 4885 // 4886 // FIXME: Consider using 2 x vector types if the back end handles them 4887 // correctly. 4888 if (RetTy->isAnyComplexType()) 4889 return ABIArgInfo::getDirect(llvm::IntegerType::get( 4890 getVMContext(), getContext().getTypeSize(RetTy))); 4891 4892 // Integer like structures are returned in r0. 4893 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 4894 // Return in the smallest viable integer type. 4895 uint64_t Size = getContext().getTypeSize(RetTy); 4896 if (Size <= 8) 4897 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 4898 if (Size <= 16) 4899 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 4900 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 4901 } 4902 4903 // Otherwise return in memory. 4904 markAllocatedGPRs(1, 1); 4905 return ABIArgInfo::getIndirect(0); 4906 } 4907 4908 // Otherwise this is an AAPCS variant. 4909 4910 if (isEmptyRecord(getContext(), RetTy, true)) 4911 return ABIArgInfo::getIgnore(); 4912 4913 // Check for homogeneous aggregates with AAPCS-VFP. 
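// Illustrative example (type name assumed, not taken from the source): a C
// struct such as "struct Vec2 { float x, y; }" is a homogeneous aggregate with
// Base = float and Members = 2, so under effective AAPCS-VFP it is returned
// directly by the block below rather than indirectly in memory.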
4914 if (IsEffectivelyAAPCS_VFP) {
4915 const Type *Base = nullptr;
4916 uint64_t Members;
4917 if (isHomogeneousAggregate(RetTy, Base, Members)) {
4918 assert(Base && "Base class should be set for homogeneous aggregate");
4919 // Homogeneous Aggregates are returned directly.
4920 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
4921 }
4922 }
4923
4924 // Aggregates <= 4 bytes are returned in r0; other aggregates
4925 // are returned indirectly.
4926 uint64_t Size = getContext().getTypeSize(RetTy);
4927 if (Size <= 32) {
4928 if (getDataLayout().isBigEndian())
4929 // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
4930 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
4931
4932 // Return in the smallest viable integer type.
4933 if (Size <= 8)
4934 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
4935 if (Size <= 16)
4936 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
4937 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
4938 }
4939
4940 markAllocatedGPRs(1, 1);
4941 return ABIArgInfo::getIndirect(0);
4942 }
4943
4944 /// isIllegalVectorType - check whether Ty is an illegal vector type.
4945 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
4946 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4947 // Check whether VT is legal.
4948 unsigned NumElements = VT->getNumElements();
4949 uint64_t Size = getContext().getTypeSize(VT);
4950 // NumElements should be a power of 2.
4951 if ((NumElements & (NumElements - 1)) != 0)
4952 return true;
4953 // Size should be greater than 32 bits.
4954 return Size <= 32;
4955 }
4956 return false;
4957 }
4958
4959 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
4960 // Homogeneous aggregates for AAPCS-VFP must have base types of float,
4961 // double, or 64-bit or 128-bit vectors.
4962 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4963 if (BT->getKind() == BuiltinType::Float ||
4964 BT->getKind() == BuiltinType::Double ||
4965 BT->getKind() == BuiltinType::LongDouble)
4966 return true;
4967 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
4968 unsigned VecSize = getContext().getTypeSize(VT);
4969 if (VecSize == 64 || VecSize == 128)
4970 return true;
4971 }
4972 return false;
4973 }
4974
4975 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
4976 uint64_t Members) const {
4977 return Members <= 4;
4978 }
4979
4980 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4981 CodeGenFunction &CGF) const {
4982 llvm::Type *BP = CGF.Int8PtrTy;
4983 llvm::Type *BPP = CGF.Int8PtrPtrTy;
4984
4985 CGBuilderTy &Builder = CGF.Builder;
4986 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
4987 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
4988
4989 if (isEmptyRecord(getContext(), Ty, true)) {
4990 // These are ignored for parameter passing purposes.
4991 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
4992 return Builder.CreateBitCast(Addr, PTy);
4993 }
4994
4995 uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
4996 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
4997 bool IsIndirect = false;
4998
4999 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
5000 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
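// Worked example of the clamping below (illustrative): a type whose natural
// alignment is 16 bytes gets clamped to the 8-byte ABI maximum under AAPCS,
// while a type with 2-byte natural alignment is raised to the 4-byte minimum.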
5001 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 5002 getABIKind() == ARMABIInfo::AAPCS) 5003 TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 5004 else 5005 TyAlign = 4; 5006 // Use indirect if size of the illegal vector is bigger than 16 bytes. 5007 if (isIllegalVectorType(Ty) && Size > 16) { 5008 IsIndirect = true; 5009 Size = 4; 5010 TyAlign = 4; 5011 } 5012 5013 // Handle address alignment for ABI alignment > 4 bytes. 5014 if (TyAlign > 4) { 5015 assert((TyAlign & (TyAlign - 1)) == 0 && 5016 "Alignment is not power of 2!"); 5017 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty); 5018 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1)); 5019 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1))); 5020 Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align"); 5021 } 5022 5023 uint64_t Offset = 5024 llvm::RoundUpToAlignment(Size, 4); 5025 llvm::Value *NextAddr = 5026 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 5027 "ap.next"); 5028 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 5029 5030 if (IsIndirect) 5031 Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP)); 5032 else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) { 5033 // We can't directly cast ap.cur to pointer to a vector type, since ap.cur 5034 // may not be correctly aligned for the vector type. We create an aligned 5035 // temporary space and copy the content over from ap.cur to the temporary 5036 // space. This is necessary if the natural alignment of the type is greater 5037 // than the ABI alignment. 5038 llvm::Type *I8PtrTy = Builder.getInt8PtrTy(); 5039 CharUnits CharSize = getContext().getTypeSizeInChars(Ty); 5040 llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty), 5041 "var.align"); 5042 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy); 5043 llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy); 5044 Builder.CreateMemCpy(Dst, Src, 5045 llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()), 5046 TyAlign, false); 5047 Addr = AlignedTemp; //The content is in aligned location. 5048 } 5049 llvm::Type *PTy = 5050 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 5051 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 5052 5053 return AddrTyped; 5054 } 5055 5056 namespace { 5057 5058 class NaClARMABIInfo : public ABIInfo { 5059 public: 5060 NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind) 5061 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {} 5062 void computeInfo(CGFunctionInfo &FI) const override; 5063 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 5064 CodeGenFunction &CGF) const override; 5065 private: 5066 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv. 5067 ARMABIInfo NInfo; // Used for everything else. 5068 }; 5069 5070 class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo { 5071 public: 5072 NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind) 5073 : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {} 5074 }; 5075 5076 } 5077 5078 void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 5079 if (FI.getASTCallingConvention() == CC_PnaclCall) 5080 PInfo.computeInfo(FI); 5081 else 5082 static_cast<const ABIInfo&>(NInfo).computeInfo(FI); 5083 } 5084 5085 llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 5086 CodeGenFunction &CGF) const { 5087 // Always use the native convention; calling pnacl-style varargs functions 5088 // is unsupported. 
5089 return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
5090 }
5091
5092 //===----------------------------------------------------------------------===//
5093 // NVPTX ABI Implementation
5094 //===----------------------------------------------------------------------===//
5095
5096 namespace {
5097
5098 class NVPTXABIInfo : public ABIInfo {
5099 public:
5100 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5101
5102 ABIArgInfo classifyReturnType(QualType RetTy) const;
5103 ABIArgInfo classifyArgumentType(QualType Ty) const;
5104
5105 void computeInfo(CGFunctionInfo &FI) const override;
5106 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5107 CodeGenFunction &CFG) const override;
5108 };
5109
5110 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
5111 public:
5112 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
5113 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
5114
5115 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5116 CodeGen::CodeGenModule &M) const override;
5117 private:
5118 // Creates an MDNode with F, Name, and Operand as operands, and adds the
5119 // resulting MDNode to the nvvm.annotations named metadata node.
5120 static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
5121 };
5122
5123 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
5124 if (RetTy->isVoidType())
5125 return ABIArgInfo::getIgnore();
5126
5127 // Note: this is different from the default ABI.
5128 if (!RetTy->isScalarType())
5129 return ABIArgInfo::getDirect();
5130
5131 // Treat an enum type as its underlying type.
5132 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5133 RetTy = EnumTy->getDecl()->getIntegerType();
5134
5135 return (RetTy->isPromotableIntegerType() ?
5136 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5137 }
5138
5139 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
5140 // Treat an enum type as its underlying type.
5141 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5142 Ty = EnumTy->getDecl()->getIntegerType();
5143
5144 // Pass aggregate types indirectly, by value.
5145 if (isAggregateTypeForABI(Ty))
5146 return ABIArgInfo::getIndirect(0, /* byval */ true);
5147
5148 return (Ty->isPromotableIntegerType() ?
5149 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5150 }
5151
5152 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
5153 if (!getCXXABI().classifyReturnType(FI))
5154 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5155 for (auto &I : FI.arguments())
5156 I.info = classifyArgumentType(I.type);
5157
5158 // Always honor user-specified calling convention.
5159 if (FI.getCallingConvention() != llvm::CallingConv::C) 5160 return; 5161 5162 FI.setEffectiveCallingConvention(getRuntimeCC()); 5163 } 5164 5165 llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 5166 CodeGenFunction &CFG) const { 5167 llvm_unreachable("NVPTX does not support varargs"); 5168 } 5169 5170 void NVPTXTargetCodeGenInfo:: 5171 SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5172 CodeGen::CodeGenModule &M) const{ 5173 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 5174 if (!FD) return; 5175 5176 llvm::Function *F = cast<llvm::Function>(GV); 5177 5178 // Perform special handling in OpenCL mode 5179 if (M.getLangOpts().OpenCL) { 5180 // Use OpenCL function attributes to check for kernel functions 5181 // By default, all functions are device functions 5182 if (FD->hasAttr<OpenCLKernelAttr>()) { 5183 // OpenCL __kernel functions get kernel metadata 5184 // Create !{<func-ref>, metadata !"kernel", i32 1} node 5185 addNVVMMetadata(F, "kernel", 1); 5186 // And kernel functions are not subject to inlining 5187 F->addFnAttr(llvm::Attribute::NoInline); 5188 } 5189 } 5190 5191 // Perform special handling in CUDA mode. 5192 if (M.getLangOpts().CUDA) { 5193 // CUDA __global__ functions get a kernel metadata entry. Since 5194 // __global__ functions cannot be called from the device, we do not 5195 // need to set the noinline attribute. 5196 if (FD->hasAttr<CUDAGlobalAttr>()) { 5197 // Create !{<func-ref>, metadata !"kernel", i32 1} node 5198 addNVVMMetadata(F, "kernel", 1); 5199 } 5200 if (FD->hasAttr<CUDALaunchBoundsAttr>()) { 5201 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node 5202 addNVVMMetadata(F, "maxntidx", 5203 FD->getAttr<CUDALaunchBoundsAttr>()->getMaxThreads()); 5204 // min blocks is a default argument for CUDALaunchBoundsAttr, so getting a 5205 // zero value from getMinBlocks either means it was not specified in 5206 // __launch_bounds__ or the user specified a 0 value. In both cases, we 5207 // don't have to add a PTX directive. 
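// Illustrative example (CUDA source assumed, not part of this file): for a
// kernel declared with __launch_bounds__(256, 2), getMaxThreads() yields the
// "maxntidx" = 256 annotation above, and getMinBlocks() returns 2, so a
// "minctasm" = 2 annotation is added below.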
5208 int MinCTASM = FD->getAttr<CUDALaunchBoundsAttr>()->getMinBlocks(); 5209 if (MinCTASM > 0) { 5210 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node 5211 addNVVMMetadata(F, "minctasm", MinCTASM); 5212 } 5213 } 5214 } 5215 } 5216 5217 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name, 5218 int Operand) { 5219 llvm::Module *M = F->getParent(); 5220 llvm::LLVMContext &Ctx = M->getContext(); 5221 5222 // Get "nvvm.annotations" metadata node 5223 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations"); 5224 5225 llvm::Metadata *MDVals[] = { 5226 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name), 5227 llvm::ConstantAsMetadata::get( 5228 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))}; 5229 // Append metadata to nvvm.annotations 5230 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 5231 } 5232 } 5233 5234 //===----------------------------------------------------------------------===// 5235 // SystemZ ABI Implementation 5236 //===----------------------------------------------------------------------===// 5237 5238 namespace { 5239 5240 class SystemZABIInfo : public ABIInfo { 5241 public: 5242 SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 5243 5244 bool isPromotableIntegerType(QualType Ty) const; 5245 bool isCompoundType(QualType Ty) const; 5246 bool isFPArgumentType(QualType Ty) const; 5247 5248 ABIArgInfo classifyReturnType(QualType RetTy) const; 5249 ABIArgInfo classifyArgumentType(QualType ArgTy) const; 5250 5251 void computeInfo(CGFunctionInfo &FI) const override { 5252 if (!getCXXABI().classifyReturnType(FI)) 5253 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 5254 for (auto &I : FI.arguments()) 5255 I.info = classifyArgumentType(I.type); 5256 } 5257 5258 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 5259 CodeGenFunction &CGF) const override; 5260 }; 5261 5262 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo { 5263 public: 5264 SystemZTargetCodeGenInfo(CodeGenTypes &CGT) 5265 : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {} 5266 }; 5267 5268 } 5269 5270 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const { 5271 // Treat an enum type as its underlying type. 5272 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 5273 Ty = EnumTy->getDecl()->getIntegerType(); 5274 5275 // Promotable integer types are required to be promoted by the ABI. 5276 if (Ty->isPromotableIntegerType()) 5277 return true; 5278 5279 // 32-bit values must also be promoted. 5280 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 5281 switch (BT->getKind()) { 5282 case BuiltinType::Int: 5283 case BuiltinType::UInt: 5284 return true; 5285 default: 5286 return false; 5287 } 5288 return false; 5289 } 5290 5291 bool SystemZABIInfo::isCompoundType(QualType Ty) const { 5292 return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty); 5293 } 5294 5295 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const { 5296 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 5297 switch (BT->getKind()) { 5298 case BuiltinType::Float: 5299 case BuiltinType::Double: 5300 return true; 5301 default: 5302 return false; 5303 } 5304 5305 if (const RecordType *RT = Ty->getAsStructureType()) { 5306 const RecordDecl *RD = RT->getDecl(); 5307 bool Found = false; 5308 5309 // If this is a C++ record, check the bases first. 
5310 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 5311 for (const auto &I : CXXRD->bases()) { 5312 QualType Base = I.getType(); 5313 5314 // Empty bases don't affect things either way. 5315 if (isEmptyRecord(getContext(), Base, true)) 5316 continue; 5317 5318 if (Found) 5319 return false; 5320 Found = isFPArgumentType(Base); 5321 if (!Found) 5322 return false; 5323 } 5324 5325 // Check the fields. 5326 for (const auto *FD : RD->fields()) { 5327 // Empty bitfields don't affect things either way. 5328 // Unlike isSingleElementStruct(), empty structure and array fields 5329 // do count. So do anonymous bitfields that aren't zero-sized. 5330 if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0) 5331 return true; 5332 5333 // Unlike isSingleElementStruct(), arrays do not count. 5334 // Nested isFPArgumentType structures still do though. 5335 if (Found) 5336 return false; 5337 Found = isFPArgumentType(FD->getType()); 5338 if (!Found) 5339 return false; 5340 } 5341 5342 // Unlike isSingleElementStruct(), trailing padding is allowed. 5343 // An 8-byte aligned struct s { float f; } is passed as a double. 5344 return Found; 5345 } 5346 5347 return false; 5348 } 5349 5350 llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 5351 CodeGenFunction &CGF) const { 5352 // Assume that va_list type is correct; should be pointer to LLVM type: 5353 // struct { 5354 // i64 __gpr; 5355 // i64 __fpr; 5356 // i8 *__overflow_arg_area; 5357 // i8 *__reg_save_area; 5358 // }; 5359 5360 // Every argument occupies 8 bytes and is passed by preference in either 5361 // GPRs or FPRs. 5362 Ty = CGF.getContext().getCanonicalType(Ty); 5363 ABIArgInfo AI = classifyArgumentType(Ty); 5364 bool InFPRs = isFPArgumentType(Ty); 5365 5366 llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty)); 5367 bool IsIndirect = AI.isIndirect(); 5368 unsigned UnpaddedBitSize; 5369 if (IsIndirect) { 5370 APTy = llvm::PointerType::getUnqual(APTy); 5371 UnpaddedBitSize = 64; 5372 } else 5373 UnpaddedBitSize = getContext().getTypeSize(Ty); 5374 unsigned PaddedBitSize = 64; 5375 assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size."); 5376 5377 unsigned PaddedSize = PaddedBitSize / 8; 5378 unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8; 5379 5380 unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding; 5381 if (InFPRs) { 5382 MaxRegs = 4; // Maximum of 4 FPR arguments 5383 RegCountField = 1; // __fpr 5384 RegSaveIndex = 16; // save offset for f0 5385 RegPadding = 0; // floats are passed in the high bits of an FPR 5386 } else { 5387 MaxRegs = 5; // Maximum of 5 GPR arguments 5388 RegCountField = 0; // __gpr 5389 RegSaveIndex = 2; // save offset for r2 5390 RegPadding = Padding; // values are passed in the low bits of a GPR 5391 } 5392 5393 llvm::Value *RegCountPtr = 5394 CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr"); 5395 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count"); 5396 llvm::Type *IndexTy = RegCount->getType(); 5397 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs); 5398 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV, 5399 "fits_in_regs"); 5400 5401 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 5402 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 5403 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 5404 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 5405 5406 // Emit code to load the value if 
it was passed in registers. 5407 CGF.EmitBlock(InRegBlock); 5408 5409 // Work out the address of an argument register. 5410 llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize); 5411 llvm::Value *ScaledRegCount = 5412 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count"); 5413 llvm::Value *RegBase = 5414 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding); 5415 llvm::Value *RegOffset = 5416 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset"); 5417 llvm::Value *RegSaveAreaPtr = 5418 CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr"); 5419 llvm::Value *RegSaveArea = 5420 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area"); 5421 llvm::Value *RawRegAddr = 5422 CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr"); 5423 llvm::Value *RegAddr = 5424 CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr"); 5425 5426 // Update the register count 5427 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1); 5428 llvm::Value *NewRegCount = 5429 CGF.Builder.CreateAdd(RegCount, One, "reg_count"); 5430 CGF.Builder.CreateStore(NewRegCount, RegCountPtr); 5431 CGF.EmitBranch(ContBlock); 5432 5433 // Emit code to load the value if it was passed in memory. 5434 CGF.EmitBlock(InMemBlock); 5435 5436 // Work out the address of a stack argument. 5437 llvm::Value *OverflowArgAreaPtr = 5438 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr"); 5439 llvm::Value *OverflowArgArea = 5440 CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"); 5441 llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding); 5442 llvm::Value *RawMemAddr = 5443 CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr"); 5444 llvm::Value *MemAddr = 5445 CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr"); 5446 5447 // Update overflow_arg_area_ptr pointer 5448 llvm::Value *NewOverflowArgArea = 5449 CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area"); 5450 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 5451 CGF.EmitBranch(ContBlock); 5452 5453 // Return the appropriate result. 5454 CGF.EmitBlock(ContBlock); 5455 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr"); 5456 ResAddr->addIncoming(RegAddr, InRegBlock); 5457 ResAddr->addIncoming(MemAddr, InMemBlock); 5458 5459 if (IsIndirect) 5460 return CGF.Builder.CreateLoad(ResAddr, "indirect_arg"); 5461 5462 return ResAddr; 5463 } 5464 5465 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const { 5466 if (RetTy->isVoidType()) 5467 return ABIArgInfo::getIgnore(); 5468 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64) 5469 return ABIArgInfo::getIndirect(0); 5470 return (isPromotableIntegerType(RetTy) ? 5471 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 5472 } 5473 5474 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const { 5475 // Handle the generic C++ ABI. 5476 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 5477 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 5478 5479 // Integers and enums are extended to full register width. 5480 if (isPromotableIntegerType(Ty)) 5481 return ABIArgInfo::getExtend(); 5482 5483 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly. 5484 uint64_t Size = getContext().getTypeSize(Ty); 5485 if (Size != 8 && Size != 16 && Size != 32 && Size != 64) 5486 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 5487 5488 // Handle small structures. 
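// Illustrative examples (assumed, not from the SystemZ ABI text): a 4-byte
// "struct { float f; }" satisfies isFPArgumentType() and is passed as an LLVM
// float below, while a 4-byte "struct { int i; }" is passed as an unextended
// i32.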
5489 if (const RecordType *RT = Ty->getAs<RecordType>()) { 5490 // Structures with flexible arrays have variable length, so really 5491 // fail the size test above. 5492 const RecordDecl *RD = RT->getDecl(); 5493 if (RD->hasFlexibleArrayMember()) 5494 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 5495 5496 // The structure is passed as an unextended integer, a float, or a double. 5497 llvm::Type *PassTy; 5498 if (isFPArgumentType(Ty)) { 5499 assert(Size == 32 || Size == 64); 5500 if (Size == 32) 5501 PassTy = llvm::Type::getFloatTy(getVMContext()); 5502 else 5503 PassTy = llvm::Type::getDoubleTy(getVMContext()); 5504 } else 5505 PassTy = llvm::IntegerType::get(getVMContext(), Size); 5506 return ABIArgInfo::getDirect(PassTy); 5507 } 5508 5509 // Non-structure compounds are passed indirectly. 5510 if (isCompoundType(Ty)) 5511 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 5512 5513 return ABIArgInfo::getDirect(nullptr); 5514 } 5515 5516 //===----------------------------------------------------------------------===// 5517 // MSP430 ABI Implementation 5518 //===----------------------------------------------------------------------===// 5519 5520 namespace { 5521 5522 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 5523 public: 5524 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 5525 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 5526 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5527 CodeGen::CodeGenModule &M) const override; 5528 }; 5529 5530 } 5531 5532 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 5533 llvm::GlobalValue *GV, 5534 CodeGen::CodeGenModule &M) const { 5535 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 5536 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 5537 // Handle 'interrupt' attribute: 5538 llvm::Function *F = cast<llvm::Function>(GV); 5539 5540 // Step 1: Set ISR calling convention. 5541 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 5542 5543 // Step 2: Add attributes goodness. 5544 F->addFnAttr(llvm::Attribute::NoInline); 5545 5546 // Step 3: Emit ISR vector alias. 5547 unsigned Num = attr->getNumber() / 2; 5548 llvm::GlobalAlias::create(llvm::Function::ExternalLinkage, 5549 "__isr_" + Twine(Num), F); 5550 } 5551 } 5552 } 5553 5554 //===----------------------------------------------------------------------===// 5555 // MIPS ABI Implementation. This works for both little-endian and 5556 // big-endian variants. 5557 //===----------------------------------------------------------------------===// 5558 5559 namespace { 5560 class MipsABIInfo : public ABIInfo { 5561 bool IsO32; 5562 unsigned MinABIStackAlignInBytes, StackAlignInBytes; 5563 void CoerceToIntArgs(uint64_t TySize, 5564 SmallVectorImpl<llvm::Type *> &ArgList) const; 5565 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; 5566 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 5567 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 5568 public: 5569 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 5570 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), 5571 StackAlignInBytes(IsO32 ? 
8 : 16) {} 5572 5573 ABIArgInfo classifyReturnType(QualType RetTy) const; 5574 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 5575 void computeInfo(CGFunctionInfo &FI) const override; 5576 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 5577 CodeGenFunction &CGF) const override; 5578 }; 5579 5580 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 5581 unsigned SizeOfUnwindException; 5582 public: 5583 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 5584 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 5585 SizeOfUnwindException(IsO32 ? 24 : 32) {} 5586 5587 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 5588 return 29; 5589 } 5590 5591 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5592 CodeGen::CodeGenModule &CGM) const override { 5593 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 5594 if (!FD) return; 5595 llvm::Function *Fn = cast<llvm::Function>(GV); 5596 if (FD->hasAttr<Mips16Attr>()) { 5597 Fn->addFnAttr("mips16"); 5598 } 5599 else if (FD->hasAttr<NoMips16Attr>()) { 5600 Fn->addFnAttr("nomips16"); 5601 } 5602 } 5603 5604 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 5605 llvm::Value *Address) const override; 5606 5607 unsigned getSizeOfUnwindException() const override { 5608 return SizeOfUnwindException; 5609 } 5610 }; 5611 } 5612 5613 void MipsABIInfo::CoerceToIntArgs(uint64_t TySize, 5614 SmallVectorImpl<llvm::Type *> &ArgList) const { 5615 llvm::IntegerType *IntTy = 5616 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 5617 5618 // Add (TySize / MinABIStackAlignInBytes) args of IntTy. 5619 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) 5620 ArgList.push_back(IntTy); 5621 5622 // If necessary, add one more integer type to ArgList. 5623 unsigned R = TySize % (MinABIStackAlignInBytes * 8); 5624 5625 if (R) 5626 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 5627 } 5628 5629 // In N32/64, an aligned double precision floating point field is passed in 5630 // a register. 5631 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { 5632 SmallVector<llvm::Type*, 8> ArgList, IntArgList; 5633 5634 if (IsO32) { 5635 CoerceToIntArgs(TySize, ArgList); 5636 return llvm::StructType::get(getVMContext(), ArgList); 5637 } 5638 5639 if (Ty->isComplexType()) 5640 return CGT.ConvertType(Ty); 5641 5642 const RecordType *RT = Ty->getAs<RecordType>(); 5643 5644 // Unions/vectors are passed in integer registers. 5645 if (!RT || !RT->isStructureOrClassType()) { 5646 CoerceToIntArgs(TySize, ArgList); 5647 return llvm::StructType::get(getVMContext(), ArgList); 5648 } 5649 5650 const RecordDecl *RD = RT->getDecl(); 5651 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 5652 assert(!(TySize % 8) && "Size of structure must be multiple of 8."); 5653 5654 uint64_t LastOffset = 0; 5655 unsigned idx = 0; 5656 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 5657 5658 // Iterate over fields in the struct/class and check if there are any aligned 5659 // double fields. 5660 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 5661 i != e; ++i, ++idx) { 5662 const QualType Ty = i->getType(); 5663 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 5664 5665 if (!BT || BT->getKind() != BuiltinType::Double) 5666 continue; 5667 5668 uint64_t Offset = Layout.getFieldOffset(idx); 5669 if (Offset % 64) // Ignore doubles that are not aligned. 
5670 continue; 5671 5672 // Add ((Offset - LastOffset) / 64) args of type i64. 5673 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 5674 ArgList.push_back(I64); 5675 5676 // Add double type. 5677 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 5678 LastOffset = Offset + 64; 5679 } 5680 5681 CoerceToIntArgs(TySize - LastOffset, IntArgList); 5682 ArgList.append(IntArgList.begin(), IntArgList.end()); 5683 5684 return llvm::StructType::get(getVMContext(), ArgList); 5685 } 5686 5687 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset, 5688 uint64_t Offset) const { 5689 if (OrigOffset + MinABIStackAlignInBytes > Offset) 5690 return nullptr; 5691 5692 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8); 5693 } 5694 5695 ABIArgInfo 5696 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 5697 uint64_t OrigOffset = Offset; 5698 uint64_t TySize = getContext().getTypeSize(Ty); 5699 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 5700 5701 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), 5702 (uint64_t)StackAlignInBytes); 5703 unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align); 5704 Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8; 5705 5706 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { 5707 // Ignore empty aggregates. 5708 if (TySize == 0) 5709 return ABIArgInfo::getIgnore(); 5710 5711 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 5712 Offset = OrigOffset + MinABIStackAlignInBytes; 5713 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 5714 } 5715 5716 // If we have reached here, aggregates are passed directly by coercing to 5717 // another structure type. Padding is inserted if the offset of the 5718 // aggregate is unaligned. 5719 ABIArgInfo ArgInfo = 5720 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, 5721 getPaddingType(OrigOffset, CurrOffset)); 5722 ArgInfo.setInReg(true); 5723 return ArgInfo; 5724 } 5725 5726 // Treat an enum type as its underlying type. 5727 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 5728 Ty = EnumTy->getDecl()->getIntegerType(); 5729 5730 // All integral types are promoted to the GPR width. 5731 if (Ty->isIntegralOrEnumerationType()) 5732 return ABIArgInfo::getExtend(); 5733 5734 return ABIArgInfo::getDirect( 5735 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset)); 5736 } 5737 5738 llvm::Type* 5739 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 5740 const RecordType *RT = RetTy->getAs<RecordType>(); 5741 SmallVector<llvm::Type*, 8> RTList; 5742 5743 if (RT && RT->isStructureOrClassType()) { 5744 const RecordDecl *RD = RT->getDecl(); 5745 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 5746 unsigned FieldCnt = Layout.getFieldCount(); 5747 5748 // N32/64 returns struct/classes in floating point registers if the 5749 // following conditions are met: 5750 // 1. The size of the struct/class is no larger than 128-bit. 5751 // 2. The struct/class has one or two fields all of which are floating 5752 // point types. 5753 // 3. The offset of the first field is zero (this follows what gcc does). 5754 // 5755 // Any other composite results are returned in integer registers. 
5756 // 5757 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 5758 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 5759 for (; b != e; ++b) { 5760 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 5761 5762 if (!BT || !BT->isFloatingPoint()) 5763 break; 5764 5765 RTList.push_back(CGT.ConvertType(b->getType())); 5766 } 5767 5768 if (b == e) 5769 return llvm::StructType::get(getVMContext(), RTList, 5770 RD->hasAttr<PackedAttr>()); 5771 5772 RTList.clear(); 5773 } 5774 } 5775 5776 CoerceToIntArgs(Size, RTList); 5777 return llvm::StructType::get(getVMContext(), RTList); 5778 } 5779 5780 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 5781 uint64_t Size = getContext().getTypeSize(RetTy); 5782 5783 if (RetTy->isVoidType()) 5784 return ABIArgInfo::getIgnore(); 5785 5786 // O32 doesn't treat zero-sized structs differently from other structs. 5787 // However, N32/N64 ignores zero sized return values. 5788 if (!IsO32 && Size == 0) 5789 return ABIArgInfo::getIgnore(); 5790 5791 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 5792 if (Size <= 128) { 5793 if (RetTy->isAnyComplexType()) 5794 return ABIArgInfo::getDirect(); 5795 5796 // O32 returns integer vectors in registers and N32/N64 returns all small 5797 // aggregates in registers. 5798 if (!IsO32 || 5799 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) { 5800 ABIArgInfo ArgInfo = 5801 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 5802 ArgInfo.setInReg(true); 5803 return ArgInfo; 5804 } 5805 } 5806 5807 return ABIArgInfo::getIndirect(0); 5808 } 5809 5810 // Treat an enum type as its underlying type. 5811 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 5812 RetTy = EnumTy->getDecl()->getIntegerType(); 5813 5814 return (RetTy->isPromotableIntegerType() ? 5815 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 5816 } 5817 5818 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 5819 ABIArgInfo &RetInfo = FI.getReturnInfo(); 5820 if (!getCXXABI().classifyReturnType(FI)) 5821 RetInfo = classifyReturnType(FI.getReturnType()); 5822 5823 // Check if a pointer to an aggregate is passed as a hidden argument. 5824 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0; 5825 5826 for (auto &I : FI.arguments()) 5827 I.info = classifyArgumentType(I.type, Offset); 5828 } 5829 5830 llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 5831 CodeGenFunction &CGF) const { 5832 llvm::Type *BP = CGF.Int8PtrTy; 5833 llvm::Type *BPP = CGF.Int8PtrPtrTy; 5834 5835 // Integer arguments are promoted 32-bit on O32 and 64-bit on N32/N64. 5836 unsigned SlotSizeInBits = IsO32 ? 32 : 64; 5837 if (Ty->isIntegerType() && 5838 CGF.getContext().getIntWidth(Ty) < SlotSizeInBits) { 5839 Ty = CGF.getContext().getIntTypeForBitwidth(SlotSizeInBits, 5840 Ty->isSignedIntegerType()); 5841 } 5842 5843 CGBuilderTy &Builder = CGF.Builder; 5844 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 5845 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 5846 int64_t TypeAlign = 5847 std::min(getContext().getTypeAlign(Ty) / 8, StackAlignInBytes); 5848 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 5849 llvm::Value *AddrTyped; 5850 unsigned PtrWidth = getTarget().getPointerWidth(0); 5851 llvm::IntegerType *IntTy = (PtrWidth == 32) ? 
CGF.Int32Ty : CGF.Int64Ty;
5852
5853 if (TypeAlign > MinABIStackAlignInBytes) {
5854 llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
5855 llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
5856 llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
5857 llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
5858 llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
5859 AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
5860 }
5861 else
5862 AddrTyped = Builder.CreateBitCast(Addr, PTy);
5863
5864 llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
5865 TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
5866 unsigned ArgSizeInBits = CGF.getContext().getTypeSize(Ty);
5867 uint64_t Offset = llvm::RoundUpToAlignment(ArgSizeInBits / 8, TypeAlign);
5868 llvm::Value *NextAddr =
5869 Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
5870 "ap.next");
5871 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
5872
5873 return AddrTyped;
5874 }
5875
5876 bool
5877 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5878 llvm::Value *Address) const {
5879 // This information comes from gcc's implementation, which seems to be
5880 // as canonical as it gets.
5881
5882 // Everything on MIPS is 4 bytes. Double-precision FP registers
5883 // are aliased to pairs of single-precision FP registers.
5884 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
5885
5886 // 0-31 are the general purpose registers, $0 - $31.
5887 // 32-63 are the floating-point registers, $f0 - $f31.
5888 // 64 and 65 are the multiply/divide registers, $hi and $lo.
5889 // 66 is the (notional, I think) register for signal-handler return.
5890 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
5891
5892 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
5893 // They are one bit wide and ignored here.
5894
5895 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
5896 // (coprocessor 1 is the FP unit)
5897 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
5898 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
5899 // 176-181 are the DSP accumulator registers.
5900 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
5901 return false;
5902 }
5903
5904 //===----------------------------------------------------------------------===//
5905 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
5906 // Currently subclassed only to implement custom OpenCL C function attribute
5907 // handling.
5908 //===----------------------------------------------------------------------===// 5909 5910 namespace { 5911 5912 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { 5913 public: 5914 TCETargetCodeGenInfo(CodeGenTypes &CGT) 5915 : DefaultTargetCodeGenInfo(CGT) {} 5916 5917 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5918 CodeGen::CodeGenModule &M) const override; 5919 }; 5920 5921 void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D, 5922 llvm::GlobalValue *GV, 5923 CodeGen::CodeGenModule &M) const { 5924 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); 5925 if (!FD) return; 5926 5927 llvm::Function *F = cast<llvm::Function>(GV); 5928 5929 if (M.getLangOpts().OpenCL) { 5930 if (FD->hasAttr<OpenCLKernelAttr>()) { 5931 // OpenCL C Kernel functions are not subject to inlining 5932 F->addFnAttr(llvm::Attribute::NoInline); 5933 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>(); 5934 if (Attr) { 5935 // Convert the reqd_work_group_size() attributes to metadata. 5936 llvm::LLVMContext &Context = F->getContext(); 5937 llvm::NamedMDNode *OpenCLMetadata = 5938 M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info"); 5939 5940 SmallVector<llvm::Metadata *, 5> Operands; 5941 Operands.push_back(llvm::ConstantAsMetadata::get(F)); 5942 5943 Operands.push_back( 5944 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 5945 M.Int32Ty, llvm::APInt(32, Attr->getXDim())))); 5946 Operands.push_back( 5947 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 5948 M.Int32Ty, llvm::APInt(32, Attr->getYDim())))); 5949 Operands.push_back( 5950 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 5951 M.Int32Ty, llvm::APInt(32, Attr->getZDim())))); 5952 5953 // Add a boolean constant operand for "required" (true) or "hint" (false) 5954 // for implementing the work_group_size_hint attr later. Currently 5955 // always true as the hint is not yet implemented. 
5956 Operands.push_back( 5957 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context))); 5958 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); 5959 } 5960 } 5961 } 5962 } 5963 5964 } 5965 5966 //===----------------------------------------------------------------------===// 5967 // Hexagon ABI Implementation 5968 //===----------------------------------------------------------------------===// 5969 5970 namespace { 5971 5972 class HexagonABIInfo : public ABIInfo { 5973 5974 5975 public: 5976 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 5977 5978 private: 5979 5980 ABIArgInfo classifyReturnType(QualType RetTy) const; 5981 ABIArgInfo classifyArgumentType(QualType RetTy) const; 5982 5983 void computeInfo(CGFunctionInfo &FI) const override; 5984 5985 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 5986 CodeGenFunction &CGF) const override; 5987 }; 5988 5989 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { 5990 public: 5991 HexagonTargetCodeGenInfo(CodeGenTypes &CGT) 5992 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {} 5993 5994 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 5995 return 29; 5996 } 5997 }; 5998 5999 } 6000 6001 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { 6002 if (!getCXXABI().classifyReturnType(FI)) 6003 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 6004 for (auto &I : FI.arguments()) 6005 I.info = classifyArgumentType(I.type); 6006 } 6007 6008 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { 6009 if (!isAggregateTypeForABI(Ty)) { 6010 // Treat an enum type as its underlying type. 6011 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 6012 Ty = EnumTy->getDecl()->getIntegerType(); 6013 6014 return (Ty->isPromotableIntegerType() ? 6015 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 6016 } 6017 6018 // Ignore empty records. 6019 if (isEmptyRecord(getContext(), Ty, true)) 6020 return ABIArgInfo::getIgnore(); 6021 6022 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 6023 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 6024 6025 uint64_t Size = getContext().getTypeSize(Ty); 6026 if (Size > 64) 6027 return ABIArgInfo::getIndirect(0, /*ByVal=*/true); 6028 // Pass in the smallest viable integer type. 6029 else if (Size > 32) 6030 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 6031 else if (Size > 16) 6032 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 6033 else if (Size > 8) 6034 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 6035 else 6036 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 6037 } 6038 6039 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { 6040 if (RetTy->isVoidType()) 6041 return ABIArgInfo::getIgnore(); 6042 6043 // Large vector types should be returned via memory. 6044 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64) 6045 return ABIArgInfo::getIndirect(0); 6046 6047 if (!isAggregateTypeForABI(RetTy)) { 6048 // Treat an enum type as its underlying type. 6049 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 6050 RetTy = EnumTy->getDecl()->getIntegerType(); 6051 6052 return (RetTy->isPromotableIntegerType() ? 
6053 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6054 }
6055
6056 if (isEmptyRecord(getContext(), RetTy, true))
6057 return ABIArgInfo::getIgnore();
6058
6059 // Aggregates <= 8 bytes are returned in r0; other aggregates
6060 // are returned indirectly.
6061 uint64_t Size = getContext().getTypeSize(RetTy);
6062 if (Size <= 64) {
6063 // Return in the smallest viable integer type.
6064 if (Size <= 8)
6065 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6066 if (Size <= 16)
6067 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6068 if (Size <= 32)
6069 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6070 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
6071 }
6072
6073 return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
6074 }
6075
6076 llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6077 CodeGenFunction &CGF) const {
6078 // FIXME: Need to handle alignment
6079 llvm::Type *BPP = CGF.Int8PtrPtrTy;
6080
6081 CGBuilderTy &Builder = CGF.Builder;
6082 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
6083 "ap");
6084 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
6085 llvm::Type *PTy =
6086 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
6087 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
6088
6089 uint64_t Offset =
6090 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
6091 llvm::Value *NextAddr =
6092 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
6093 "ap.next");
6094 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
6095
6096 return AddrTyped;
6097 }
6098
6099 //===----------------------------------------------------------------------===//
6100 // AMDGPU ABI Implementation
6101 //===----------------------------------------------------------------------===//
6102
6103 namespace {
6104
6105 class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
6106 public:
6107 AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
6108 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
6109 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6110 CodeGen::CodeGenModule &M) const override;
6111 };
6112
6113 }
6114
6115 void AMDGPUTargetCodeGenInfo::SetTargetAttributes(
6116 const Decl *D,
6117 llvm::GlobalValue *GV,
6118 CodeGen::CodeGenModule &M) const {
6119 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
6120 if (!FD)
6121 return;
6122
6123 if (const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
6124 llvm::Function *F = cast<llvm::Function>(GV);
6125 uint32_t NumVGPR = Attr->getNumVGPR();
6126 if (NumVGPR != 0)
6127 F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR));
6128 }
6129
6130 if (const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
6131 llvm::Function *F = cast<llvm::Function>(GV);
6132 unsigned NumSGPR = Attr->getNumSGPR();
6133 if (NumSGPR != 0)
6134 F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR));
6135 }
6136 }
6137
6138
6139 //===----------------------------------------------------------------------===//
6140 // SPARC v9 ABI Implementation.
6141 // Based on the SPARC Compliance Definition version 2.4.1.
6142 //
6143 // Function arguments are mapped to a nominal "parameter array" and promoted to
6144 // registers depending on their type. Each argument occupies 8 or 16 bytes in
6145 // the array; structs larger than 16 bytes are passed indirectly.
6146 // 6147 // One case requires special care: 6148 // 6149 // struct mixed { 6150 // int i; 6151 // float f; 6152 // }; 6153 // 6154 // When a struct mixed is passed by value, it only occupies 8 bytes in the 6155 // parameter array, but the int is passed in an integer register, and the float 6156 // is passed in a floating point register. This is represented as two arguments 6157 // with the LLVM IR inreg attribute: 6158 // 6159 // declare void f(i32 inreg %i, float inreg %f) 6160 // 6161 // The code generator will only allocate 4 bytes from the parameter array for 6162 // the inreg arguments. All other arguments are allocated a multiple of 8 6163 // bytes. 6164 // 6165 namespace { 6166 class SparcV9ABIInfo : public ABIInfo { 6167 public: 6168 SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 6169 6170 private: 6171 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const; 6172 void computeInfo(CGFunctionInfo &FI) const override; 6173 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 6174 CodeGenFunction &CGF) const override; 6175 6176 // Coercion type builder for structs passed in registers. The coercion type 6177 // serves two purposes: 6178 // 6179 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned' 6180 // in registers. 6181 // 2. Expose aligned floating point elements as first-level elements, so the 6182 // code generator knows to pass them in floating point registers. 6183 // 6184 // We also compute the InReg flag which indicates that the struct contains 6185 // aligned 32-bit floats. 6186 // 6187 struct CoerceBuilder { 6188 llvm::LLVMContext &Context; 6189 const llvm::DataLayout &DL; 6190 SmallVector<llvm::Type*, 8> Elems; 6191 uint64_t Size; 6192 bool InReg; 6193 6194 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl) 6195 : Context(c), DL(dl), Size(0), InReg(false) {} 6196 6197 // Pad Elems with integers until Size is ToSize. 6198 void pad(uint64_t ToSize) { 6199 assert(ToSize >= Size && "Cannot remove elements"); 6200 if (ToSize == Size) 6201 return; 6202 6203 // Finish the current 64-bit word. 6204 uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64); 6205 if (Aligned > Size && Aligned <= ToSize) { 6206 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size)); 6207 Size = Aligned; 6208 } 6209 6210 // Add whole 64-bit words. 6211 while (Size + 64 <= ToSize) { 6212 Elems.push_back(llvm::Type::getInt64Ty(Context)); 6213 Size += 64; 6214 } 6215 6216 // Final in-word padding. 6217 if (Size < ToSize) { 6218 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size)); 6219 Size = ToSize; 6220 } 6221 } 6222 6223 // Add a floating point element at Offset. 6224 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) { 6225 // Unaligned floats are treated as integers. 6226 if (Offset % Bits) 6227 return; 6228 // The InReg flag is only required if there are any floats < 64 bits. 6229 if (Bits < 64) 6230 InReg = true; 6231 pad(Offset); 6232 Elems.push_back(Ty); 6233 Size = Offset + Bits; 6234 } 6235 6236 // Add a struct type to the coercion type, starting at Offset (in bits). 
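// As a sketch of the expected result for the 'struct mixed' example in the SPARC header comment above (inferred from these rules, not verified output): the builder produces a coercion type with elements { i32, float } and sets InReg, which yields the two inreg arguments shown there.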
6237 void addStruct(uint64_t Offset, llvm::StructType *StrTy) { 6238 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy); 6239 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) { 6240 llvm::Type *ElemTy = StrTy->getElementType(i); 6241 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i); 6242 switch (ElemTy->getTypeID()) { 6243 case llvm::Type::StructTyID: 6244 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy)); 6245 break; 6246 case llvm::Type::FloatTyID: 6247 addFloat(ElemOffset, ElemTy, 32); 6248 break; 6249 case llvm::Type::DoubleTyID: 6250 addFloat(ElemOffset, ElemTy, 64); 6251 break; 6252 case llvm::Type::FP128TyID: 6253 addFloat(ElemOffset, ElemTy, 128); 6254 break; 6255 case llvm::Type::PointerTyID: 6256 if (ElemOffset % 64 == 0) { 6257 pad(ElemOffset); 6258 Elems.push_back(ElemTy); 6259 Size += 64; 6260 } 6261 break; 6262 default: 6263 break; 6264 } 6265 } 6266 } 6267 6268 // Check if Ty is a usable substitute for the coercion type. 6269 bool isUsableType(llvm::StructType *Ty) const { 6270 if (Ty->getNumElements() != Elems.size()) 6271 return false; 6272 for (unsigned i = 0, e = Elems.size(); i != e; ++i) 6273 if (Elems[i] != Ty->getElementType(i)) 6274 return false; 6275 return true; 6276 } 6277 6278 // Get the coercion type as a literal struct type. 6279 llvm::Type *getType() const { 6280 if (Elems.size() == 1) 6281 return Elems.front(); 6282 else 6283 return llvm::StructType::get(Context, Elems); 6284 } 6285 }; 6286 }; 6287 } // end anonymous namespace 6288 6289 ABIArgInfo 6290 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const { 6291 if (Ty->isVoidType()) 6292 return ABIArgInfo::getIgnore(); 6293 6294 uint64_t Size = getContext().getTypeSize(Ty); 6295 6296 // Anything too big to fit in registers is passed with an explicit indirect 6297 // pointer / sret pointer. 6298 if (Size > SizeLimit) 6299 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 6300 6301 // Treat an enum type as its underlying type. 6302 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 6303 Ty = EnumTy->getDecl()->getIntegerType(); 6304 6305 // Integer types smaller than a register are extended. 6306 if (Size < 64 && Ty->isIntegerType()) 6307 return ABIArgInfo::getExtend(); 6308 6309 // Other non-aggregates go in registers. 6310 if (!isAggregateTypeForABI(Ty)) 6311 return ABIArgInfo::getDirect(); 6312 6313 // If a C++ object has either a non-trivial copy constructor or a non-trivial 6314 // destructor, it is passed with an explicit indirect pointer / sret pointer. 6315 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 6316 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 6317 6318 // This is a small aggregate type that should be passed in registers. 6319 // Build a coercion type from the LLVM struct type. 6320 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty)); 6321 if (!StrTy) 6322 return ABIArgInfo::getDirect(); 6323 6324 CoerceBuilder CB(getVMContext(), getDataLayout()); 6325 CB.addStruct(0, StrTy); 6326 CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64)); 6327 6328 // Try to use the original type for coercion. 6329 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? 
StrTy : CB.getType(); 6330 6331 if (CB.InReg) 6332 return ABIArgInfo::getDirectInReg(CoerceTy); 6333 else 6334 return ABIArgInfo::getDirect(CoerceTy); 6335 } 6336 6337 llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 6338 CodeGenFunction &CGF) const { 6339 ABIArgInfo AI = classifyType(Ty, 16 * 8); 6340 llvm::Type *ArgTy = CGT.ConvertType(Ty); 6341 if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) 6342 AI.setCoerceToType(ArgTy); 6343 6344 llvm::Type *BPP = CGF.Int8PtrPtrTy; 6345 CGBuilderTy &Builder = CGF.Builder; 6346 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 6347 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 6348 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); 6349 llvm::Value *ArgAddr; 6350 unsigned Stride; 6351 6352 switch (AI.getKind()) { 6353 case ABIArgInfo::Expand: 6354 case ABIArgInfo::InAlloca: 6355 llvm_unreachable("Unsupported ABI kind for va_arg"); 6356 6357 case ABIArgInfo::Extend: 6358 Stride = 8; 6359 ArgAddr = Builder 6360 .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy), 6361 "extend"); 6362 break; 6363 6364 case ABIArgInfo::Direct: 6365 Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType()); 6366 ArgAddr = Addr; 6367 break; 6368 6369 case ABIArgInfo::Indirect: 6370 Stride = 8; 6371 ArgAddr = Builder.CreateBitCast(Addr, 6372 llvm::PointerType::getUnqual(ArgPtrTy), 6373 "indirect"); 6374 ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg"); 6375 break; 6376 6377 case ABIArgInfo::Ignore: 6378 return llvm::UndefValue::get(ArgPtrTy); 6379 } 6380 6381 // Update VAList. 6382 Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next"); 6383 Builder.CreateStore(Addr, VAListAddrAsBPP); 6384 6385 return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr"); 6386 } 6387 6388 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const { 6389 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8); 6390 for (auto &I : FI.arguments()) 6391 I.info = classifyType(I.type, 16 * 8); 6392 } 6393 6394 namespace { 6395 class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo { 6396 public: 6397 SparcV9TargetCodeGenInfo(CodeGenTypes &CGT) 6398 : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {} 6399 6400 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 6401 return 14; 6402 } 6403 6404 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 6405 llvm::Value *Address) const override; 6406 }; 6407 } // end anonymous namespace 6408 6409 bool 6410 SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 6411 llvm::Value *Address) const { 6412 // This is calculated from the LLVM and GCC tables and verified 6413 // against gcc output. AFAIK all ABIs use the same encoding. 
6414 6415 CodeGen::CGBuilderTy &Builder = CGF.Builder; 6416 6417 llvm::IntegerType *i8 = CGF.Int8Ty; 6418 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 6419 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 6420 6421 // 0-31: the 8-byte general-purpose registers 6422 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 6423 6424 // 32-63: f0-31, the 4-byte floating-point registers 6425 AssignToArrayRange(Builder, Address, Four8, 32, 63); 6426 6427 // Y = 64 6428 // PSR = 65 6429 // WIM = 66 6430 // TBR = 67 6431 // PC = 68 6432 // NPC = 69 6433 // FSR = 70 6434 // CSR = 71 6435 AssignToArrayRange(Builder, Address, Eight8, 64, 71); 6436 6437 // 72-87: d0-15, the 8-byte floating-point registers 6438 AssignToArrayRange(Builder, Address, Eight8, 72, 87); 6439 6440 return false; 6441 } 6442 6443 6444 //===----------------------------------------------------------------------===// 6445 // XCore ABI Implementation 6446 //===----------------------------------------------------------------------===// 6447 6448 namespace { 6449 6450 /// A SmallStringEnc instance is used to build up the TypeString by passing 6451 /// it by reference between functions that append to it. 6452 typedef llvm::SmallString<128> SmallStringEnc; 6453 6454 /// TypeStringCache caches the meta encodings of Types. 6455 /// 6456 /// The reason for caching TypeStrings is twofold: 6457 /// 1. To cache a type's encoding for later uses; 6458 /// 2. As a means to break recursive member type inclusion. 6459 /// 6460 /// A cache Entry can have a Status of: 6461 /// NonRecursive: The type encoding is not recursive; 6462 /// Recursive: The type encoding is recursive; 6463 /// Incomplete: An incomplete TypeString; 6464 /// IncompleteUsed: An incomplete TypeString that has been used in a 6465 /// Recursive type encoding. 6466 /// 6467 /// A NonRecursive entry will have all of its sub-members expanded as fully 6468 /// as possible. Whilst it may contain types which are recursive, the type 6469 /// itself is not recursive and thus its encoding may be safely used whenever 6470 /// the type is encountered. 6471 /// 6472 /// A Recursive entry will have all of its sub-members expanded as fully as 6473 /// possible. The type itself is recursive and it may contain other types which 6474 /// are recursive. The Recursive encoding must not be used during the expansion 6475 /// of a recursive type's recursive branch. For simplicity the code uses 6476 /// IncompleteCount to reject all usage of Recursive encodings for member types. 6477 /// 6478 /// An Incomplete entry is always a RecordType and only encodes its 6479 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and 6480 /// are placed into the cache during type expansion as a means to identify and 6481 /// handle recursive inclusion of types as sub-members. If there is recursion, 6482 /// the entry becomes IncompleteUsed. 6483 /// 6484 /// During the expansion of a RecordType's members: 6485 /// 6486 /// If the cache contains a NonRecursive encoding for the member type, the 6487 /// cached encoding is used; 6488 /// 6489 /// If the cache contains a Recursive encoding for the member type, the 6490 /// cached encoding is 'Swapped' out, as it may be incorrect, and... 6491 /// 6492 /// If the member is a RecordType, an Incomplete encoding is placed into the 6493 /// cache to break potential recursive inclusion of itself as a sub-member; 6494 /// 6495 /// Once a member RecordType has been expanded, its temporary incomplete 6496 /// entry is removed from the cache.
If a Recursive encoding was swapped out 6497 /// it is swapped back in; 6498 /// 6499 /// If an incomplete entry is used to expand a sub-member, the incomplete 6500 /// entry is marked as IncompleteUsed. The cache keeps count of how many 6501 /// IncompleteUsed entries it currently contains in IncompleteUsedCount; 6502 /// 6503 /// If a member's encoding is found to be NonRecursive or Recursive (viz: 6504 /// IncompleteUsedCount==0), the member's encoding is added to the cache. 6505 /// Else the member is part of a recursive type and thus the recursion has 6506 /// been exited too soon for the encoding to be correct for the member. 6507 /// 6508 class TypeStringCache { 6509 enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed}; 6510 struct Entry { 6511 std::string Str; // The encoded TypeString for the type. 6512 enum Status State; // Information about the encoding in 'Str'. 6513 std::string Swapped; // A temporary placeholder for a Recursive encoding 6514 // during the expansion of RecordType's members. 6515 }; 6516 std::map<const IdentifierInfo *, struct Entry> Map; 6517 unsigned IncompleteCount; // Number of Incomplete entries in the Map. 6518 unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map. 6519 public: 6520 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {} 6521 void addIncomplete(const IdentifierInfo *ID, std::string StubEnc); 6522 bool removeIncomplete(const IdentifierInfo *ID); 6523 void addIfComplete(const IdentifierInfo *ID, StringRef Str, 6524 bool IsRecursive); 6525 StringRef lookupStr(const IdentifierInfo *ID); 6526 }; 6527 6528 /// TypeString encodings for enum & union fields must be ordered. 6529 /// FieldEncoding is a helper for this ordering process. 6530 class FieldEncoding { 6531 bool HasName; 6532 std::string Enc; 6533 public: 6534 FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {} 6535 StringRef str() { return Enc.c_str(); } 6536 bool operator<(const FieldEncoding &rhs) const { 6537 if (HasName != rhs.HasName) return HasName; 6538 return Enc < rhs.Enc; 6539 } 6540 }; 6541 6542 class XCoreABIInfo : public DefaultABIInfo { 6543 public: 6544 XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 6545 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 6546 CodeGenFunction &CGF) const override; 6547 }; 6548 6549 class XCoreTargetCodeGenInfo : public TargetCodeGenInfo { 6550 mutable TypeStringCache TSC; 6551 public: 6552 XCoreTargetCodeGenInfo(CodeGenTypes &CGT) 6553 :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {} 6554 void emitTargetMD(const Decl *D, llvm::GlobalValue *GV, 6555 CodeGen::CodeGenModule &M) const override; 6556 }; 6557 6558 } // End anonymous namespace. 6559 6560 llvm::Value *XCoreABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 6561 CodeGenFunction &CGF) const { 6562 CGBuilderTy &Builder = CGF.Builder; 6563 6564 // Get the VAList. 6565 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, 6566 CGF.Int8PtrPtrTy); 6567 llvm::Value *AP = Builder.CreateLoad(VAListAddrAsBPP); 6568 6569 // Handle the argument.
6570 ABIArgInfo AI = classifyArgumentType(Ty); 6571 llvm::Type *ArgTy = CGT.ConvertType(Ty); 6572 if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) 6573 AI.setCoerceToType(ArgTy); 6574 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); 6575 llvm::Value *Val; 6576 uint64_t ArgSize = 0; 6577 switch (AI.getKind()) { 6578 case ABIArgInfo::Expand: 6579 case ABIArgInfo::InAlloca: 6580 llvm_unreachable("Unsupported ABI kind for va_arg"); 6581 case ABIArgInfo::Ignore: 6582 Val = llvm::UndefValue::get(ArgPtrTy); 6583 ArgSize = 0; 6584 break; 6585 case ABIArgInfo::Extend: 6586 case ABIArgInfo::Direct: 6587 Val = Builder.CreatePointerCast(AP, ArgPtrTy); 6588 ArgSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType()); 6589 if (ArgSize < 4) 6590 ArgSize = 4; 6591 break; 6592 case ABIArgInfo::Indirect: 6593 llvm::Value *ArgAddr; 6594 ArgAddr = Builder.CreateBitCast(AP, llvm::PointerType::getUnqual(ArgPtrTy)); 6595 ArgAddr = Builder.CreateLoad(ArgAddr); 6596 Val = Builder.CreatePointerCast(ArgAddr, ArgPtrTy); 6597 ArgSize = 4; 6598 break; 6599 } 6600 6601 // Increment the VAList. 6602 if (ArgSize) { 6603 llvm::Value *APN = Builder.CreateConstGEP1_32(AP, ArgSize); 6604 Builder.CreateStore(APN, VAListAddrAsBPP); 6605 } 6606 return Val; 6607 } 6608 6609 /// During the expansion of a RecordType, an incomplete TypeString is placed 6610 /// into the cache as a means to identify and break recursion. 6611 /// If there is a Recursive encoding in the cache, it is swapped out and will 6612 /// be reinserted by removeIncomplete(). 6613 /// All other types of encoding should have been used rather than arriving here. 6614 void TypeStringCache::addIncomplete(const IdentifierInfo *ID, 6615 std::string StubEnc) { 6616 if (!ID) 6617 return; 6618 Entry &E = Map[ID]; 6619 assert( (E.Str.empty() || E.State == Recursive) && 6620 "Incorrect use of addIncomplete"); 6621 assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()"); 6622 E.Swapped.swap(E.Str); // swap out the Recursive 6623 E.Str.swap(StubEnc); 6624 E.State = Incomplete; 6625 ++IncompleteCount; 6626 } 6627 6628 /// Once the RecordType has been expanded, the temporary incomplete TypeString 6629 /// must be removed from the cache. 6630 /// If a Recursive was swapped out by addIncomplete(), it will be restored. 6631 /// Returns true if the RecordType was defined recursively. 6632 bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) { 6633 if (!ID) 6634 return false; 6635 auto I = Map.find(ID); 6636 assert(I != Map.end() && "Entry not present"); 6637 Entry &E = I->second; 6638 assert( (E.State == Incomplete || 6639 E.State == IncompleteUsed) && 6640 "Entry must be an incomplete type"); 6641 bool IsRecursive = false; 6642 if (E.State == IncompleteUsed) { 6643 // We made use of our Incomplete encoding, thus we are recursive. 6644 IsRecursive = true; 6645 --IncompleteUsedCount; 6646 } 6647 if (E.Swapped.empty()) 6648 Map.erase(I); 6649 else { 6650 // Swap the Recursive back. 6651 E.Swapped.swap(E.Str); 6652 E.Swapped.clear(); 6653 E.State = Recursive; 6654 } 6655 --IncompleteCount; 6656 return IsRecursive; 6657 } 6658 6659 /// Add the encoded TypeString to the cache only if it is NonRecursive or 6660 /// Recursive (viz: all sub-members were expanded as fully as possible). 6661 void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str, 6662 bool IsRecursive) { 6663 if (!ID || IncompleteUsedCount) 6664 return; // No key or it is an incomplete sub-type, so don't add.
6665 Entry &E = Map[ID]; 6666 if (IsRecursive && !E.Str.empty()) { 6667 assert(E.State == Recursive && E.Str.size() == Str.size() && 6668 "This is not the same Recursive entry"); 6669 // The parent container was not recursive after all, so we could have used 6670 // this Recursive sub-member entry, but we assumed the worst when we 6671 // started (viz: IncompleteCount != 0). 6672 return; 6673 } 6674 assert(E.Str.empty() && "Entry already present"); 6675 E.Str = Str.str(); 6676 E.State = IsRecursive ? Recursive : NonRecursive; 6677 } 6678 6679 /// Return a cached TypeString encoding for the ID. If there isn't one, or we 6680 /// are recursively expanding a type (IncompleteCount != 0) and the cached 6681 /// encoding is Recursive, return an empty StringRef. 6682 StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) { 6683 if (!ID) 6684 return StringRef(); // We have no key. 6685 auto I = Map.find(ID); 6686 if (I == Map.end()) 6687 return StringRef(); // We have no encoding. 6688 Entry &E = I->second; 6689 if (E.State == Recursive && IncompleteCount) 6690 return StringRef(); // We don't use Recursive encodings for member types. 6691 6692 if (E.State == Incomplete) { 6693 // The incomplete type is being used to break out of recursion. 6694 E.State = IncompleteUsed; 6695 ++IncompleteUsedCount; 6696 } 6697 return E.Str.c_str(); 6698 } 6699 6700 /// The XCore ABI includes a type information section that communicates symbol 6701 /// type information to the linker. The linker uses this information to verify 6702 /// safety/correctness of things such as array bounds and pointers et al. 6703 /// The ABI only requires C (and XC) language modules to emit TypeStrings. 6704 /// This type information (TypeString) is emitted into metadata for all global 6705 /// symbols: definitions, declarations, functions & variables. 6706 /// 6707 /// The TypeString carries type, qualifier, name, size & value details. 6708 /// Please see 'Tools Development Guide' section 2.16.2 for format details: 6709 /// <https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf> 6710 /// The output is tested by test/CodeGen/xcore-stringtype.c. 6711 /// 6712 static bool getTypeString(SmallStringEnc &Enc, const Decl *D, 6713 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC); 6714 6715 /// XCore uses emitTargetMD to emit TypeString metadata for global symbols. 6716 void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV, 6717 CodeGen::CodeGenModule &CGM) const { 6718 SmallStringEnc Enc; 6719 if (getTypeString(Enc, D, CGM, TSC)) { 6720 llvm::LLVMContext &Ctx = CGM.getModule().getContext(); 6721 llvm::SmallVector<llvm::Metadata *, 2> MDVals; 6722 MDVals.push_back(llvm::ConstantAsMetadata::get(GV)); 6723 MDVals.push_back(llvm::MDString::get(Ctx, Enc.str())); 6724 llvm::NamedMDNode *MD = 6725 CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings"); 6726 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 6727 } 6728 } 6729 6730 static bool appendType(SmallStringEnc &Enc, QualType QType, 6731 const CodeGen::CodeGenModule &CGM, 6732 TypeStringCache &TSC); 6733 6734 /// Helper function for appendRecordType(). 6735 /// Builds a SmallVector containing the encoded field types in declaration order.
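/// For illustration (hypothetical fields; format follows from the code below): a field 'int x' is encoded as "m(x){si}", and a bit-field 'int y : 4' as "m(y){b(4:si)}".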
6736 static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE, 6737 const RecordDecl *RD, 6738 const CodeGen::CodeGenModule &CGM, 6739 TypeStringCache &TSC) { 6740 for (const auto *Field : RD->fields()) { 6741 SmallStringEnc Enc; 6742 Enc += "m("; 6743 Enc += Field->getName(); 6744 Enc += "){"; 6745 if (Field->isBitField()) { 6746 Enc += "b("; 6747 llvm::raw_svector_ostream OS(Enc); 6748 OS.resync(); 6749 OS << Field->getBitWidthValue(CGM.getContext()); 6750 OS.flush(); 6751 Enc += ':'; 6752 } 6753 if (!appendType(Enc, Field->getType(), CGM, TSC)) 6754 return false; 6755 if (Field->isBitField()) 6756 Enc += ')'; 6757 Enc += '}'; 6758 FE.push_back(FieldEncoding(!Field->getName().empty(), Enc)); 6759 } 6760 return true; 6761 } 6762 6763 /// Appends structure and union types to Enc and adds encoding to cache. 6764 /// Recursively calls appendType (via extractFieldType) for each field. 6765 /// Union types have their fields ordered according to the ABI. 6766 static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, 6767 const CodeGen::CodeGenModule &CGM, 6768 TypeStringCache &TSC, const IdentifierInfo *ID) { 6769 // Append the cached TypeString if we have one. 6770 StringRef TypeString = TSC.lookupStr(ID); 6771 if (!TypeString.empty()) { 6772 Enc += TypeString; 6773 return true; 6774 } 6775 6776 // Start to emit an incomplete TypeString. 6777 size_t Start = Enc.size(); 6778 Enc += (RT->isUnionType()? 'u' : 's'); 6779 Enc += '('; 6780 if (ID) 6781 Enc += ID->getName(); 6782 Enc += "){"; 6783 6784 // We collect all encoded fields and order as necessary. 6785 bool IsRecursive = false; 6786 const RecordDecl *RD = RT->getDecl()->getDefinition(); 6787 if (RD && !RD->field_empty()) { 6788 // An incomplete TypeString stub is placed in the cache for this RecordType 6789 // so that recursive calls to this RecordType will use it whilst building a 6790 // complete TypeString for this RecordType. 6791 SmallVector<FieldEncoding, 16> FE; 6792 std::string StubEnc(Enc.substr(Start).str()); 6793 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString. 6794 TSC.addIncomplete(ID, std::move(StubEnc)); 6795 if (!extractFieldType(FE, RD, CGM, TSC)) { 6796 (void) TSC.removeIncomplete(ID); 6797 return false; 6798 } 6799 IsRecursive = TSC.removeIncomplete(ID); 6800 // The ABI requires unions to be sorted but not structures. 6801 // See FieldEncoding::operator< for sort algorithm. 6802 if (RT->isUnionType()) 6803 std::sort(FE.begin(), FE.end()); 6804 // We can now complete the TypeString. 6805 unsigned E = FE.size(); 6806 for (unsigned I = 0; I != E; ++I) { 6807 if (I) 6808 Enc += ','; 6809 Enc += FE[I].str(); 6810 } 6811 } 6812 Enc += '}'; 6813 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive); 6814 return true; 6815 } 6816 6817 /// Appends enum types to Enc and adds the encoding to the cache. 6818 static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, 6819 TypeStringCache &TSC, 6820 const IdentifierInfo *ID) { 6821 // Append the cached TypeString if we have one. 6822 StringRef TypeString = TSC.lookupStr(ID); 6823 if (!TypeString.empty()) { 6824 Enc += TypeString; 6825 return true; 6826 } 6827 6828 size_t Start = Enc.size(); 6829 Enc += "e("; 6830 if (ID) 6831 Enc += ID->getName(); 6832 Enc += "){"; 6833 6834 // We collect all encoded enumerations and order them alphanumerically. 
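// For illustration (hypothetical enum; format follows from the loop below): 'enum E { B = 2, A = 1 };' is emitted, after sorting, as "e(E){m(A){1},m(B){2}}".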
6835 if (const EnumDecl *ED = ET->getDecl()->getDefinition()) { 6836 SmallVector<FieldEncoding, 16> FE; 6837 for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E; 6838 ++I) { 6839 SmallStringEnc EnumEnc; 6840 EnumEnc += "m("; 6841 EnumEnc += I->getName(); 6842 EnumEnc += "){"; 6843 I->getInitVal().toString(EnumEnc); 6844 EnumEnc += '}'; 6845 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc)); 6846 } 6847 std::sort(FE.begin(), FE.end()); 6848 unsigned E = FE.size(); 6849 for (unsigned I = 0; I != E; ++I) { 6850 if (I) 6851 Enc += ','; 6852 Enc += FE[I].str(); 6853 } 6854 } 6855 Enc += '}'; 6856 TSC.addIfComplete(ID, Enc.substr(Start), false); 6857 return true; 6858 } 6859 6860 /// Appends type's qualifier to Enc. 6861 /// This is done prior to appending the type's encoding. 6862 static void appendQualifier(SmallStringEnc &Enc, QualType QT) { 6863 // Qualifiers are emitted in alphabetical order. 6864 static const char *Table[] = {"","c:","r:","cr:","v:","cv:","rv:","crv:"}; 6865 int Lookup = 0; 6866 if (QT.isConstQualified()) 6867 Lookup += 1<<0; 6868 if (QT.isRestrictQualified()) 6869 Lookup += 1<<1; 6870 if (QT.isVolatileQualified()) 6871 Lookup += 1<<2; 6872 Enc += Table[Lookup]; 6873 } 6874 6875 /// Appends built-in types to Enc. 6876 static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) { 6877 const char *EncType; 6878 switch (BT->getKind()) { 6879 case BuiltinType::Void: 6880 EncType = "0"; 6881 break; 6882 case BuiltinType::Bool: 6883 EncType = "b"; 6884 break; 6885 case BuiltinType::Char_U: 6886 EncType = "uc"; 6887 break; 6888 case BuiltinType::UChar: 6889 EncType = "uc"; 6890 break; 6891 case BuiltinType::SChar: 6892 EncType = "sc"; 6893 break; 6894 case BuiltinType::UShort: 6895 EncType = "us"; 6896 break; 6897 case BuiltinType::Short: 6898 EncType = "ss"; 6899 break; 6900 case BuiltinType::UInt: 6901 EncType = "ui"; 6902 break; 6903 case BuiltinType::Int: 6904 EncType = "si"; 6905 break; 6906 case BuiltinType::ULong: 6907 EncType = "ul"; 6908 break; 6909 case BuiltinType::Long: 6910 EncType = "sl"; 6911 break; 6912 case BuiltinType::ULongLong: 6913 EncType = "ull"; 6914 break; 6915 case BuiltinType::LongLong: 6916 EncType = "sll"; 6917 break; 6918 case BuiltinType::Float: 6919 EncType = "ft"; 6920 break; 6921 case BuiltinType::Double: 6922 EncType = "d"; 6923 break; 6924 case BuiltinType::LongDouble: 6925 EncType = "ld"; 6926 break; 6927 default: 6928 return false; 6929 } 6930 Enc += EncType; 6931 return true; 6932 } 6933 6934 /// Appends a pointer encoding to Enc before calling appendType for the pointee. 6935 static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, 6936 const CodeGen::CodeGenModule &CGM, 6937 TypeStringCache &TSC) { 6938 Enc += "p("; 6939 if (!appendType(Enc, PT->getPointeeType(), CGM, TSC)) 6940 return false; 6941 Enc += ')'; 6942 return true; 6943 } 6944 6945 /// Appends array encoding to Enc before calling appendType for the element. 6946 static bool appendArrayType(SmallStringEnc &Enc, QualType QT, 6947 const ArrayType *AT, 6948 const CodeGen::CodeGenModule &CGM, 6949 TypeStringCache &TSC, StringRef NoSizeEnc) { 6950 if (AT->getSizeModifier() != ArrayType::Normal) 6951 return false; 6952 Enc += "a("; 6953 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) 6954 CAT->getSize().toStringUnsigned(Enc); 6955 else 6956 Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "". 6957 Enc += ':'; 6958 // The Qualifiers should be attached to the type rather than the array. 
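// For illustration (hypothetical declaration; format produced by this function): 'int a[8]' encodes as "a(8:si)".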
6959 appendQualifier(Enc, QT); 6960 if (!appendType(Enc, AT->getElementType(), CGM, TSC)) 6961 return false; 6962 Enc += ')'; 6963 return true; 6964 } 6965 6966 /// Appends a function encoding to Enc, calling appendType for the return type 6967 /// and the arguments. 6968 static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, 6969 const CodeGen::CodeGenModule &CGM, 6970 TypeStringCache &TSC) { 6971 Enc += "f{"; 6972 if (!appendType(Enc, FT->getReturnType(), CGM, TSC)) 6973 return false; 6974 Enc += "}("; 6975 if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) { 6976 // N.B. we are only interested in the adjusted param types. 6977 auto I = FPT->param_type_begin(); 6978 auto E = FPT->param_type_end(); 6979 if (I != E) { 6980 do { 6981 if (!appendType(Enc, *I, CGM, TSC)) 6982 return false; 6983 ++I; 6984 if (I != E) 6985 Enc += ','; 6986 } while (I != E); 6987 if (FPT->isVariadic()) 6988 Enc += ",va"; 6989 } else { 6990 if (FPT->isVariadic()) 6991 Enc += "va"; 6992 else 6993 Enc += '0'; 6994 } 6995 } 6996 Enc += ')'; 6997 return true; 6998 } 6999 7000 /// Handles the type's qualifier before dispatching a call to handle specific 7001 /// type encodings. 7002 static bool appendType(SmallStringEnc &Enc, QualType QType, 7003 const CodeGen::CodeGenModule &CGM, 7004 TypeStringCache &TSC) { 7005 7006 QualType QT = QType.getCanonicalType(); 7007 7008 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) 7009 // The Qualifiers should be attached to the type rather than the array. 7010 // Thus we don't call appendQualifier() here. 7011 return appendArrayType(Enc, QT, AT, CGM, TSC, ""); 7012 7013 appendQualifier(Enc, QT); 7014 7015 if (const BuiltinType *BT = QT->getAs<BuiltinType>()) 7016 return appendBuiltinType(Enc, BT); 7017 7018 if (const PointerType *PT = QT->getAs<PointerType>()) 7019 return appendPointerType(Enc, PT, CGM, TSC); 7020 7021 if (const EnumType *ET = QT->getAs<EnumType>()) 7022 return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier()); 7023 7024 if (const RecordType *RT = QT->getAsStructureType()) 7025 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); 7026 7027 if (const RecordType *RT = QT->getAsUnionType()) 7028 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); 7029 7030 if (const FunctionType *FT = QT->getAs<FunctionType>()) 7031 return appendFunctionType(Enc, FT, CGM, TSC); 7032 7033 return false; 7034 } 7035 7036 static bool getTypeString(SmallStringEnc &Enc, const Decl *D, 7037 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) { 7038 if (!D) 7039 return false; 7040 7041 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 7042 if (FD->getLanguageLinkage() != CLanguageLinkage) 7043 return false; 7044 return appendType(Enc, FD->getType(), CGM, TSC); 7045 } 7046 7047 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { 7048 if (VD->getLanguageLinkage() != CLanguageLinkage) 7049 return false; 7050 QualType QT = VD->getType().getCanonicalType(); 7051 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) { 7052 // Global ArrayTypes are given a size of '*' if the size is unknown. 7053 // The Qualifiers should be attached to the type rather than the array. 7054 // Thus we don't call appendQualifier() here. 
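// For illustration (hypothetical declaration; format follows from appendArrayType above): a global 'extern int g[];' with C linkage yields the TypeString "a(*:si)".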
7055 return appendArrayType(Enc, QT, AT, CGM, TSC, "*"); 7056 } 7057 return appendType(Enc, QT, CGM, TSC); 7058 } 7059 return false; 7060 } 7061 7062 7063 //===----------------------------------------------------------------------===// 7064 // Driver code 7065 //===----------------------------------------------------------------------===// 7066 7067 const llvm::Triple &CodeGenModule::getTriple() const { 7068 return getTarget().getTriple(); 7069 } 7070 7071 bool CodeGenModule::supportsCOMDAT() const { 7072 return !getTriple().isOSBinFormatMachO(); 7073 } 7074 7075 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { 7076 if (TheTargetCodeGenInfo) 7077 return *TheTargetCodeGenInfo; 7078 7079 const llvm::Triple &Triple = getTarget().getTriple(); 7080 switch (Triple.getArch()) { 7081 default: 7082 return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types)); 7083 7084 case llvm::Triple::le32: 7085 return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types)); 7086 case llvm::Triple::mips: 7087 case llvm::Triple::mipsel: 7088 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true)); 7089 7090 case llvm::Triple::mips64: 7091 case llvm::Triple::mips64el: 7092 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false)); 7093 7094 case llvm::Triple::aarch64: 7095 case llvm::Triple::aarch64_be: { 7096 AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS; 7097 if (getTarget().getABI() == "darwinpcs") 7098 Kind = AArch64ABIInfo::DarwinPCS; 7099 7100 return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind)); 7101 } 7102 7103 case llvm::Triple::arm: 7104 case llvm::Triple::armeb: 7105 case llvm::Triple::thumb: 7106 case llvm::Triple::thumbeb: 7107 { 7108 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; 7109 if (getTarget().getABI() == "apcs-gnu") 7110 Kind = ARMABIInfo::APCS; 7111 else if (CodeGenOpts.FloatABI == "hard" || 7112 (CodeGenOpts.FloatABI != "soft" && 7113 Triple.getEnvironment() == llvm::Triple::GNUEABIHF)) 7114 Kind = ARMABIInfo::AAPCS_VFP; 7115 7116 switch (Triple.getOS()) { 7117 case llvm::Triple::NaCl: 7118 return *(TheTargetCodeGenInfo = 7119 new NaClARMTargetCodeGenInfo(Types, Kind)); 7120 default: 7121 return *(TheTargetCodeGenInfo = 7122 new ARMTargetCodeGenInfo(Types, Kind)); 7123 } 7124 } 7125 7126 case llvm::Triple::ppc: 7127 return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types)); 7128 case llvm::Triple::ppc64: 7129 if (Triple.isOSBinFormatELF()) { 7130 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1; 7131 if (getTarget().getABI() == "elfv2") 7132 Kind = PPC64_SVR4_ABIInfo::ELFv2; 7133 7134 return *(TheTargetCodeGenInfo = 7135 new PPC64_SVR4_TargetCodeGenInfo(Types, Kind)); 7136 } else 7137 return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types)); 7138 case llvm::Triple::ppc64le: { 7139 assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!"); 7140 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2; 7141 if (getTarget().getABI() == "elfv1") 7142 Kind = PPC64_SVR4_ABIInfo::ELFv1; 7143 7144 return *(TheTargetCodeGenInfo = 7145 new PPC64_SVR4_TargetCodeGenInfo(Types, Kind)); 7146 } 7147 7148 case llvm::Triple::nvptx: 7149 case llvm::Triple::nvptx64: 7150 return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types)); 7151 7152 case llvm::Triple::msp430: 7153 return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types)); 7154 7155 case llvm::Triple::systemz: 7156 return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types)); 7157 7158 case 
llvm::Triple::tce: 7159 return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types)); 7160 7161 case llvm::Triple::x86: { 7162 bool IsDarwinVectorABI = Triple.isOSDarwin(); 7163 bool IsSmallStructInRegABI = 7164 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts); 7165 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing(); 7166 7167 if (Triple.getOS() == llvm::Triple::Win32) { 7168 return *(TheTargetCodeGenInfo = 7169 new WinX86_32TargetCodeGenInfo(Types, 7170 IsDarwinVectorABI, IsSmallStructInRegABI, 7171 IsWin32FloatStructABI, 7172 CodeGenOpts.NumRegisterParameters)); 7173 } else { 7174 return *(TheTargetCodeGenInfo = 7175 new X86_32TargetCodeGenInfo(Types, 7176 IsDarwinVectorABI, IsSmallStructInRegABI, 7177 IsWin32FloatStructABI, 7178 CodeGenOpts.NumRegisterParameters)); 7179 } 7180 } 7181 7182 case llvm::Triple::x86_64: { 7183 bool HasAVX = getTarget().getABI() == "avx"; 7184 7185 switch (Triple.getOS()) { 7186 case llvm::Triple::Win32: 7187 return *(TheTargetCodeGenInfo = 7188 new WinX86_64TargetCodeGenInfo(Types, HasAVX)); 7189 case llvm::Triple::NaCl: 7190 return *(TheTargetCodeGenInfo = 7191 new NaClX86_64TargetCodeGenInfo(Types, HasAVX)); 7192 default: 7193 return *(TheTargetCodeGenInfo = 7194 new X86_64TargetCodeGenInfo(Types, HasAVX)); 7195 } 7196 } 7197 case llvm::Triple::hexagon: 7198 return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types)); 7199 case llvm::Triple::r600: 7200 return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types)); 7201 case llvm::Triple::amdgcn: 7202 return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types)); 7203 case llvm::Triple::sparcv9: 7204 return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types)); 7205 case llvm::Triple::xcore: 7206 return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types)); 7207 } 7208 } 7209