//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm> // std::sort

using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}
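// Example for the transparent-union handling above (an illustrative sketch,
// not code compiled here): given the GCC-style declaration
//
//   typedef union __attribute__((__transparent_union__)) {
//     int *ip;
//     const int *cip;
//   } IntPtrArg;
//
// an argument of type IntPtrArg is classified exactly as its first field
// ('int *') would be, so the machine-level convention is the same no matter
// which union member the caller used.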
CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  //   AArch64    Linux
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static
  // or dynamic.
  Opt = "-l";
  Opt += Lib;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}
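// Example for isEmptyField/isEmptyRecord (illustrative only): in the C struct
//
//   struct S { int : 0; struct {} pad[2]; int x; };
//
// the unnamed bit-field and 'pad' (an array of empty records; empty structs
// in C are a GNU extension) are both "empty", so only 'x' participates in
// ABI classification.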
/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}
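// Example for isSingleElementStruct (illustrative only):
//
//   struct S1 { double d; };          // single element: 'double'
//   struct S2 { struct S1 s; };       // also 'double', found recursively
//   struct S3 { double d; int i; };   // not a single-element struct
//
// S2 qualifies because empty fields and single-element arrays are looked
// through; S3 has two non-empty fields.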
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it
/// were expanded into separate arguments. If so, we prefer to do the latter
/// to avoid inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (const auto *FD : RD->fields()) {
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}
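// Example for canExpandIndirectArgument (illustrative only):
// 'struct { int a, b; }' qualifies: both fields are 32-bit basic types and
// their sizes add up to the struct size, so it can be expanded into two i32
// arguments. 'struct { short s; int i; }' does not: 'short' is not a
// 32/64-bit basic type, and the padding hole after it would make the
// expanded layout differ from the in-memory layout.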
namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return nullptr;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
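// Illustration of the default classification above: 'short' (a promotable
// integer) is extended, 'int' and pointers are passed directly, and any
// aggregate such as 'struct { int x, y; }' is passed indirectly through a
// pointer to a temporary.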
//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return nullptr;
}

/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    return ABIArgInfo::getIndirect(0);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 and
    // MMX registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE
/// registers in the X86_VectorCall calling convention. Shared between x86_32
/// and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}
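// Illustration for the vectorcall helpers above: 'float', 'double', '__m128'
// and '__m256' are all legal SSE-register candidates, and a homogeneous
// aggregate like 'struct { __m128 a, b; }' (at most four members) may be
// passed in consecutive SSE registers.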
//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned FreeSSERegs;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsWin32StructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
  bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           unsigned &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                          bool d, bool p, bool w, unsigned r)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x06 << 8) |  //           .+0x08
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

};

}

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     mov $0, $1
///     mov eax, $1
/// The result will be:
///     mov $0, $2
///     mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}
/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}
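// Illustration for shouldReturnTypeInRegister: 'struct { short a, b; }'
// (32 bits) and 'struct { char c[8]; }' (64 bits) are register-sized and
// qualify, while 'struct { char c[3]; }' (24 bits) and any struct containing
// a 64- or 128-bit vector do not.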
ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming
  // one integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    return ABIArgInfo::getIndirectInReg(/*Align=*/0, /*ByVal=*/false);
  }
  return ABIArgInfo::getIndirect(/*Align=*/0, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(State);

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
    }

    return getIndirectReturnResult(State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
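// Illustration (Darwin, with the small-struct-in-reg ABI): a single-element
// 'struct { float f; }' is returned directly as a float; 'struct { short a,
// b; }' is returned as a coerced i32; and the 64-bit 'struct { int a, b; }'
// comes back as an i64, i.e. split across EAX:EDX.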
static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSSEVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return ABIArgInfo::getIndirectInReg(0, false);
    }
    return ABIArgInfo::getIndirect(0, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4, /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
                                   bool &NeedsPadding) const {
  NeedsPadding = false;
  Class C = classify(Ty);
  if (C == Float)
    return false;

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall) {
    if (Size > 32)
      return false;

    if (Ty->isIntegralOrEnumerationType())
      return true;

    if (Ty->isPointerType())
      return true;

    if (Ty->isReferenceType())
      return true;

    if (State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}
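// Illustration for shouldUseInReg: with -mregparm=3 an 'int' argument
// consumes one of the three free registers, a 'long long' consumes two, and
// a 'double' is classified Float and never uses them. Under fastcall or
// vectorcall, only integer-like types of at most 32 bits actually end up
// inreg.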
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // vectorcall adds the concept of a homogeneous vector aggregate, similar
  // to other targets.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;
      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    if (RT) {
      // Structs are always byval on win32, regardless of what they contain.
      if (IsWin32StructABI)
        return getIndirectResult(Ty, true, State);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, State);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding;
    if (shouldUseInReg(Ty, State, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Expand small (<= 128-bit) record types when we know that the stack
    // layout of those arguments will match the struct. This is important
    // because the LLVM backend isn't smart enough to remove byval, which
    // inhibits many optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall ||
              State.CC == llvm::CallingConv::X86_VectorCall,
          PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }


  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool NeedsPadding;
  bool InReg = shouldUseInReg(Ty, State, NeedsPadding);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}
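// Illustration for the x86-32 argument classification above: on Darwin a
// 32-bit '<2 x short>' vector argument is coerced to i32; an MMX-width
// '<2 x int>' vector is coerced to i64 on all x86-32 targets; and an empty
// struct argument is ignored entirely.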
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI.getCallingConvention());
  if (State.CC == llvm::CallingConv::X86_FastCall)
    State.FreeRegs = 2;
  else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!getCXXABI().classifyReturnType(FI)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs; // The sret parameter consumes a register.
      FI.getReturnInfo().setInReg(true);
    }
  }

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;

  bool UsedInAlloca = false;
  for (auto &I : FI.arguments()) {
    I.info = classifyArgumentType(I.type, State);
    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and
  // rewrite all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}

void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                                   unsigned &StackOffset,
                                   ABIArgInfo &Info, QualType Type) const {
  assert(StackOffset % 4U == 0 && "unaligned inalloca struct");
  Info = ABIArgInfo::getInAlloca(FrameFields.size());
  FrameFields.push_back(CGT.ConvertTypeForMem(Type));
  StackOffset += getContext().getTypeSizeInChars(Type).getQuantity();

  // Insert padding bytes to respect alignment. For x86_32, each argument is
  // 4-byte aligned.
  if (StackOffset % 4U) {
    unsigned OldOffset = StackOffset;
    StackOffset = llvm::RoundUpToAlignment(StackOffset, 4U);
    unsigned NumBytes = StackOffset - OldOffset;
    assert(NumBytes);
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes);
    FrameFields.push_back(Ty);
  }
}

static bool isArgInAlloca(const ABIArgInfo &Info) {
  // Leave ignored and inreg arguments alone.
  switch (Info.getKind()) {
  case ABIArgInfo::InAlloca:
    return true;
  case ABIArgInfo::Indirect:
    assert(Info.getIndirectByVal());
    return true;
  case ABIArgInfo::Ignore:
    return false;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
  case ABIArgInfo::Expand:
    if (Info.getInReg())
      return false;
    return true;
  }
  llvm_unreachable("invalid enum");
}

void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  unsigned StackOffset = 0;
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();

  // Put 'this' into the struct before 'sret', if necessary.
  bool IsThisCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
      isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }

  // Put the sret parameter into the inalloca struct if it's in memory.
  if (Ret.isIndirect() && !Ret.getInReg()) {
    CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  if (IsThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {
    if (isArgInAlloca(I->info))
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true));
}
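// Illustration for rewriteWithInAlloca (a sketch; the exact field types
// depend on the signature): for 'void f(A a, B b)' on win32, where A and B
// are passed in memory, the rewritten frame is a packed struct along the
// lines of
//
//   <{ %struct.A, [pad bytes], %struct.B }>
//
// with padding arrays inserted so each field starts at a 4-byte offset;
// 'this' (in ecx) and an inreg sret pointer stay out of the frame.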
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute if the address needs to be aligned
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
        llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
      llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
      llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
      Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                        "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
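// Illustration of the va_arg pointer arithmetic above: for a 16-byte-aligned
// type, ap.cur = 0x1004 is first rounded up via (0x1004 + 15) & ~15 = 0x1010,
// and ap.next then advances by the type size rounded up to the same 16-byte
// alignment.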
bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
    return true;
  }

  if (Triple.isOSDarwin())
    return true;

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Bitrig:
  case llvm::Triple::Win32:
    return true;
  default:
    return false;
  }
}

void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                        llvm::AttributeSet::get(CGM.getLLVMContext(),
                                               llvm::AttributeSet::FunctionIndex,
                                               B));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF,
    llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}
//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//


namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// \param isNamedArg - Whether the argument in question is a "named"
  /// argument, as used in AMD64-ABI 3.5.7.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE,
                                  bool isNamedArg) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  bool HasAVX;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32-bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
                                           /*isNamedArg*/true);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs,
                      bool IsReturnType) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }
};
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
  bool HasAVX;
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)), HasAVX(HasAVX) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x0a << 8) |  //           .+0x0c
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return HasAVX ? 32 : 16;
  }
};

static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
  // If the argument does not end in .lib, automatically add the suffix. This
  // matches the behavior of MSVC.
  std::string ArgStr = Lib;
  if (!Lib.endswith_lower(".lib"))
    ArgStr += ".lib";
  return ArgStr;
}

class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                             bool d, bool p, bool w, unsigned RegParms)
    : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
  bool HasAVX;
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)), HasAVX(HasAVX) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }

  unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return HasAVX ? 32 : 16;
  }
};

}
1680 // 1681 // (c) If the size of the aggregate exceeds two eightbytes and the first 1682 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole 1683 // argument is passed in memory. NOTE: This is necessary to keep the 1684 // ABI working for processors that don't support the __m256 type. 1685 // 1686 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. 1687 // 1688 // Some of these are enforced by the merging logic. Others can arise 1689 // only with unions; for example: 1690 // union { _Complex double; unsigned; } 1691 // 1692 // Note that clauses (b) and (c) were added in 0.98. 1693 // 1694 if (Hi == Memory) 1695 Lo = Memory; 1696 if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) 1697 Lo = Memory; 1698 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) 1699 Lo = Memory; 1700 if (Hi == SSEUp && Lo != SSE) 1701 Hi = SSE; 1702 } 1703 1704 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { 1705 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 1706 // classified recursively so that always two fields are 1707 // considered. The resulting class is calculated according to 1708 // the classes of the fields in the eightbyte: 1709 // 1710 // (a) If both classes are equal, this is the resulting class. 1711 // 1712 // (b) If one of the classes is NO_CLASS, the resulting class is 1713 // the other class. 1714 // 1715 // (c) If one of the classes is MEMORY, the result is the MEMORY 1716 // class. 1717 // 1718 // (d) If one of the classes is INTEGER, the result is the 1719 // INTEGER. 1720 // 1721 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 1722 // MEMORY is used as class. 1723 // 1724 // (f) Otherwise class SSE is used. 1725 1726 // Accum should never be memory (we should have returned) or 1727 // ComplexX87 (because this cannot be passed in a structure). 1728 assert((Accum != Memory && Accum != ComplexX87) && 1729 "Invalid accumulated classification during merge."); 1730 if (Accum == Field || Field == NoClass) 1731 return Accum; 1732 if (Field == Memory) 1733 return Memory; 1734 if (Accum == NoClass) 1735 return Field; 1736 if (Accum == Integer || Field == Integer) 1737 return Integer; 1738 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 1739 Accum == X87 || Accum == X87Up) 1740 return Memory; 1741 return SSE; 1742 } 1743 1744 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, 1745 Class &Lo, Class &Hi, bool isNamedArg) const { 1746 // FIXME: This code can be simplified by introducing a simple value class for 1747 // Class pairs with appropriate constructor methods for the various 1748 // situations. 1749 1750 // FIXME: Some of the split computations are wrong; unaligned vectors 1751 // shouldn't be passed in registers for example, so there is no chance they 1752 // can straddle an eightbyte. Verify & simplify. 1753 1754 Lo = Hi = NoClass; 1755 1756 Class &Current = OffsetBase < 64 ? 
Lo : Hi; 1757 Current = Memory; 1758 1759 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 1760 BuiltinType::Kind k = BT->getKind(); 1761 1762 if (k == BuiltinType::Void) { 1763 Current = NoClass; 1764 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 1765 Lo = Integer; 1766 Hi = Integer; 1767 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 1768 Current = Integer; 1769 } else if ((k == BuiltinType::Float || k == BuiltinType::Double) || 1770 (k == BuiltinType::LongDouble && 1771 getTarget().getTriple().isOSNaCl())) { 1772 Current = SSE; 1773 } else if (k == BuiltinType::LongDouble) { 1774 Lo = X87; 1775 Hi = X87Up; 1776 } 1777 // FIXME: _Decimal32 and _Decimal64 are SSE. 1778 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 1779 return; 1780 } 1781 1782 if (const EnumType *ET = Ty->getAs<EnumType>()) { 1783 // Classify the underlying integer type. 1784 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg); 1785 return; 1786 } 1787 1788 if (Ty->hasPointerRepresentation()) { 1789 Current = Integer; 1790 return; 1791 } 1792 1793 if (Ty->isMemberPointerType()) { 1794 if (Ty->isMemberFunctionPointerType()) { 1795 if (Has64BitPointers) { 1796 // If Has64BitPointers, this is an {i64, i64}, so classify both 1797 // Lo and Hi now. 1798 Lo = Hi = Integer; 1799 } else { 1800 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that 1801 // straddles an eightbyte boundary, Hi should be classified as well. 1802 uint64_t EB_FuncPtr = (OffsetBase) / 64; 1803 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64; 1804 if (EB_FuncPtr != EB_ThisAdj) { 1805 Lo = Hi = Integer; 1806 } else { 1807 Current = Integer; 1808 } 1809 } 1810 } else { 1811 Current = Integer; 1812 } 1813 return; 1814 } 1815 1816 if (const VectorType *VT = Ty->getAs<VectorType>()) { 1817 uint64_t Size = getContext().getTypeSize(VT); 1818 if (Size == 32) { 1819 // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x 1820 // float> as integer. 1821 Current = Integer; 1822 1823 // If this type crosses an eightbyte boundary, it should be 1824 // split. 1825 uint64_t EB_Real = (OffsetBase) / 64; 1826 uint64_t EB_Imag = (OffsetBase + Size - 1) / 64; 1827 if (EB_Real != EB_Imag) 1828 Hi = Lo; 1829 } else if (Size == 64) { 1830 // gcc passes <1 x double> in memory. :( 1831 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) 1832 return; 1833 1834 // gcc passes <1 x long long> as INTEGER. 1835 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) || 1836 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) || 1837 VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) || 1838 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong)) 1839 Current = Integer; 1840 else 1841 Current = SSE; 1842 1843 // If this type crosses an eightbyte boundary, it should be 1844 // split. 1845 if (OffsetBase && OffsetBase != 64) 1846 Hi = Lo; 1847 } else if (Size == 128 || (HasAVX && isNamedArg && Size == 256)) { 1848 // Arguments of 256-bits are split into four eightbyte chunks. The 1849 // least significant one belongs to class SSE and all the others to class 1850 // SSEUP. The original Lo and Hi design considers that types can't be 1851 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. 1852 // This design isn't correct for 256-bits, but since there're no cases 1853 // where the upper parts would need to be inspected, avoid adding 1854 // complexity and just consider Hi to match the 64-256 part. 
1855 // 1856 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in 1857 // registers if they are "named", i.e. not part of the "..." of a 1858 // variadic function. 1859 Lo = SSE; 1860 Hi = SSEUp; 1861 } 1862 return; 1863 } 1864 1865 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 1866 QualType ET = getContext().getCanonicalType(CT->getElementType()); 1867 1868 uint64_t Size = getContext().getTypeSize(Ty); 1869 if (ET->isIntegralOrEnumerationType()) { 1870 if (Size <= 64) 1871 Current = Integer; 1872 else if (Size <= 128) 1873 Lo = Hi = Integer; 1874 } else if (ET == getContext().FloatTy) 1875 Current = SSE; 1876 else if (ET == getContext().DoubleTy || 1877 (ET == getContext().LongDoubleTy && 1878 getTarget().getTriple().isOSNaCl())) 1879 Lo = Hi = SSE; 1880 else if (ET == getContext().LongDoubleTy) 1881 Current = ComplexX87; 1882 1883 // If this complex type crosses an eightbyte boundary then it 1884 // should be split. 1885 uint64_t EB_Real = (OffsetBase) / 64; 1886 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; 1887 if (Hi == NoClass && EB_Real != EB_Imag) 1888 Hi = Lo; 1889 1890 return; 1891 } 1892 1893 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 1894 // Arrays are treated like structures. 1895 1896 uint64_t Size = getContext().getTypeSize(Ty); 1897 1898 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 1899 // than four eightbytes, ..., it has class MEMORY. 1900 if (Size > 256) 1901 return; 1902 1903 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 1904 // fields, it has class MEMORY. 1905 // 1906 // Only need to check alignment of array base. 1907 if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) 1908 return; 1909 1910 // Otherwise implement simplified merge. We could be smarter about 1911 // this, but it isn't worth it and would be harder to verify. 1912 Current = NoClass; 1913 uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); 1914 uint64_t ArraySize = AT->getSize().getZExtValue(); 1915 1916 // The only case a 256-bit wide vector could be used is when the array 1917 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 1918 // to work for sizes wider than 128, early check and fallback to memory. 1919 if (Size > 128 && EltSize != 256) 1920 return; 1921 1922 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 1923 Class FieldLo, FieldHi; 1924 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg); 1925 Lo = merge(Lo, FieldLo); 1926 Hi = merge(Hi, FieldHi); 1927 if (Lo == Memory || Hi == Memory) 1928 break; 1929 } 1930 1931 postMerge(Size, Lo, Hi); 1932 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 1933 return; 1934 } 1935 1936 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1937 uint64_t Size = getContext().getTypeSize(Ty); 1938 1939 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 1940 // than four eightbytes, ..., it has class MEMORY. 1941 if (Size > 256) 1942 return; 1943 1944 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial 1945 // copy constructor or a non-trivial destructor, it is passed by invisible 1946 // reference. 1947 if (getRecordArgABI(RT, getCXXABI())) 1948 return; 1949 1950 const RecordDecl *RD = RT->getDecl(); 1951 1952 // Assume variable sized types are passed in memory. 
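// (Illustrative example, ours: a record such as
//   struct S { int len; char data[]; };
// has a flexible array member, so the check below leaves it in the
// MEMORY class assigned at the top of classify().)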
1953 if (RD->hasFlexibleArrayMember()) 1954 return; 1955 1956 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 1957 1958 // Reset Lo class, this will be recomputed. 1959 Current = NoClass; 1960 1961 // If this is a C++ record, classify the bases first. 1962 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1963 for (const auto &I : CXXRD->bases()) { 1964 assert(!I.isVirtual() && !I.getType()->isDependentType() && 1965 "Unexpected base class!"); 1966 const CXXRecordDecl *Base = 1967 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 1968 1969 // Classify this field. 1970 // 1971 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 1972 // single eightbyte, each is classified separately. Each eightbyte gets 1973 // initialized to class NO_CLASS. 1974 Class FieldLo, FieldHi; 1975 uint64_t Offset = 1976 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); 1977 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg); 1978 Lo = merge(Lo, FieldLo); 1979 Hi = merge(Hi, FieldHi); 1980 if (Lo == Memory || Hi == Memory) 1981 break; 1982 } 1983 } 1984 1985 // Classify the fields one at a time, merging the results. 1986 unsigned idx = 0; 1987 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1988 i != e; ++i, ++idx) { 1989 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 1990 bool BitField = i->isBitField(); 1991 1992 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than 1993 // four eightbytes, or it contains unaligned fields, it has class MEMORY. 1994 // 1995 // The only case a 256-bit wide vector could be used is when the struct 1996 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 1997 // to work for sizes wider than 128, early check and fallback to memory. 1998 // 1999 if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) { 2000 Lo = Memory; 2001 return; 2002 } 2003 // Note, skip this test for bit-fields, see below. 2004 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { 2005 Lo = Memory; 2006 return; 2007 } 2008 2009 // Classify this field. 2010 // 2011 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate 2012 // exceeds a single eightbyte, each is classified 2013 // separately. Each eightbyte gets initialized to class 2014 // NO_CLASS. 2015 Class FieldLo, FieldHi; 2016 2017 // Bit-fields require special handling, they do not force the 2018 // structure to be passed in memory even if unaligned, and 2019 // therefore they can straddle an eightbyte. 2020 if (BitField) { 2021 // Ignore padding bit-fields. 2022 if (i->isUnnamedBitfield()) 2023 continue; 2024 2025 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 2026 uint64_t Size = i->getBitWidthValue(getContext()); 2027 2028 uint64_t EB_Lo = Offset / 64; 2029 uint64_t EB_Hi = (Offset + Size - 1) / 64; 2030 2031 if (EB_Lo) { 2032 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 2033 FieldLo = NoClass; 2034 FieldHi = Integer; 2035 } else { 2036 FieldLo = Integer; 2037 FieldHi = EB_Hi ? 
Integer : NoClass;
2038 }
2039 } else
2040 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2041 Lo = merge(Lo, FieldLo);
2042 Hi = merge(Hi, FieldHi);
2043 if (Lo == Memory || Hi == Memory)
2044 break;
2045 }
2046
2047 postMerge(Size, Lo, Hi);
2048 }
2049 }
2050
2051 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
2052 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2053 // place naturally.
2054 if (!isAggregateTypeForABI(Ty)) {
2055 // Treat an enum type as its underlying type.
2056 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2057 Ty = EnumTy->getDecl()->getIntegerType();
2058
2059 return (Ty->isPromotableIntegerType() ?
2060 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2061 }
2062
2063 return ABIArgInfo::getIndirect(0);
2064 }
2065
2066 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
2067 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
2068 uint64_t Size = getContext().getTypeSize(VecTy);
2069 unsigned LargestVector = HasAVX ? 256 : 128;
2070 if (Size <= 64 || Size > LargestVector)
2071 return true;
2072 }
2073
2074 return false;
2075 }
2076
2077 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
2078 unsigned freeIntRegs) const {
2079 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2080 // place naturally.
2081 //
2082 // This assumption is optimistic, as there could be free registers available
2083 // when we need to pass this argument in memory, and LLVM could try to pass
2084 // the argument in the free register. This does not seem to happen currently,
2085 // but this code would be much safer if we could mark the argument with
2086 // 'onstack'. See PR12193.
2087 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
2088 // Treat an enum type as its underlying type.
2089 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2090 Ty = EnumTy->getDecl()->getIntegerType();
2091
2092 return (Ty->isPromotableIntegerType() ?
2093 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2094 }
2095
2096 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
2097 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
2098
2099 // Compute the byval alignment. We specify the alignment of the byval in all
2100 // cases so that the mid-level optimizer knows the alignment of the byval.
2101 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2102
2103 // Attempt to avoid passing indirect results using byval when possible. This
2104 // is important for good codegen.
2105 //
2106 // We do this by coercing the value into a scalar type which the backend can
2107 // handle naturally (i.e., without using byval).
2108 //
2109 // For simplicity, we currently only do this when we have exhausted all of the
2110 // free integer registers. Doing this when there are free integer registers
2111 // would require more care, as we would have to ensure that the coerced value
2112 // did not claim the unused register. That would require either reordering the
2113 // arguments to the function (so that any subsequent inreg values came first),
2114 // or only doing this optimization when there were no following arguments that
2115 // might be inreg.
2116 //
2117 // We currently expect it to be rare (particularly in well written code) for
2118 // arguments to be passed on the stack when there are still free integer
2119 // registers available (this would typically imply large structs being passed
2120 // by value), so this seems like a fair tradeoff for now.
2121 //
2122 // We can revisit this if the backend grows support for 'onstack' parameter
2123 // attributes. See PR12193.
2124 if (freeIntRegs == 0) {
2125 uint64_t Size = getContext().getTypeSize(Ty);
2126
2127 // If this type fits in an eightbyte, coerce it into the matching integral
2128 // type, which will end up on the stack (with alignment 8).
2129 if (Align == 8 && Size <= 64)
2130 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2131 Size));
2132 }
2133
2134 return ABIArgInfo::getIndirect(Align);
2135 }
2136
2137 /// The ABI specifies that a value should be passed in a full vector XMM/YMM
2138 /// register. Pick an LLVM IR type that will be passed as a vector register.
2139 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
2140 // Wrapper structs/arrays that only contain vectors are passed just like
2141 // vectors; strip them off if present.
2142 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
2143 Ty = QualType(InnerTy, 0);
2144
2145 llvm::Type *IRType = CGT.ConvertType(Ty);
2146
2147 // If the preferred type is a 16- or 32-byte vector, prefer to pass it.
2148 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)) {
2149 llvm::Type *EltTy = VT->getElementType();
2150 unsigned BitWidth = VT->getBitWidth();
2151 if ((BitWidth >= 128 && BitWidth <= 256) &&
2152 (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
2153 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
2154 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
2155 EltTy->isIntegerTy(128)))
2156 return VT;
2157 }
2158
2159 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
2160 }
2161
2162 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
2163 /// is known to either be off the end of the specified type or be in
2164 /// alignment padding. The user type specified is known to be at most 128 bits
2165 /// in size, and to have passed through X86_64ABIInfo::classify with a successful
2166 /// classification that put one of the two halves in the INTEGER class.
2167 ///
2168 /// It is conservatively correct to return false.
2169 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
2170 unsigned EndBit, ASTContext &Context) {
2171 // If the bytes being queried are off the end of the type, there is no user
2172 // data hiding here. This handles analysis of builtins, vectors and other
2173 // types that don't contain interesting padding.
2174 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
2175 if (TySize <= StartBit)
2176 return true;
2177
2178 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
2179 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
2180 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
2181
2182 // Check each element to see if the element overlaps with the queried range.
2183 for (unsigned i = 0; i != NumElts; ++i) {
2184 // If the element is after the span we care about, then we're done.
2185 unsigned EltOffset = i*EltSize;
2186 if (EltOffset >= EndBit) break;
2187
2188 unsigned EltStart = EltOffset < StartBit ?
StartBit-EltOffset :0; 2189 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 2190 EndBit-EltOffset, Context)) 2191 return false; 2192 } 2193 // If it overlaps no elements, then it is safe to process as padding. 2194 return true; 2195 } 2196 2197 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2198 const RecordDecl *RD = RT->getDecl(); 2199 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 2200 2201 // If this is a C++ record, check the bases first. 2202 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 2203 for (const auto &I : CXXRD->bases()) { 2204 assert(!I.isVirtual() && !I.getType()->isDependentType() && 2205 "Unexpected base class!"); 2206 const CXXRecordDecl *Base = 2207 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 2208 2209 // If the base is after the span we care about, ignore it. 2210 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 2211 if (BaseOffset >= EndBit) continue; 2212 2213 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 2214 if (!BitsContainNoUserData(I.getType(), BaseStart, 2215 EndBit-BaseOffset, Context)) 2216 return false; 2217 } 2218 } 2219 2220 // Verify that no field has data that overlaps the region of interest. Yes 2221 // this could be sped up a lot by being smarter about queried fields, 2222 // however we're only looking at structs up to 16 bytes, so we don't care 2223 // much. 2224 unsigned idx = 0; 2225 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2226 i != e; ++i, ++idx) { 2227 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 2228 2229 // If we found a field after the region we care about, then we're done. 2230 if (FieldOffset >= EndBit) break; 2231 2232 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 2233 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 2234 Context)) 2235 return false; 2236 } 2237 2238 // If nothing in this record overlapped the area of interest, then we're 2239 // clean. 2240 return true; 2241 } 2242 2243 return false; 2244 } 2245 2246 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 2247 /// float member at the specified offset. For example, {int,{float}} has a 2248 /// float at offset 4. It is conservatively correct for this routine to return 2249 /// false. 2250 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 2251 const llvm::DataLayout &TD) { 2252 // Base case if we find a float. 2253 if (IROffset == 0 && IRType->isFloatTy()) 2254 return true; 2255 2256 // If this is a struct, recurse into the field at the specified offset. 2257 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 2258 const llvm::StructLayout *SL = TD.getStructLayout(STy); 2259 unsigned Elt = SL->getElementContainingOffset(IROffset); 2260 IROffset -= SL->getElementOffset(Elt); 2261 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 2262 } 2263 2264 // If this is an array, recurse into the field at the specified offset. 2265 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 2266 llvm::Type *EltTy = ATy->getElementType(); 2267 unsigned EltSize = TD.getTypeAllocSize(EltTy); 2268 IROffset -= IROffset/EltSize*EltSize; 2269 return ContainsFloatAtOffset(EltTy, IROffset, TD); 2270 } 2271 2272 return false; 2273 } 2274 2275 2276 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 2277 /// low 8 bytes of an XMM register, corresponding to the SSE class. 
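///
/// Rough worked example (the struct names are ours, not from the ABI
/// text): for
///   struct S2 { float a, b; };    // floats at offsets 0 and 4
/// the eightbyte at SourceOffset 0 comes back as <2 x float>, while for
///   struct S3 { float a, b, c; }; // second eightbyte: float + padding
/// the eightbyte at SourceOffset 8 comes back as plain float.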
2278 llvm::Type *X86_64ABIInfo::
2279 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2280 QualType SourceTy, unsigned SourceOffset) const {
2281 // The only three choices we have are double, <2 x float>, or float. We
2282 // pass as float if the last 4 bytes are just padding. This happens for
2283 // structs that contain 3 floats.
2284 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
2285 SourceOffset*8+64, getContext()))
2286 return llvm::Type::getFloatTy(getVMContext());
2287
2288 // We want to pass as <2 x float> if the LLVM IR type contains a float at
2289 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
2290 // case.
2291 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
2292 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
2293 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
2294
2295 return llvm::Type::getDoubleTy(getVMContext());
2296 }
2297
2298
2299 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
2300 /// an 8-byte GPR. This means that we either have a scalar or we are talking
2301 /// about the high or low part of an up-to-16-byte struct. This routine picks
2302 /// the best LLVM IR type to represent this, which may be i64 or may be anything
2303 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
2304 /// etc).
2305 ///
2306 /// IRType is an LLVM IR type that corresponds to (part of) the IR type for
2307 /// the source type. IROffset is an offset in bytes into the LLVM IR type that
2308 /// the 8-byte value references.
2309 ///
2310 /// SourceTy is the source-level type for the entire argument. SourceOffset is
2311 /// an offset into this that we're processing (which is always either 0 or 8).
2312 ///
2313 llvm::Type *X86_64ABIInfo::
2314 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2315 QualType SourceTy, unsigned SourceOffset) const {
2316 // If we're dealing with an un-offset LLVM IR type, then it means that we're
2317 // returning an 8-byte unit starting with it. See if we can safely use it.
2318 if (IROffset == 0) {
2319 // Pointers and int64's always fill the 8-byte unit.
2320 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
2321 IRType->isIntegerTy(64))
2322 return IRType;
2323
2324 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
2325 // goodness in the source type is just tail padding. This is allowed to
2326 // kick in for struct {double,int} on the int, but not on
2327 // struct{double,int,int} because we wouldn't return the second int. We
2328 // have to do this analysis on the source type because we can't depend on
2329 // unions being lowered a specific way etc.
2330 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
2331 IRType->isIntegerTy(32) ||
2332 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2333 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2334 cast<llvm::IntegerType>(IRType)->getBitWidth();
2335
2336 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
2337 SourceOffset*8+64, getContext()))
2338 return IRType;
2339 }
2340 }
2341
2342 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2343 // If this is a struct, recurse into the field at the specified offset.
2344 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2345 if (IROffset < SL->getSizeInBytes()) {
2346 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2347 IROffset -= SL->getElementOffset(FieldIdx);
2348
2349 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2350 SourceTy, SourceOffset);
2351 }
2352 }
2353
2354 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2355 llvm::Type *EltTy = ATy->getElementType();
2356 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2357 unsigned EltOffset = IROffset/EltSize*EltSize;
2358 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2359 SourceOffset);
2360 }
2361
2362 // Okay, we don't have any better idea of what to pass, so we pass this in an
2363 // integer register that isn't too big to fit the rest of the struct.
2364 unsigned TySizeInBytes =
2365 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2366
2367 assert(TySizeInBytes != SourceOffset && "Empty field?");
2368
2369 // It is always safe to classify this as an integer type up to i64 that
2370 // isn't larger than the structure.
2371 return llvm::IntegerType::get(getVMContext(),
2372 std::min(TySizeInBytes-SourceOffset, 8U)*8);
2373 }
2374
2375
2376 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
2377 /// be used as elements of a two register pair to pass or return, return a
2378 /// first class aggregate to represent them. For example, if the low part of
2379 /// a by-value argument should be passed as i32* and the high part as float,
2380 /// return {i32*, float}.
2381 static llvm::Type *
2382 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
2383 const llvm::DataLayout &TD) {
2384 // In order to correctly satisfy the ABI, we need the high part to start
2385 // at offset 8. If the high and low parts we inferred are both 4-byte types
2386 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
2387 // the second element at offset 8. Check for this:
2388 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
2389 unsigned HiAlign = TD.getABITypeAlignment(Hi);
2390 unsigned HiStart = llvm::RoundUpToAlignment(LoSize, HiAlign);
2391 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
2392
2393 // To handle this, we have to increase the size of the low part so that the
2394 // second element will start at an 8 byte offset. We can't increase the size
2395 // of the second element because it might make us access off the end of the
2396 // struct.
2397 if (HiStart != 8) {
2398 // There are only two sorts of types the ABI generation code can produce for
2399 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
2400 // Promote these to a larger type.
2401 if (Lo->isFloatTy())
2402 Lo = llvm::Type::getDoubleTy(Lo->getContext());
2403 else {
2404 assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
2405 Lo = llvm::Type::getInt64Ty(Lo->getContext());
2406 }
2407 }
2408
2409 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr);
2410
2411
2412 // Verify that the second element is at an 8-byte offset.
2413 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
2414 "Invalid x86-64 argument pair!");
2415 return Result;
2416 }
2417
2418 ABIArgInfo X86_64ABIInfo::
2419 classifyReturnType(QualType RetTy) const {
2420 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
2421 // classification algorithm.
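//
// Worked example (illustrative, example type is ours): for
//   struct R { long x; double y; };
// the classifier produces (Lo = Integer, Hi = SSE), so the value is
// returned as the pair {i64, double} in %rax and %xmm0.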
2422 X86_64ABIInfo::Class Lo, Hi;
2423 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
2424
2425 // Check some invariants.
2426 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2427 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2428
2429 llvm::Type *ResType = nullptr;
2430 switch (Lo) {
2431 case NoClass:
2432 if (Hi == NoClass)
2433 return ABIArgInfo::getIgnore();
2434 // If the low part is just padding, it takes no register, leave ResType
2435 // null.
2436 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2437 "Unknown missing lo part");
2438 break;
2439
2440 case SSEUp:
2441 case X87Up:
2442 llvm_unreachable("Invalid classification for lo word.");
2443
2444 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
2445 // hidden argument.
2446 case Memory:
2447 return getIndirectReturnResult(RetTy);
2448
2449 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
2450 // available register of the sequence %rax, %rdx is used.
2451 case Integer:
2452 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2453
2454 // If we have a sign or zero extended integer, make sure to return Extend
2455 // so that the parameter gets the right LLVM IR attributes.
2456 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2457 // Treat an enum type as its underlying type.
2458 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
2459 RetTy = EnumTy->getDecl()->getIntegerType();
2460
2461 if (RetTy->isIntegralOrEnumerationType() &&
2462 RetTy->isPromotableIntegerType())
2463 return ABIArgInfo::getExtend();
2464 }
2465 break;
2466
2467 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
2468 // available SSE register of the sequence %xmm0, %xmm1 is used.
2469 case SSE:
2470 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2471 break;
2472
2473 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
2474 // returned on the X87 stack in %st0 as an 80-bit x87 number.
2475 case X87:
2476 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
2477 break;
2478
2479 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
2480 // part of the value is returned in %st0 and the imaginary part in
2481 // %st1.
2482 case ComplexX87:
2483 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
2484 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
2485 llvm::Type::getX86_FP80Ty(getVMContext()),
2486 nullptr);
2487 break;
2488 }
2489
2490 llvm::Type *HighPart = nullptr;
2491 switch (Hi) {
2492 // Memory was handled previously and X87 should
2493 // never occur as a hi class.
2494 case Memory:
2495 case X87:
2496 llvm_unreachable("Invalid classification for hi word.");
2497
2498 case ComplexX87: // Previously handled.
2499 case NoClass:
2500 break;
2501
2502 case Integer:
2503 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2504 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2505 return ABIArgInfo::getDirect(HighPart, 8);
2506 break;
2507 case SSE:
2508 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2509 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2510 return ABIArgInfo::getDirect(HighPart, 8);
2511 break;
2512
2513 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
2514 // is passed in the next available eightbyte chunk of the last used
2515 // vector register.
2516 //
2517 // SSEUP should always be preceded by SSE, just widen.
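//
// (Illustrative, our example: with AVX, returning an __m256 classifies
// as Lo = SSE, Hi = SSEUp, and GetByteVectorType hands back the full
// <8 x float> vector, which the backend returns in %ymm0.)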
2518 case SSEUp: 2519 assert(Lo == SSE && "Unexpected SSEUp classification."); 2520 ResType = GetByteVectorType(RetTy); 2521 break; 2522 2523 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 2524 // returned together with the previous X87 value in %st0. 2525 case X87Up: 2526 // If X87Up is preceded by X87, we don't need to do 2527 // anything. However, in some cases with unions it may not be 2528 // preceded by X87. In such situations we follow gcc and pass the 2529 // extra bits in an SSE reg. 2530 if (Lo != X87) { 2531 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2532 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2533 return ABIArgInfo::getDirect(HighPart, 8); 2534 } 2535 break; 2536 } 2537 2538 // If a high part was specified, merge it together with the low part. It is 2539 // known to pass in the high eightbyte of the result. We do this by forming a 2540 // first class struct aggregate with the high and low part: {low, high} 2541 if (HighPart) 2542 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2543 2544 return ABIArgInfo::getDirect(ResType); 2545 } 2546 2547 ABIArgInfo X86_64ABIInfo::classifyArgumentType( 2548 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE, 2549 bool isNamedArg) 2550 const 2551 { 2552 Ty = useFirstFieldIfTransparentUnion(Ty); 2553 2554 X86_64ABIInfo::Class Lo, Hi; 2555 classify(Ty, 0, Lo, Hi, isNamedArg); 2556 2557 // Check some invariants. 2558 // FIXME: Enforce these by construction. 2559 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2560 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2561 2562 neededInt = 0; 2563 neededSSE = 0; 2564 llvm::Type *ResType = nullptr; 2565 switch (Lo) { 2566 case NoClass: 2567 if (Hi == NoClass) 2568 return ABIArgInfo::getIgnore(); 2569 // If the low part is just padding, it takes no register, leave ResType 2570 // null. 2571 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2572 "Unknown missing lo part"); 2573 break; 2574 2575 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 2576 // on the stack. 2577 case Memory: 2578 2579 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 2580 // COMPLEX_X87, it is passed in memory. 2581 case X87: 2582 case ComplexX87: 2583 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect) 2584 ++neededInt; 2585 return getIndirectResult(Ty, freeIntRegs); 2586 2587 case SSEUp: 2588 case X87Up: 2589 llvm_unreachable("Invalid classification for lo word."); 2590 2591 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 2592 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 2593 // and %r9 is used. 2594 case Integer: 2595 ++neededInt; 2596 2597 // Pick an 8-byte type based on the preferred type. 2598 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 2599 2600 // If we have a sign or zero extended integer, make sure to return Extend 2601 // so that the parameter gets the right LLVM IR attributes. 2602 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2603 // Treat an enum type as its underlying type. 2604 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2605 Ty = EnumTy->getDecl()->getIntegerType(); 2606 2607 if (Ty->isIntegralOrEnumerationType() && 2608 Ty->isPromotableIntegerType()) 2609 return ABIArgInfo::getExtend(); 2610 } 2611 2612 break; 2613 2614 // AMD64-ABI 3.2.3p3: Rule 3. 
// If the class is SSE, the next
2615 // available SSE register is used, the registers are taken in the
2616 // order from %xmm0 to %xmm7.
2617 case SSE: {
2618 llvm::Type *IRType = CGT.ConvertType(Ty);
2619 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
2620 ++neededSSE;
2621 break;
2622 }
2623 }
2624
2625 llvm::Type *HighPart = nullptr;
2626 switch (Hi) {
2627 // Memory was handled previously, ComplexX87 and X87 should
2628 // never occur as hi classes, and X87Up must be preceded by X87,
2629 // which is passed in memory.
2630 case Memory:
2631 case X87:
2632 case ComplexX87:
2633 llvm_unreachable("Invalid classification for hi word.");
2634
2635 case NoClass: break;
2636
2637 case Integer:
2638 ++neededInt;
2639 // Pick an 8-byte type based on the preferred type.
2640 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2641
2642 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
2643 return ABIArgInfo::getDirect(HighPart, 8);
2644 break;
2645
2646 // X87Up generally doesn't occur here (long double is passed in
2647 // memory), except in situations involving unions.
2648 case X87Up:
2649 case SSE:
2650 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2651
2652 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
2653 return ABIArgInfo::getDirect(HighPart, 8);
2654
2655 ++neededSSE;
2656 break;
2657
2658 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
2659 // eightbyte is passed in the upper half of the last used SSE
2660 // register. This only happens when 128-bit or 256-bit vectors are passed.
2661 case SSEUp:
2662 assert(Lo == SSE && "Unexpected SSEUp classification");
2663 ResType = GetByteVectorType(Ty);
2664 break;
2665 }
2666
2667 // If a high part was specified, merge it together with the low part. It is
2668 // known to pass in the high eightbyte of the result. We do this by forming a
2669 // first class struct aggregate with the high and low part: {low, high}
2670 if (HighPart)
2671 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
2672
2673 return ABIArgInfo::getDirect(ResType);
2674 }
2675
2676 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2677
2678 if (!getCXXABI().classifyReturnType(FI))
2679 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2680
2681 // Keep track of the number of assigned registers.
2682 unsigned freeIntRegs = 6, freeSSERegs = 8;
2683
2684 // If the return value is indirect, then the hidden argument is consuming one
2685 // integer register.
2686 if (FI.getReturnInfo().isIndirect())
2687 --freeIntRegs;
2688
2689 // The chain argument effectively gives us another free register.
2690 if (FI.isChainCall())
2691 ++freeIntRegs;
2692
2693 unsigned NumRequiredArgs = FI.getNumRequiredArgs();
2694 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
2695 // get assigned (in left-to-right order) for passing as follows...
2696 unsigned ArgNo = 0;
2697 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2698 it != ie; ++it, ++ArgNo) {
2699 bool IsNamedArg = ArgNo < NumRequiredArgs;
2700
2701 unsigned neededInt, neededSSE;
2702 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
2703 neededSSE, IsNamedArg);
2704
2705 // AMD64-ABI 3.2.3p3: If there are no registers available for any
2706 // eightbyte of an argument, the whole argument is passed on the
2707 // stack. If registers have already been assigned for some
2708 // eightbytes of such an argument, the assignments get reverted.
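//
// Rough illustration (example type is ours, not from the ABI text):
// struct { long a, b; } needs two integer registers; if only one
// remains free, the whole struct goes to the stack instead of being
// split between a register and memory.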
2709 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 2710 freeIntRegs -= neededInt; 2711 freeSSERegs -= neededSSE; 2712 } else { 2713 it->info = getIndirectResult(it->type, freeIntRegs); 2714 } 2715 } 2716 } 2717 2718 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, 2719 QualType Ty, 2720 CodeGenFunction &CGF) { 2721 llvm::Value *overflow_arg_area_p = 2722 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 2723 llvm::Value *overflow_arg_area = 2724 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 2725 2726 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 2727 // byte boundary if alignment needed by type exceeds 8 byte boundary. 2728 // It isn't stated explicitly in the standard, but in practice we use 2729 // alignment greater than 16 where necessary. 2730 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 2731 if (Align > 8) { 2732 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 2733 llvm::Value *Offset = 2734 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 2735 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 2736 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 2737 CGF.Int64Ty); 2738 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); 2739 overflow_arg_area = 2740 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 2741 overflow_arg_area->getType(), 2742 "overflow_arg_area.align"); 2743 } 2744 2745 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 2746 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 2747 llvm::Value *Res = 2748 CGF.Builder.CreateBitCast(overflow_arg_area, 2749 llvm::PointerType::getUnqual(LTy)); 2750 2751 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 2752 // l->overflow_arg_area + sizeof(type). 2753 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 2754 // an 8 byte boundary. 2755 2756 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 2757 llvm::Value *Offset = 2758 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 2759 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 2760 "overflow_arg_area.next"); 2761 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 2762 2763 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 2764 return Res; 2765 } 2766 2767 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 2768 CodeGenFunction &CGF) const { 2769 // Assume that va_list type is correct; should be pointer to LLVM type: 2770 // struct { 2771 // i32 gp_offset; 2772 // i32 fp_offset; 2773 // i8* overflow_arg_area; 2774 // i8* reg_save_area; 2775 // }; 2776 unsigned neededInt, neededSSE; 2777 2778 Ty = CGF.getContext().getCanonicalType(Ty); 2779 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE, 2780 /*isNamedArg*/false); 2781 2782 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 2783 // in the registers. If not go to step 7. 2784 if (!neededInt && !neededSSE) 2785 return EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2786 2787 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 2788 // general purpose registers needed to pass type and num_fp to hold 2789 // the number of floating point registers needed. 2790 2791 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 2792 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 2793 // l->fp_offset > 304 - num_fp * 16 go to step 7. 
2794 //
2795 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
2796 // register save space.
2797
2798 llvm::Value *InRegs = nullptr;
2799 llvm::Value *gp_offset_p = nullptr, *gp_offset = nullptr;
2800 llvm::Value *fp_offset_p = nullptr, *fp_offset = nullptr;
2801 if (neededInt) {
2802 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
2803 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
2804 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
2805 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
2806 }
2807
2808 if (neededSSE) {
2809 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
2810 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
2811 llvm::Value *FitsInFP =
2812 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
2813 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
2814 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
2815 }
2816
2817 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
2818 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
2819 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
2820 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
2821
2822 // Emit code to load the value if it was passed in registers.
2823
2824 CGF.EmitBlock(InRegBlock);
2825
2826 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
2827 // an offset of l->gp_offset and/or l->fp_offset. This may require
2828 // copying to a temporary location in case the parameter is passed
2829 // in different register classes or requires an alignment greater
2830 // than 8 for general purpose registers and 16 for XMM registers.
2831 //
2832 // FIXME: This really results in shameful code when we end up needing to
2833 // collect arguments from different places; often what should result in a
2834 // simple assembling of a structure from scattered addresses has many more
2835 // loads than necessary. Can we clean this up?
2836 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
2837 llvm::Value *RegAddr =
2838 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
2839 "reg_save_area");
2840 if (neededInt && neededSSE) {
2841 // FIXME: Cleanup.
2842 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
2843 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
2844 llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
2845 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
2846 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
2847 llvm::Type *TyLo = ST->getElementType(0);
2848 llvm::Type *TyHi = ST->getElementType(1);
2849 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
2850 "Unexpected ABI info for mixed regs");
2851 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
2852 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
2853 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2854 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2855 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
2856 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ?
GPAddr : FPAddr; 2857 llvm::Value *V = 2858 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 2859 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2860 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 2861 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2862 2863 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2864 llvm::PointerType::getUnqual(LTy)); 2865 } else if (neededInt) { 2866 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); 2867 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2868 llvm::PointerType::getUnqual(LTy)); 2869 2870 // Copy to a temporary if necessary to ensure the appropriate alignment. 2871 std::pair<CharUnits, CharUnits> SizeAlign = 2872 CGF.getContext().getTypeInfoInChars(Ty); 2873 uint64_t TySize = SizeAlign.first.getQuantity(); 2874 unsigned TyAlign = SizeAlign.second.getQuantity(); 2875 if (TyAlign > 8) { 2876 llvm::Value *Tmp = CGF.CreateMemTemp(Ty); 2877 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false); 2878 RegAddr = Tmp; 2879 } 2880 } else if (neededSSE == 1) { 2881 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2882 RegAddr = CGF.Builder.CreateBitCast(RegAddr, 2883 llvm::PointerType::getUnqual(LTy)); 2884 } else { 2885 assert(neededSSE == 2 && "Invalid number of needed registers!"); 2886 // SSE registers are spaced 16 bytes apart in the register save 2887 // area, we need to collect the two eightbytes together. 2888 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); 2889 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); 2890 llvm::Type *DoubleTy = CGF.DoubleTy; 2891 llvm::Type *DblPtrTy = 2892 llvm::PointerType::getUnqual(DoubleTy); 2893 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr); 2894 llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty); 2895 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo()); 2896 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, 2897 DblPtrTy)); 2898 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 2899 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, 2900 DblPtrTy)); 2901 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 2902 RegAddr = CGF.Builder.CreateBitCast(Tmp, 2903 llvm::PointerType::getUnqual(LTy)); 2904 } 2905 2906 // AMD64-ABI 3.5.7p5: Step 5. Set: 2907 // l->gp_offset = l->gp_offset + num_gp * 8 2908 // l->fp_offset = l->fp_offset + num_fp * 16. 2909 if (neededInt) { 2910 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 2911 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 2912 gp_offset_p); 2913 } 2914 if (neededSSE) { 2915 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 2916 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 2917 fp_offset_p); 2918 } 2919 CGF.EmitBranch(ContBlock); 2920 2921 // Emit code to load the value if it was passed in memory. 2922 2923 CGF.EmitBlock(InMemBlock); 2924 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); 2925 2926 // Return the appropriate result. 
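//
// (Rough sketch of the emitted control flow, for illustration: both
// the in-register and in-memory paths produce a pointer to the
// argument, and the PHI below simply selects the live one.)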
2927
2928 CGF.EmitBlock(ContBlock);
2929 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
2930 "vaarg.addr");
2931 ResAddr->addIncoming(RegAddr, InRegBlock);
2932 ResAddr->addIncoming(MemAddr, InMemBlock);
2933 return ResAddr;
2934 }
2935
2936 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
2937 bool IsReturnType) const {
2938
2939 if (Ty->isVoidType())
2940 return ABIArgInfo::getIgnore();
2941
2942 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2943 Ty = EnumTy->getDecl()->getIntegerType();
2944
2945 TypeInfo Info = getContext().getTypeInfo(Ty);
2946 uint64_t Width = Info.Width;
2947 unsigned Align = getContext().toCharUnitsFromBits(Info.Align).getQuantity();
2948
2949 const RecordType *RT = Ty->getAs<RecordType>();
2950 if (RT) {
2951 if (!IsReturnType) {
2952 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
2953 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
2954 }
2955
2956 if (RT->getDecl()->hasFlexibleArrayMember())
2957 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2958
2959 // FIXME: mingw-w64-gcc emits 128-bit struct as i128
2960 if (Width == 128 && getTarget().getTriple().isWindowsGNUEnvironment())
2961 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2962 Width));
2963 }
2964
2965 // vectorcall adds the concept of a homogeneous vector aggregate, similar to
2966 // other targets.
2967 const Type *Base = nullptr;
2968 uint64_t NumElts = 0;
2969 if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) {
2970 if (FreeSSERegs >= NumElts) {
2971 FreeSSERegs -= NumElts;
2972 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
2973 return ABIArgInfo::getDirect();
2974 return ABIArgInfo::getExpand();
2975 }
2976 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
2977 }
2978
2979
2980 if (Ty->isMemberPointerType()) {
2981 // If the member pointer is represented by an LLVM int or ptr, pass it
2982 // directly.
2983 llvm::Type *LLTy = CGT.ConvertType(Ty);
2984 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
2985 return ABIArgInfo::getDirect();
2986 }
2987
2988 if (RT || Ty->isMemberPointerType()) {
2989 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
2990 // not 1, 2, 4, or 8 bytes, must be passed by reference."
2991 if (Width > 64 || !llvm::isPowerOf2_64(Width))
2992 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2993
2994 // Otherwise, coerce it to a small integer.
2995 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
2996 }
2997
2998 // Bool is always extended in the ABI; other builtin types are not
2999 // extended.
3000 const BuiltinType *BT = Ty->getAs<BuiltinType>();
3001 if (BT && BT->getKind() == BuiltinType::Bool)
3002 return ABIArgInfo::getExtend();
3003
3004 return ABIArgInfo::getDirect();
3005 }
3006
3007 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3008 bool IsVectorCall =
3009 FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
3010
3011 // We can use up to 4 SSE return registers with vectorcall.
3012 unsigned FreeSSERegs = IsVectorCall ? 4 : 0;
3013 if (!getCXXABI().classifyReturnType(FI))
3014 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true);
3015
3016 // We can use up to 6 SSE register parameters with vectorcall.
3017 FreeSSERegs = IsVectorCall ?
6 : 0;
3018 for (auto &I : FI.arguments())
3019 I.info = classify(I.type, FreeSSERegs, false);
3020 }
3021
3022 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3023 CodeGenFunction &CGF) const {
3024 llvm::Type *BPP = CGF.Int8PtrPtrTy;
3025
3026 CGBuilderTy &Builder = CGF.Builder;
3027 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
3028 "ap");
3029 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3030 llvm::Type *PTy =
3031 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3032 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
3033
3034 uint64_t Offset =
3035 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
3036 llvm::Value *NextAddr =
3037 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
3038 "ap.next");
3039 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3040
3041 return AddrTyped;
3042 }
3043
3044 namespace {
3045
3046 class NaClX86_64ABIInfo : public ABIInfo {
3047 public:
3048 NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
3049 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
3050 void computeInfo(CGFunctionInfo &FI) const override;
3051 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3052 CodeGenFunction &CGF) const override;
3053 private:
3054 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
3055 X86_64ABIInfo NInfo; // Used for everything else.
3056 };
3057
3058 class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
3059 bool HasAVX;
3060 public:
3061 NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
3062 : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)), HasAVX(HasAVX) {
3063 }
3064 unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
3065 return HasAVX ? 32 : 16;
3066 }
3067 };
3068
3069 }
3070
3071 void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3072 if (FI.getASTCallingConvention() == CC_PnaclCall)
3073 PInfo.computeInfo(FI);
3074 else
3075 NInfo.computeInfo(FI);
3076 }
3077
3078 llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3079 CodeGenFunction &CGF) const {
3080 // Always use the native convention; calling pnacl-style varargs functions
3081 // is unsupported.
3082 return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
3083 }
3084
3085
3086 // PowerPC-32
3087 namespace {
3088 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
3089 class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
3090 public:
3091 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
3092
3093 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3094 CodeGenFunction &CGF) const override;
3095 };
3096
3097 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
3098 public:
3099 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT)) {}
3100
3101 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3102 // This is recovered from gcc output.
3103 return 1; // r1 is the dedicated stack pointer
3104 }
3105
3106 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3107 llvm::Value *Address) const override;
3108
3109 unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
3110 return 16; // Natural alignment for Altivec vectors.
3111   }
3112 };
3113
3114 }
3115
3116 llvm::Value *PPC32_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
3117                                            QualType Ty,
3118                                            CodeGenFunction &CGF) const {
3119   if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
3120     // TODO: Implement this. For now ignore.
3121     (void)CTy;
3122     return nullptr;
3123   }
3124
3125   bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
3126   bool isInt = Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
3127   llvm::Type *CharPtr = CGF.Int8PtrTy;
3128   llvm::Type *CharPtrPtr = CGF.Int8PtrPtrTy;
3129
3130   CGBuilderTy &Builder = CGF.Builder;
3131   llvm::Value *GPRPtr = Builder.CreateBitCast(VAListAddr, CharPtr, "gprptr");
3132   llvm::Value *GPRPtrAsInt = Builder.CreatePtrToInt(GPRPtr, CGF.Int32Ty);
3133   llvm::Value *FPRPtrAsInt = Builder.CreateAdd(GPRPtrAsInt, Builder.getInt32(1));
3134   llvm::Value *FPRPtr = Builder.CreateIntToPtr(FPRPtrAsInt, CharPtr);
3135   llvm::Value *OverflowAreaPtrAsInt = Builder.CreateAdd(FPRPtrAsInt, Builder.getInt32(3));
3136   llvm::Value *OverflowAreaPtr = Builder.CreateIntToPtr(OverflowAreaPtrAsInt, CharPtrPtr);
3137   llvm::Value *RegsaveAreaPtrAsInt = Builder.CreateAdd(OverflowAreaPtrAsInt, Builder.getInt32(4));
3138   llvm::Value *RegsaveAreaPtr = Builder.CreateIntToPtr(RegsaveAreaPtrAsInt, CharPtrPtr);
3139   llvm::Value *GPR = Builder.CreateLoad(GPRPtr, false, "gpr");
3140   // Align GPR when Ty is i64.
3141   if (isI64) {
3142     llvm::Value *GPRAnd = Builder.CreateAnd(GPR, Builder.getInt8(1));
3143     llvm::Value *CC64 = Builder.CreateICmpEQ(GPRAnd, Builder.getInt8(1));
3144     llvm::Value *GPRPlusOne = Builder.CreateAdd(GPR, Builder.getInt8(1));
3145     GPR = Builder.CreateSelect(CC64, GPRPlusOne, GPR);
3146   }
3147   llvm::Value *FPR = Builder.CreateLoad(FPRPtr, false, "fpr");
3148   llvm::Value *OverflowArea = Builder.CreateLoad(OverflowAreaPtr, false, "overflow_area");
3149   llvm::Value *OverflowAreaAsInt = Builder.CreatePtrToInt(OverflowArea, CGF.Int32Ty);
3150   llvm::Value *RegsaveArea = Builder.CreateLoad(RegsaveAreaPtr, false, "regsave_area");
3151   llvm::Value *RegsaveAreaAsInt = Builder.CreatePtrToInt(RegsaveArea, CGF.Int32Ty);
3152
3153   llvm::Value *CC = Builder.CreateICmpULT(isInt ? GPR : FPR,
3154                                           Builder.getInt8(8), "cond");
3155
3156   llvm::Value *RegConstant = Builder.CreateMul(isInt ? GPR : FPR,
3157                                                Builder.getInt8(isInt ? 4 : 8));
3158
3159   llvm::Value *OurReg = Builder.CreateAdd(RegsaveAreaAsInt, Builder.CreateSExt(RegConstant, CGF.Int32Ty));
3160
3161   if (Ty->isFloatingType())
3162     OurReg = Builder.CreateAdd(OurReg, Builder.getInt32(32));
3163
3164   llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
3165   llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
3166   llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
3167
3168   Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
3169
3170   CGF.EmitBlock(UsingRegs);
3171
3172   llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3173   llvm::Value *Result1 = Builder.CreateIntToPtr(OurReg, PTy);
3174   // Increase the GPR/FPR indexes.
3175   if (isInt) {
3176     GPR = Builder.CreateAdd(GPR, Builder.getInt8(isI64 ? 2 : 1));
3177     Builder.CreateStore(GPR, GPRPtr);
3178   } else {
3179     FPR = Builder.CreateAdd(FPR, Builder.getInt8(1));
3180     Builder.CreateStore(FPR, FPRPtr);
3181   }
3182   CGF.EmitBranch(Cont);
3183
3184   CGF.EmitBlock(UsingOverflow);
3185
3186   // Increase the overflow area.
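  // The value itself is read from the current overflow address; the pointer
  // written back into the va_list below is bumped past the slot just used
  // (4 bytes for the integer/pointer/aggregate class, 8 for floating point).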
3187   llvm::Value *Result2 = Builder.CreateIntToPtr(OverflowAreaAsInt, PTy);
3188   OverflowAreaAsInt = Builder.CreateAdd(OverflowAreaAsInt, Builder.getInt32(isInt ? 4 : 8));
3189   Builder.CreateStore(Builder.CreateIntToPtr(OverflowAreaAsInt, CharPtr), OverflowAreaPtr);
3190   CGF.EmitBranch(Cont);
3191
3192   CGF.EmitBlock(Cont);
3193
3194   llvm::PHINode *Result = CGF.Builder.CreatePHI(PTy, 2, "vaarg.addr");
3195   Result->addIncoming(Result1, UsingRegs);
3196   Result->addIncoming(Result2, UsingOverflow);
3197
3198   if (Ty->isAggregateType()) {
3199     llvm::Value *AGGPtr = Builder.CreateBitCast(Result, CharPtrPtr, "aggrptr");
3200     return Builder.CreateLoad(AGGPtr, false, "aggr");
3201   }
3202
3203   return Result;
3204 }
3205
3206 bool
3207 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3208                                                 llvm::Value *Address) const {
3209   // This is calculated from the LLVM and GCC tables and verified
3210   // against gcc output. AFAIK all ABIs use the same encoding.
3211
3212   CodeGen::CGBuilderTy &Builder = CGF.Builder;
3213
3214   llvm::IntegerType *i8 = CGF.Int8Ty;
3215   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
3216   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
3217   llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
3218
3219   // 0-31: r0-31, the 4-byte general-purpose registers
3220   AssignToArrayRange(Builder, Address, Four8, 0, 31);
3221
3222   // 32-63: fp0-31, the 8-byte floating-point registers
3223   AssignToArrayRange(Builder, Address, Eight8, 32, 63);
3224
3225   // 64-76 are various 4-byte special-purpose registers:
3226   // 64: mq
3227   // 65: lr
3228   // 66: ctr
3229   // 67: ap
3230   // 68-75 cr0-7
3231   // 76: xer
3232   AssignToArrayRange(Builder, Address, Four8, 64, 76);
3233
3234   // 77-108: v0-31, the 16-byte vector registers
3235   AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
3236
3237   // 109: vrsave
3238   // 110: vscr
3239   // 111: spe_acc
3240   // 112: spefscr
3241   // 113: sfp
3242   AssignToArrayRange(Builder, Address, Four8, 109, 113);
3243
3244   return false;
3245 }
3246
3247 // PowerPC-64
3248
3249 namespace {
3250 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
3251 class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
3252 public:
3253   enum ABIKind {
3254     ELFv1 = 0,
3255     ELFv2
3256   };
3257
3258 private:
3259   static const unsigned GPRBits = 64;
3260   ABIKind Kind;
3261
3262 public:
3263   PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
3264       : DefaultABIInfo(CGT), Kind(Kind) {}
3265
3266   bool isPromotableTypeForABI(QualType Ty) const;
3267   bool isAlignedParamType(QualType Ty) const;
3268
3269   ABIArgInfo classifyReturnType(QualType RetTy) const;
3270   ABIArgInfo classifyArgumentType(QualType Ty) const;
3271
3272   bool isHomogeneousAggregateBaseType(QualType Ty) const override;
3273   bool isHomogeneousAggregateSmallEnough(const Type *Ty,
3274                                          uint64_t Members) const override;
3275
3276   // TODO: We can add more logic to computeInfo to improve performance.
3277   // Example: For aggregate arguments that fit in a register, we could
3278   // use getDirectInReg (as is done below for structs containing a single
3279   // floating-point value) to avoid pushing them to memory on function
3280   // entry. This would require changing the logic in PPCISelLowering
3281   // when lowering the parameters in the caller and args in the callee.
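  // For example, given
  //   struct S { double d; };
  // the loop below classifies S with getDirectInReg(double), so it travels
  // in a floating-point register instead of being passed like a generic
  // aggregate.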
3282 void computeInfo(CGFunctionInfo &FI) const override { 3283 if (!getCXXABI().classifyReturnType(FI)) 3284 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3285 for (auto &I : FI.arguments()) { 3286 // We rely on the default argument classification for the most part. 3287 // One exception: An aggregate containing a single floating-point 3288 // or vector item must be passed in a register if one is available. 3289 const Type *T = isSingleElementStruct(I.type, getContext()); 3290 if (T) { 3291 const BuiltinType *BT = T->getAs<BuiltinType>(); 3292 if ((T->isVectorType() && getContext().getTypeSize(T) == 128) || 3293 (BT && BT->isFloatingPoint())) { 3294 QualType QT(T, 0); 3295 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); 3296 continue; 3297 } 3298 } 3299 I.info = classifyArgumentType(I.type); 3300 } 3301 } 3302 3303 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 3304 CodeGenFunction &CGF) const override; 3305 }; 3306 3307 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { 3308 public: 3309 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, 3310 PPC64_SVR4_ABIInfo::ABIKind Kind) 3311 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind)) {} 3312 3313 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 3314 // This is recovered from gcc output. 3315 return 1; // r1 is the dedicated stack pointer 3316 } 3317 3318 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3319 llvm::Value *Address) const override; 3320 3321 unsigned getOpenMPSimdDefaultAlignment(QualType) const override { 3322 return 16; // Natural alignment for Altivec and VSX vectors. 3323 } 3324 }; 3325 3326 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 3327 public: 3328 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 3329 3330 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 3331 // This is recovered from gcc output. 3332 return 1; // r1 is the dedicated stack pointer 3333 } 3334 3335 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3336 llvm::Value *Address) const override; 3337 3338 unsigned getOpenMPSimdDefaultAlignment(QualType) const override { 3339 return 16; // Natural alignment for Altivec vectors. 3340 } 3341 }; 3342 3343 } 3344 3345 // Return true if the ABI requires Ty to be passed sign- or zero- 3346 // extended to 64 bits. 3347 bool 3348 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const { 3349 // Treat an enum type as its underlying type. 3350 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3351 Ty = EnumTy->getDecl()->getIntegerType(); 3352 3353 // Promotable integer types are required to be promoted by the ABI. 3354 if (Ty->isPromotableIntegerType()) 3355 return true; 3356 3357 // In addition to the usual promotable integer types, we also need to 3358 // extend all 32-bit types, since the ABI requires promotion to 64 bits. 3359 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 3360 switch (BT->getKind()) { 3361 case BuiltinType::Int: 3362 case BuiltinType::UInt: 3363 return true; 3364 default: 3365 break; 3366 } 3367 3368 return false; 3369 } 3370 3371 /// isAlignedParamType - Determine whether a type requires 16-byte 3372 /// alignment in the parameter area. 3373 bool 3374 PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty) const { 3375 // Complex types are passed just like their elements. 
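  // (e.g. a _Complex double is checked here as if it were a plain double).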
3376 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 3377 Ty = CTy->getElementType(); 3378 3379 // Only vector types of size 16 bytes need alignment (larger types are 3380 // passed via reference, smaller types are not aligned). 3381 if (Ty->isVectorType()) 3382 return getContext().getTypeSize(Ty) == 128; 3383 3384 // For single-element float/vector structs, we consider the whole type 3385 // to have the same alignment requirements as its single element. 3386 const Type *AlignAsType = nullptr; 3387 const Type *EltType = isSingleElementStruct(Ty, getContext()); 3388 if (EltType) { 3389 const BuiltinType *BT = EltType->getAs<BuiltinType>(); 3390 if ((EltType->isVectorType() && 3391 getContext().getTypeSize(EltType) == 128) || 3392 (BT && BT->isFloatingPoint())) 3393 AlignAsType = EltType; 3394 } 3395 3396 // Likewise for ELFv2 homogeneous aggregates. 3397 const Type *Base = nullptr; 3398 uint64_t Members = 0; 3399 if (!AlignAsType && Kind == ELFv2 && 3400 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members)) 3401 AlignAsType = Base; 3402 3403 // With special case aggregates, only vector base types need alignment. 3404 if (AlignAsType) 3405 return AlignAsType->isVectorType(); 3406 3407 // Otherwise, we only need alignment for any aggregate type that 3408 // has an alignment requirement of >= 16 bytes. 3409 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) 3410 return true; 3411 3412 return false; 3413 } 3414 3415 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous 3416 /// aggregate. Base is set to the base element type, and Members is set 3417 /// to the number of base elements. 3418 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base, 3419 uint64_t &Members) const { 3420 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 3421 uint64_t NElements = AT->getSize().getZExtValue(); 3422 if (NElements == 0) 3423 return false; 3424 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members)) 3425 return false; 3426 Members *= NElements; 3427 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 3428 const RecordDecl *RD = RT->getDecl(); 3429 if (RD->hasFlexibleArrayMember()) 3430 return false; 3431 3432 Members = 0; 3433 3434 // If this is a C++ record, check the bases first. 3435 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 3436 for (const auto &I : CXXRD->bases()) { 3437 // Ignore empty records. 3438 if (isEmptyRecord(getContext(), I.getType(), true)) 3439 continue; 3440 3441 uint64_t FldMembers; 3442 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers)) 3443 return false; 3444 3445 Members += FldMembers; 3446 } 3447 } 3448 3449 for (const auto *FD : RD->fields()) { 3450 // Ignore (non-zero arrays of) empty records. 3451 QualType FT = FD->getType(); 3452 while (const ConstantArrayType *AT = 3453 getContext().getAsConstantArrayType(FT)) { 3454 if (AT->getSize().getZExtValue() == 0) 3455 return false; 3456 FT = AT->getElementType(); 3457 } 3458 if (isEmptyRecord(getContext(), FT, true)) 3459 continue; 3460 3461 // For compatibility with GCC, ignore empty bitfields in C++ mode. 3462 if (getContext().getLangOpts().CPlusPlus && 3463 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0) 3464 continue; 3465 3466 uint64_t FldMembers; 3467 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers)) 3468 return false; 3469 3470 Members = (RD->isUnion() ? 
3471 std::max(Members, FldMembers) : Members + FldMembers); 3472 } 3473 3474 if (!Base) 3475 return false; 3476 3477 // Ensure there is no padding. 3478 if (getContext().getTypeSize(Base) * Members != 3479 getContext().getTypeSize(Ty)) 3480 return false; 3481 } else { 3482 Members = 1; 3483 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 3484 Members = 2; 3485 Ty = CT->getElementType(); 3486 } 3487 3488 // Most ABIs only support float, double, and some vector type widths. 3489 if (!isHomogeneousAggregateBaseType(Ty)) 3490 return false; 3491 3492 // The base type must be the same for all members. Types that 3493 // agree in both total size and mode (float vs. vector) are 3494 // treated as being equivalent here. 3495 const Type *TyPtr = Ty.getTypePtr(); 3496 if (!Base) 3497 Base = TyPtr; 3498 3499 if (Base->isVectorType() != TyPtr->isVectorType() || 3500 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr)) 3501 return false; 3502 } 3503 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members); 3504 } 3505 3506 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 3507 // Homogeneous aggregates for ELFv2 must have base types of float, 3508 // double, long double, or 128-bit vectors. 3509 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 3510 if (BT->getKind() == BuiltinType::Float || 3511 BT->getKind() == BuiltinType::Double || 3512 BT->getKind() == BuiltinType::LongDouble) 3513 return true; 3514 } 3515 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3516 if (getContext().getTypeSize(VT) == 128) 3517 return true; 3518 } 3519 return false; 3520 } 3521 3522 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough( 3523 const Type *Base, uint64_t Members) const { 3524 // Vector types require one register, floating point types require one 3525 // or two registers depending on their size. 3526 uint32_t NumRegs = 3527 Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64; 3528 3529 // Homogeneous Aggregates may occupy at most 8 registers. 3530 return Members * NumRegs <= 8; 3531 } 3532 3533 ABIArgInfo 3534 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { 3535 Ty = useFirstFieldIfTransparentUnion(Ty); 3536 3537 if (Ty->isAnyComplexType()) 3538 return ABIArgInfo::getDirect(); 3539 3540 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes) 3541 // or via reference (larger than 16 bytes). 3542 if (Ty->isVectorType()) { 3543 uint64_t Size = getContext().getTypeSize(Ty); 3544 if (Size > 128) 3545 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3546 else if (Size < 128) { 3547 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 3548 return ABIArgInfo::getDirect(CoerceTy); 3549 } 3550 } 3551 3552 if (isAggregateTypeForABI(Ty)) { 3553 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 3554 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); 3555 3556 uint64_t ABIAlign = isAlignedParamType(Ty)? 16 : 8; 3557 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8; 3558 3559 // ELFv2 homogeneous aggregates are passed as array types. 
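    // For example, struct { double x, y; } has Base = double and Members = 2,
    // so it is coerced to the LLVM type [2 x double] below.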
3560 const Type *Base = nullptr; 3561 uint64_t Members = 0; 3562 if (Kind == ELFv2 && 3563 isHomogeneousAggregate(Ty, Base, Members)) { 3564 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 3565 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 3566 return ABIArgInfo::getDirect(CoerceTy); 3567 } 3568 3569 // If an aggregate may end up fully in registers, we do not 3570 // use the ByVal method, but pass the aggregate as array. 3571 // This is usually beneficial since we avoid forcing the 3572 // back-end to store the argument to memory. 3573 uint64_t Bits = getContext().getTypeSize(Ty); 3574 if (Bits > 0 && Bits <= 8 * GPRBits) { 3575 llvm::Type *CoerceTy; 3576 3577 // Types up to 8 bytes are passed as integer type (which will be 3578 // properly aligned in the argument save area doubleword). 3579 if (Bits <= GPRBits) 3580 CoerceTy = llvm::IntegerType::get(getVMContext(), 3581 llvm::RoundUpToAlignment(Bits, 8)); 3582 // Larger types are passed as arrays, with the base type selected 3583 // according to the required alignment in the save area. 3584 else { 3585 uint64_t RegBits = ABIAlign * 8; 3586 uint64_t NumRegs = llvm::RoundUpToAlignment(Bits, RegBits) / RegBits; 3587 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits); 3588 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs); 3589 } 3590 3591 return ABIArgInfo::getDirect(CoerceTy); 3592 } 3593 3594 // All other aggregates are passed ByVal. 3595 return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true, 3596 /*Realign=*/TyAlign > ABIAlign); 3597 } 3598 3599 return (isPromotableTypeForABI(Ty) ? 3600 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3601 } 3602 3603 ABIArgInfo 3604 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { 3605 if (RetTy->isVoidType()) 3606 return ABIArgInfo::getIgnore(); 3607 3608 if (RetTy->isAnyComplexType()) 3609 return ABIArgInfo::getDirect(); 3610 3611 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes) 3612 // or via reference (larger than 16 bytes). 3613 if (RetTy->isVectorType()) { 3614 uint64_t Size = getContext().getTypeSize(RetTy); 3615 if (Size > 128) 3616 return ABIArgInfo::getIndirect(0); 3617 else if (Size < 128) { 3618 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 3619 return ABIArgInfo::getDirect(CoerceTy); 3620 } 3621 } 3622 3623 if (isAggregateTypeForABI(RetTy)) { 3624 // ELFv2 homogeneous aggregates are returned as array types. 3625 const Type *Base = nullptr; 3626 uint64_t Members = 0; 3627 if (Kind == ELFv2 && 3628 isHomogeneousAggregate(RetTy, Base, Members)) { 3629 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 3630 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 3631 return ABIArgInfo::getDirect(CoerceTy); 3632 } 3633 3634 // ELFv2 small aggregates are returned in up to two registers. 3635 uint64_t Bits = getContext().getTypeSize(RetTy); 3636 if (Kind == ELFv2 && Bits <= 2 * GPRBits) { 3637 if (Bits == 0) 3638 return ABIArgInfo::getIgnore(); 3639 3640 llvm::Type *CoerceTy; 3641 if (Bits > GPRBits) { 3642 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits); 3643 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr); 3644 } else 3645 CoerceTy = llvm::IntegerType::get(getVMContext(), 3646 llvm::RoundUpToAlignment(Bits, 8)); 3647 return ABIArgInfo::getDirect(CoerceTy); 3648 } 3649 3650 // All other aggregates are returned indirectly. 3651 return ABIArgInfo::getIndirect(0); 3652 } 3653 3654 return (isPromotableTypeForABI(RetTy) ? 
3655 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3656 } 3657 3658 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 3659 llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr, 3660 QualType Ty, 3661 CodeGenFunction &CGF) const { 3662 llvm::Type *BP = CGF.Int8PtrTy; 3663 llvm::Type *BPP = CGF.Int8PtrPtrTy; 3664 3665 CGBuilderTy &Builder = CGF.Builder; 3666 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 3667 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 3668 3669 // Handle types that require 16-byte alignment in the parameter save area. 3670 if (isAlignedParamType(Ty)) { 3671 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 3672 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(15)); 3673 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt64(-16)); 3674 Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align"); 3675 } 3676 3677 // Update the va_list pointer. The pointer should be bumped by the 3678 // size of the object. We can trust getTypeSize() except for a complex 3679 // type whose base type is smaller than a doubleword. For these, the 3680 // size of the object is 16 bytes; see below for further explanation. 3681 unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8; 3682 QualType BaseTy; 3683 unsigned CplxBaseSize = 0; 3684 3685 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 3686 BaseTy = CTy->getElementType(); 3687 CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8; 3688 if (CplxBaseSize < 8) 3689 SizeInBytes = 16; 3690 } 3691 3692 unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8); 3693 llvm::Value *NextAddr = 3694 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), 3695 "ap.next"); 3696 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 3697 3698 // If we have a complex type and the base type is smaller than 8 bytes, 3699 // the ABI calls for the real and imaginary parts to be right-adjusted 3700 // in separate doublewords. However, Clang expects us to produce a 3701 // pointer to a structure with the two parts packed tightly. So generate 3702 // loads of the real and imaginary parts relative to the va_list pointer, 3703 // and store them to a temporary structure. 3704 if (CplxBaseSize && CplxBaseSize < 8) { 3705 llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 3706 llvm::Value *ImagAddr = RealAddr; 3707 if (CGF.CGM.getDataLayout().isBigEndian()) { 3708 RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize)); 3709 ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize)); 3710 } else { 3711 ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(8)); 3712 } 3713 llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy)); 3714 RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy); 3715 ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy); 3716 llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal"); 3717 llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag"); 3718 llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty), 3719 "vacplx"); 3720 llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real"); 3721 llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag"); 3722 Builder.CreateStore(Real, RealPtr, false); 3723 Builder.CreateStore(Imag, ImagPtr, false); 3724 return Ptr; 3725 } 3726 3727 // If the argument is smaller than 8 bytes, it is right-adjusted in 3728 // its doubleword slot. 
Adjust the pointer to pick it up from the
3729 // correct offset.
3730   if (SizeInBytes < 8 && CGF.CGM.getDataLayout().isBigEndian()) {
3731     llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
3732     AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
3733     Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
3734   }
3735
3736   llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3737   return Builder.CreateBitCast(Addr, PTy);
3738 }
3739
3740 static bool
3741 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3742                               llvm::Value *Address) {
3743   // This is calculated from the LLVM and GCC tables and verified
3744   // against gcc output. AFAIK all ABIs use the same encoding.
3745
3746   CodeGen::CGBuilderTy &Builder = CGF.Builder;
3747
3748   llvm::IntegerType *i8 = CGF.Int8Ty;
3749   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
3750   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
3751   llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
3752
3753   // 0-31: r0-31, the 8-byte general-purpose registers
3754   AssignToArrayRange(Builder, Address, Eight8, 0, 31);
3755
3756   // 32-63: fp0-31, the 8-byte floating-point registers
3757   AssignToArrayRange(Builder, Address, Eight8, 32, 63);
3758
3759   // 64-76 are various 4-byte special-purpose registers:
3760   // 64: mq
3761   // 65: lr
3762   // 66: ctr
3763   // 67: ap
3764   // 68-75 cr0-7
3765   // 76: xer
3766   AssignToArrayRange(Builder, Address, Four8, 64, 76);
3767
3768   // 77-108: v0-31, the 16-byte vector registers
3769   AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
3770
3771   // 109: vrsave
3772   // 110: vscr
3773   // 111: spe_acc
3774   // 112: spefscr
3775   // 113: sfp
3776   AssignToArrayRange(Builder, Address, Four8, 109, 113);
3777
3778   return false;
3779 }
3780
3781 bool
3782 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
3783     CodeGen::CodeGenFunction &CGF,
3784     llvm::Value *Address) const {
3785
3786   return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3787 }
3788
3789 bool
3790 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3791                                                 llvm::Value *Address) const {
3792
3793   return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3794 }
3795
3796 //===----------------------------------------------------------------------===//
3797 // AArch64 ABI Implementation
3798 //===----------------------------------------------------------------------===//
3799
3800 namespace {
3801
3802 class AArch64ABIInfo : public ABIInfo {
3803 public:
3804   enum ABIKind {
3805     AAPCS = 0,
3806     DarwinPCS
3807   };
3808
3809 private:
3810   ABIKind Kind;
3811
3812 public:
3813   AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}
3814
3815 private:
3816   ABIKind getABIKind() const { return Kind; }
3817   bool isDarwinPCS() const { return Kind == DarwinPCS; }
3818
3819   ABIArgInfo classifyReturnType(QualType RetTy) const;
3820   ABIArgInfo classifyArgumentType(QualType Ty) const;
3821   bool isHomogeneousAggregateBaseType(QualType Ty) const override;
3822   bool isHomogeneousAggregateSmallEnough(const Type *Ty,
3823                                          uint64_t Members) const override;
3824
3825   bool isIllegalVectorType(QualType Ty) const;
3826
3827   void computeInfo(CGFunctionInfo &FI) const override {
3828     if (!getCXXABI().classifyReturnType(FI))
3829       FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3830
3831     for (auto &it : FI.arguments())
3832       it.info = classifyArgumentType(it.type);
3833   }
3834
3835   llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
3836                                CodeGenFunction &CGF) const;
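  // DarwinPCS uses a plain char * va_list, handled by EmitDarwinVAArg; AAPCS
  // uses the five-field va_list struct described in EmitAAPCSVAArg below.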
3837
3838   llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
3839                               CodeGenFunction &CGF) const;
3840
3841   llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3842                          CodeGenFunction &CGF) const override {
3843     return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
3844                          : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
3845   }
3846 };
3847
3848 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
3849 public:
3850   AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
3851       : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
3852
3853   StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
3854     return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
3855   }
3856
3857   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { return 31; }
3858
3859   bool doesReturnSlotInterfereWithArgs() const override { return false; }
3860 };
3861 }
3862
3863 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
3864   Ty = useFirstFieldIfTransparentUnion(Ty);
3865
3866   // Handle illegal vector types here.
3867   if (isIllegalVectorType(Ty)) {
3868     uint64_t Size = getContext().getTypeSize(Ty);
3869     if (Size <= 32) {
3870       llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
3871       return ABIArgInfo::getDirect(ResType);
3872     }
3873     if (Size == 64) {
3874       llvm::Type *ResType =
3875           llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
3876       return ABIArgInfo::getDirect(ResType);
3877     }
3878     if (Size == 128) {
3879       llvm::Type *ResType =
3880           llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
3881       return ABIArgInfo::getDirect(ResType);
3882     }
3883     return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3884   }
3885
3886   if (!isAggregateTypeForABI(Ty)) {
3887     // Treat an enum type as its underlying type.
3888     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3889       Ty = EnumTy->getDecl()->getIntegerType();
3890
3891     return (Ty->isPromotableIntegerType() && isDarwinPCS()
3892                 ? ABIArgInfo::getExtend()
3893                 : ABIArgInfo::getDirect());
3894   }
3895
3896   // Structures with either a non-trivial destructor or a non-trivial
3897   // copy constructor are always indirect.
3898   if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
3899     return ABIArgInfo::getIndirect(0, /*ByVal=*/RAA ==
3900                                           CGCXXABI::RAA_DirectInMemory);
3901   }
3902
3903   // Empty records are always ignored on Darwin, but are passed in C++ mode
3904   // elsewhere for GNU compatibility.
3905   if (isEmptyRecord(getContext(), Ty, true)) {
3906     if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
3907       return ABIArgInfo::getIgnore();
3908
3909     return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
3910   }
3911
3912   // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
3913   const Type *Base = nullptr;
3914   uint64_t Members = 0;
3915   if (isHomogeneousAggregate(Ty, Base, Members)) {
3916     return ABIArgInfo::getDirect(
3917         llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
3918   }
3919
3920   // Aggregates <= 16 bytes are passed directly in registers or on the stack.
3921   uint64_t Size = getContext().getTypeSize(Ty);
3922   if (Size <= 128) {
3923     unsigned Alignment = getContext().getTypeAlign(Ty);
3924     Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
3925
3926     // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
3927     // For aggregates with 16-byte alignment, we use i128.
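    // For example, struct { long a, b; } becomes [2 x i64], while a struct
    // holding a single __int128 is passed as i128.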
3928 if (Alignment < 128 && Size == 128) { 3929 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); 3930 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); 3931 } 3932 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 3933 } 3934 3935 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 3936 } 3937 3938 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const { 3939 if (RetTy->isVoidType()) 3940 return ABIArgInfo::getIgnore(); 3941 3942 // Large vector types should be returned via memory. 3943 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 3944 return ABIArgInfo::getIndirect(0); 3945 3946 if (!isAggregateTypeForABI(RetTy)) { 3947 // Treat an enum type as its underlying type. 3948 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3949 RetTy = EnumTy->getDecl()->getIntegerType(); 3950 3951 return (RetTy->isPromotableIntegerType() && isDarwinPCS() 3952 ? ABIArgInfo::getExtend() 3953 : ABIArgInfo::getDirect()); 3954 } 3955 3956 if (isEmptyRecord(getContext(), RetTy, true)) 3957 return ABIArgInfo::getIgnore(); 3958 3959 const Type *Base = nullptr; 3960 uint64_t Members = 0; 3961 if (isHomogeneousAggregate(RetTy, Base, Members)) 3962 // Homogeneous Floating-point Aggregates (HFAs) are returned directly. 3963 return ABIArgInfo::getDirect(); 3964 3965 // Aggregates <= 16 bytes are returned directly in registers or on the stack. 3966 uint64_t Size = getContext().getTypeSize(RetTy); 3967 if (Size <= 128) { 3968 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes 3969 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 3970 } 3971 3972 return ABIArgInfo::getIndirect(0); 3973 } 3974 3975 /// isIllegalVectorType - check whether the vector type is legal for AArch64. 3976 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const { 3977 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3978 // Check whether VT is legal. 3979 unsigned NumElements = VT->getNumElements(); 3980 uint64_t Size = getContext().getTypeSize(VT); 3981 // NumElements should be power of 2 between 1 and 16. 3982 if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16) 3983 return true; 3984 return Size != 64 && (Size != 128 || NumElements == 1); 3985 } 3986 return false; 3987 } 3988 3989 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 3990 // Homogeneous aggregates for AAPCS64 must have base types of a floating 3991 // point type or a short-vector type. This is the same as the 32-bit ABI, 3992 // but with the difference that any floating-point type is allowed, 3993 // including __fp16. 
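  // For example, struct { __fp16 a, b, c, d; } qualifies as a homogeneous
  // aggregate here even though __fp16 is not a valid base type under the
  // 32-bit AAPCS.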
3994   if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3995     if (BT->isFloatingPoint())
3996       return true;
3997   } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
3998     unsigned VecSize = getContext().getTypeSize(VT);
3999     if (VecSize == 64 || VecSize == 128)
4000       return true;
4001   }
4002   return false;
4003 }
4004
4005 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
4006                                                        uint64_t Members) const {
4007   return Members <= 4;
4008 }
4009
4010 llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
4011                                             QualType Ty,
4012                                             CodeGenFunction &CGF) const {
4013   ABIArgInfo AI = classifyArgumentType(Ty);
4014   bool IsIndirect = AI.isIndirect();
4015
4016   llvm::Type *BaseTy = CGF.ConvertType(Ty);
4017   if (IsIndirect)
4018     BaseTy = llvm::PointerType::getUnqual(BaseTy);
4019   else if (AI.getCoerceToType())
4020     BaseTy = AI.getCoerceToType();
4021
4022   unsigned NumRegs = 1;
4023   if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
4024     BaseTy = ArrTy->getElementType();
4025     NumRegs = ArrTy->getNumElements();
4026   }
4027   bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
4028
4029   // The AArch64 va_list type and handling is specified in the Procedure Call
4030   // Standard, section B.4:
4031   //
4032   // struct {
4033   //   void *__stack;
4034   //   void *__gr_top;
4035   //   void *__vr_top;
4036   //   int __gr_offs;
4037   //   int __vr_offs;
4038   // };
4039
4040   llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
4041   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
4042   llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
4043   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
4044   auto &Ctx = CGF.getContext();
4045
4046   llvm::Value *reg_offs_p = nullptr, *reg_offs = nullptr;
4047   int reg_top_index;
4048   int RegSize = IsIndirect ? 8 : getContext().getTypeSize(Ty) / 8;
4049   if (!IsFPR) {
4050     // 3 is the field number of __gr_offs
4051     reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
4052     reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
4053     reg_top_index = 1; // field number for __gr_top
4054     RegSize = llvm::RoundUpToAlignment(RegSize, 8);
4055   } else {
4056     // 4 is the field number of __vr_offs.
4057     reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
4058     reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
4059     reg_top_index = 2; // field number for __vr_top
4060     RegSize = 16 * NumRegs;
4061   }
4062
4063   //=======================================
4064   // Find out where argument was passed
4065   //=======================================
4066
4067   // If reg_offs >= 0 we're already using the stack for this type of
4068   // argument. We don't want to keep updating reg_offs (in case it overflows,
4069   // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
4070   // whatever they get).
4071   llvm::Value *UsingStack = nullptr;
4072   UsingStack = CGF.Builder.CreateICmpSGE(
4073       reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
4074
4075   CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
4076
4077   // Otherwise, at least some kind of argument could go in these registers;
4078   // the question is whether this particular type is too big.
4079   CGF.EmitBlock(MaybeRegBlock);
4080
4081   // Integer arguments may need to be aligned to an even register pair (for
4082   // example a "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In
4083   // this case we align __gr_offs to calculate the potential address.
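  // For example, with a 16-byte-aligned type and __gr_offs == -56, rounding
  // up to a multiple of 16 yields -48, skipping one GPR so the argument
  // starts in an even-numbered register.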
4084   if (!IsFPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
4085     int Align = Ctx.getTypeAlign(Ty) / 8;
4086
4087     reg_offs = CGF.Builder.CreateAdd(
4088         reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
4089         "align_regoffs");
4090     reg_offs = CGF.Builder.CreateAnd(
4091         reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
4092         "aligned_regoffs");
4093   }
4094
4095   // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
4096   llvm::Value *NewOffset = nullptr;
4097   NewOffset = CGF.Builder.CreateAdd(
4098       reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
4099   CGF.Builder.CreateStore(NewOffset, reg_offs_p);
4100
4101   // Now we're in a position to decide whether this argument really was in
4102   // registers or not.
4103   llvm::Value *InRegs = nullptr;
4104   InRegs = CGF.Builder.CreateICmpSLE(
4105       NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
4106
4107   CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
4108
4109   //=======================================
4110   // Argument was in registers
4111   //=======================================
4112
4113   // Now we emit the code for if the argument was originally passed in
4114   // registers. First start the appropriate block:
4115   CGF.EmitBlock(InRegBlock);
4116
4117   llvm::Value *reg_top_p = nullptr, *reg_top = nullptr;
4118   reg_top_p =
4119       CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
4120   reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
4121   llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
4122   llvm::Value *RegAddr = nullptr;
4123   llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
4124
4125   if (IsIndirect) {
4126     // If it's been passed indirectly (actually a struct), whatever we find from
4127     // stored registers or on the stack will actually be a struct **.
4128     MemTy = llvm::PointerType::getUnqual(MemTy);
4129   }
4130
4131   const Type *Base = nullptr;
4132   uint64_t NumMembers = 0;
4133   bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
4134   if (IsHFA && NumMembers > 1) {
4135     // Homogeneous aggregates passed in registers will have their elements split
4136     // and stored 16 bytes apart regardless of size (they're notionally in qN,
4137     // qN+1, ...). We reload and store into a temporary local variable
4138     // contiguously.
4139     assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
4140     llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
4141     llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
4142     llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
4143     int Offset = 0;
4144
4145     if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128)
4146       Offset = 16 - Ctx.getTypeSize(Base) / 8;
4147     for (unsigned i = 0; i < NumMembers; ++i) {
4148       llvm::Value *BaseOffset =
4149           llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset);
4150       llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
4151       LoadAddr = CGF.Builder.CreateBitCast(
4152           LoadAddr, llvm::PointerType::getUnqual(BaseTy));
4153       llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);
4154
4155       llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
4156       CGF.Builder.CreateStore(Elem, StoreAddr);
4157     }
4158
4159     RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
4160   } else {
4161     // Otherwise the object is contiguous in memory.
4162     unsigned BeAlign = reg_top_index == 2 ? 16 : 8;
4163     if (CGF.CGM.getDataLayout().isBigEndian() &&
4164         (IsHFA || !isAggregateTypeForABI(Ty)) &&
4165         Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
4166       int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;
4167       BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);
4168
4169       BaseAddr = CGF.Builder.CreateAdd(
4170           BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
4171
4172       BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
4173     }
4174
4175     RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
4176   }
4177
4178   CGF.EmitBranch(ContBlock);
4179
4180   //=======================================
4181   // Argument was on the stack
4182   //=======================================
4183   CGF.EmitBlock(OnStackBlock);
4184
4185   llvm::Value *stack_p = nullptr, *OnStackAddr = nullptr;
4186   stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
4187   OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
4188
4189   // Again, stack arguments may need realignment. In this case both integer and
4190   // floating-point ones might be affected.
4191   if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
4192     int Align = Ctx.getTypeAlign(Ty) / 8;
4193
4194     OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
4195
4196     OnStackAddr = CGF.Builder.CreateAdd(
4197         OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
4198         "align_stack");
4199     OnStackAddr = CGF.Builder.CreateAnd(
4200         OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
4201         "align_stack");
4202
4203     OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
4204   }
4205
4206   uint64_t StackSize;
4207   if (IsIndirect)
4208     StackSize = 8;
4209   else
4210     StackSize = Ctx.getTypeSize(Ty) / 8;
4211
4212   // All stack slots are 8 bytes
4213   StackSize = llvm::RoundUpToAlignment(StackSize, 8);
4214
4215   llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
4216   llvm::Value *NewStack =
4217       CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack");
4218
4219   // Write the new value of __stack for the next call to va_arg
4220   CGF.Builder.CreateStore(NewStack, stack_p);
4221
4222   if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
4223       Ctx.getTypeSize(Ty) < 64) {
4224     int Offset = 8 - Ctx.getTypeSize(Ty) / 8;
4225     OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
4226
4227     OnStackAddr = CGF.Builder.CreateAdd(
4228         OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
4229
4230     OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
4231   }
4232
4233   OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);
4234
4235   CGF.EmitBranch(ContBlock);
4236
4237   //=======================================
4238   // Tidy up
4239   //=======================================
4240   CGF.EmitBlock(ContBlock);
4241
4242   llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
4243   ResAddr->addIncoming(RegAddr, InRegBlock);
4244   ResAddr->addIncoming(OnStackAddr, OnStackBlock);
4245
4246   if (IsIndirect)
4247     return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
4248
4249   return ResAddr;
4250 }
4251
4252 llvm::Value *AArch64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
4253                                              CodeGenFunction &CGF) const {
4254   // We do not support va_arg for aggregates or illegal vector types.
4255   // Lower VAArg here for these cases and use the LLVM va_arg instruction for
4256   // other cases.
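  // Returning nullptr from EmitVAArg signals the caller to fall back to the
  // native LLVM va_arg instruction for those simple scalar cases.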
4257 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty)) 4258 return nullptr; 4259 4260 uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8; 4261 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 4262 4263 const Type *Base = nullptr; 4264 uint64_t Members = 0; 4265 bool isHA = isHomogeneousAggregate(Ty, Base, Members); 4266 4267 bool isIndirect = false; 4268 // Arguments bigger than 16 bytes which aren't homogeneous aggregates should 4269 // be passed indirectly. 4270 if (Size > 16 && !isHA) { 4271 isIndirect = true; 4272 Size = 8; 4273 Align = 8; 4274 } 4275 4276 llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); 4277 llvm::Type *BPP = llvm::PointerType::getUnqual(BP); 4278 4279 CGBuilderTy &Builder = CGF.Builder; 4280 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 4281 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 4282 4283 if (isEmptyRecord(getContext(), Ty, true)) { 4284 // These are ignored for parameter passing purposes. 4285 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 4286 return Builder.CreateBitCast(Addr, PTy); 4287 } 4288 4289 const uint64_t MinABIAlign = 8; 4290 if (Align > MinABIAlign) { 4291 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1); 4292 Addr = Builder.CreateGEP(Addr, Offset); 4293 llvm::Value *AsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); 4294 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~(Align - 1)); 4295 llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask); 4296 Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align"); 4297 } 4298 4299 uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign); 4300 llvm::Value *NextAddr = Builder.CreateGEP( 4301 Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next"); 4302 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 4303 4304 if (isIndirect) 4305 Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP)); 4306 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 4307 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 4308 4309 return AddrTyped; 4310 } 4311 4312 //===----------------------------------------------------------------------===// 4313 // ARM ABI Implementation 4314 //===----------------------------------------------------------------------===// 4315 4316 namespace { 4317 4318 class ARMABIInfo : public ABIInfo { 4319 public: 4320 enum ABIKind { 4321 APCS = 0, 4322 AAPCS = 1, 4323 AAPCS_VFP 4324 }; 4325 4326 private: 4327 ABIKind Kind; 4328 mutable int VFPRegs[16]; 4329 const unsigned NumVFPs; 4330 const unsigned NumGPRs; 4331 mutable unsigned AllocatedGPRs; 4332 mutable unsigned AllocatedVFPs; 4333 4334 public: 4335 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind), 4336 NumVFPs(16), NumGPRs(4) { 4337 setCCs(); 4338 resetAllocatedRegs(); 4339 } 4340 4341 bool isEABI() const { 4342 switch (getTarget().getTriple().getEnvironment()) { 4343 case llvm::Triple::Android: 4344 case llvm::Triple::EABI: 4345 case llvm::Triple::EABIHF: 4346 case llvm::Triple::GNUEABI: 4347 case llvm::Triple::GNUEABIHF: 4348 return true; 4349 default: 4350 return false; 4351 } 4352 } 4353 4354 bool isEABIHF() const { 4355 switch (getTarget().getTriple().getEnvironment()) { 4356 case llvm::Triple::EABIHF: 4357 case llvm::Triple::GNUEABIHF: 4358 return true; 4359 default: 4360 return false; 4361 } 4362 } 4363 4364 ABIKind getABIKind() const { return Kind; } 4365 4366 private: 4367 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const; 4368 
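  // IsCPRC is set when the argument is a VFP co-processor register candidate
  // (AAPCS 6.1.2.1); computeInfo consults it when deciding whether to insert
  // GPR padding.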
ABIArgInfo classifyArgumentType(QualType Ty, bool isVariadic,
4369                                   bool &IsCPRC) const;
4370   bool isIllegalVectorType(QualType Ty) const;
4371
4372   bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4373   bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4374                                          uint64_t Members) const override;
4375
4376   void computeInfo(CGFunctionInfo &FI) const override;
4377
4378   llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4379                          CodeGenFunction &CGF) const override;
4380
4381   llvm::CallingConv::ID getLLVMDefaultCC() const;
4382   llvm::CallingConv::ID getABIDefaultCC() const;
4383   void setCCs();
4384
4385   void markAllocatedGPRs(unsigned Alignment, unsigned NumRequired) const;
4386   void markAllocatedVFPs(unsigned Alignment, unsigned NumRequired) const;
4387   void resetAllocatedRegs() const;
4388 };
4389
4390 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
4391 public:
4392   ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
4393       : TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
4394
4395   const ARMABIInfo &getABIInfo() const {
4396     return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
4397   }
4398
4399   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4400     return 13;
4401   }
4402
4403   StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
4404     return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
4405   }
4406
4407   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4408                                llvm::Value *Address) const override {
4409     llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
4410
4411     // 0-15 are the 16 integer registers.
4412     AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
4413     return false;
4414   }
4415
4416   unsigned getSizeOfUnwindException() const override {
4417     if (getABIInfo().isEABI()) return 88;
4418     return TargetCodeGenInfo::getSizeOfUnwindException();
4419   }
4420
4421   void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4422                            CodeGen::CodeGenModule &CGM) const override {
4423     const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
4424     if (!FD)
4425       return;
4426
4427     const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
4428     if (!Attr)
4429       return;
4430
4431     const char *Kind;
4432     switch (Attr->getInterrupt()) {
4433     case ARMInterruptAttr::Generic: Kind = ""; break;
4434     case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
4435     case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
4436     case ARMInterruptAttr::SWI: Kind = "SWI"; break;
4437     case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
4438     case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
4439     }
4440
4441     llvm::Function *Fn = cast<llvm::Function>(GV);
4442
4443     Fn->addFnAttr("interrupt", Kind);
4444
4445     if (cast<ARMABIInfo>(getABIInfo()).getABIKind() == ARMABIInfo::APCS)
4446       return;
4447
4448     // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
4449     // however this is not necessarily true on taking any interrupt. Instruct
4450     // the backend to perform a realignment as part of the function prologue.
4451     llvm::AttrBuilder B;
4452     B.addStackAlignmentAttr(8);
4453     Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
4454                       llvm::AttributeSet::get(CGM.getLLVMContext(),
4455                                               llvm::AttributeSet::FunctionIndex,
4456                                               B));
4457   }
4458
4459 };
4460
4461 }
4462
4463 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
4464   // To correctly handle Homogeneous Aggregates, we need to keep track of the
4465   // VFP registers allocated so far.
4466 // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive 4467 // VFP registers of the appropriate type unallocated then the argument is 4468 // allocated to the lowest-numbered sequence of such registers. 4469 // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are 4470 // unallocated are marked as unavailable. 4471 resetAllocatedRegs(); 4472 4473 if (getCXXABI().classifyReturnType(FI)) { 4474 if (FI.getReturnInfo().isIndirect()) 4475 markAllocatedGPRs(1, 1); 4476 } else { 4477 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic()); 4478 } 4479 for (auto &I : FI.arguments()) { 4480 unsigned PreAllocationVFPs = AllocatedVFPs; 4481 unsigned PreAllocationGPRs = AllocatedGPRs; 4482 bool IsCPRC = false; 4483 // 6.1.2.3 There is one VFP co-processor register class using registers 4484 // s0-s15 (d0-d7) for passing arguments. 4485 I.info = classifyArgumentType(I.type, FI.isVariadic(), IsCPRC); 4486 4487 // If we have allocated some arguments onto the stack (due to running 4488 // out of VFP registers), we cannot split an argument between GPRs and 4489 // the stack. If this situation occurs, we add padding to prevent the 4490 // GPRs from being used. In this situation, the current argument could 4491 // only be allocated by rule C.8, so rule C.6 would mark these GPRs as 4492 // unusable anyway. 4493 // We do not have to do this if the argument is being passed ByVal, as the 4494 // backend can handle that situation correctly. 4495 const bool StackUsed = PreAllocationGPRs > NumGPRs || PreAllocationVFPs > NumVFPs; 4496 const bool IsByVal = I.info.isIndirect() && I.info.getIndirectByVal(); 4497 if (!IsCPRC && PreAllocationGPRs < NumGPRs && AllocatedGPRs > NumGPRs && 4498 StackUsed && !IsByVal) { 4499 llvm::Type *PaddingTy = llvm::ArrayType::get( 4500 llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreAllocationGPRs); 4501 if (I.info.canHaveCoerceToType()) { 4502 I.info = ABIArgInfo::getDirect(I.info.getCoerceToType() /* type */, 4503 0 /* offset */, PaddingTy, true); 4504 } else { 4505 I.info = ABIArgInfo::getDirect(nullptr /* type */, 0 /* offset */, 4506 PaddingTy, true); 4507 } 4508 } 4509 } 4510 4511 // Always honor user-specified calling convention. 4512 if (FI.getCallingConvention() != llvm::CallingConv::C) 4513 return; 4514 4515 llvm::CallingConv::ID cc = getRuntimeCC(); 4516 if (cc != llvm::CallingConv::C) 4517 FI.setEffectiveCallingConvention(cc); 4518 } 4519 4520 /// Return the default calling convention that LLVM will use. 4521 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const { 4522 // The default calling convention that LLVM will infer. 4523 if (isEABIHF()) 4524 return llvm::CallingConv::ARM_AAPCS_VFP; 4525 else if (isEABI()) 4526 return llvm::CallingConv::ARM_AAPCS; 4527 else 4528 return llvm::CallingConv::ARM_APCS; 4529 } 4530 4531 /// Return the calling convention that our ABI would like us to use 4532 /// as the C calling convention. 4533 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const { 4534 switch (getABIKind()) { 4535 case APCS: return llvm::CallingConv::ARM_APCS; 4536 case AAPCS: return llvm::CallingConv::ARM_AAPCS; 4537 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 4538 } 4539 llvm_unreachable("bad ABI kind"); 4540 } 4541 4542 void ARMABIInfo::setCCs() { 4543 assert(getRuntimeCC() == llvm::CallingConv::C); 4544 4545 // Don't muddy up the IR with a ton of explicit annotations if 4546 // they'd just match what LLVM will infer from the triple. 
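  // For example, on an arm-linux-gnueabihf triple the LLVM default is already
  // ARM_AAPCS_VFP, so an AAPCS_VFP ABI needs no explicit annotation here.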
4547   llvm::CallingConv::ID abiCC = getABIDefaultCC();
4548   if (abiCC != getLLVMDefaultCC())
4549     RuntimeCC = abiCC;
4550
4551   BuiltinCC = (getABIKind() == APCS ?
4552                llvm::CallingConv::ARM_APCS : llvm::CallingConv::ARM_AAPCS);
4553 }
4554
4555 /// markAllocatedVFPs - update VFPRegs according to the alignment and
4556 /// number of VFP registers (unit is S register) requested.
4557 void ARMABIInfo::markAllocatedVFPs(unsigned Alignment,
4558                                    unsigned NumRequired) const {
4559   // Early Exit.
4560   if (AllocatedVFPs >= 16) {
4561     // We use AllocatedVFP > 16 to signal that some CPRCs were allocated on
4562     // the stack.
4563     AllocatedVFPs = 17;
4564     return;
4565   }
4566   // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
4567   // VFP registers of the appropriate type unallocated then the argument is
4568   // allocated to the lowest-numbered sequence of such registers.
4569   for (unsigned I = 0; I < 16; I += Alignment) {
4570     bool FoundSlot = true;
4571     for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
4572       if (J >= 16 || VFPRegs[J]) {
4573         FoundSlot = false;
4574         break;
4575       }
4576     if (FoundSlot) {
4577       for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
4578         VFPRegs[J] = 1;
4579       AllocatedVFPs += NumRequired;
4580       return;
4581     }
4582   }
4583   // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
4584   // unallocated are marked as unavailable.
4585   for (unsigned I = 0; I < 16; I++)
4586     VFPRegs[I] = 1;
4587   AllocatedVFPs = 17; // We do not have enough VFP registers.
4588 }
4589
4590 /// Update AllocatedGPRs to record the number of general purpose registers
4591 /// which have been allocated. It is valid for AllocatedGPRs to go above 4;
4592 /// this represents arguments being stored on the stack.
4593 void ARMABIInfo::markAllocatedGPRs(unsigned Alignment,
4594                                    unsigned NumRequired) const {
4595   assert((Alignment == 1 || Alignment == 2) && "Alignment must be 1 or 2 GPRs (4 or 8 bytes)");
4596
4597   if (Alignment == 2 && AllocatedGPRs & 0x1)
4598     AllocatedGPRs += 1;
4599
4600   AllocatedGPRs += NumRequired;
4601 }
4602
4603 void ARMABIInfo::resetAllocatedRegs() const {
4604   AllocatedGPRs = 0;
4605   AllocatedVFPs = 0;
4606   for (unsigned i = 0; i < NumVFPs; ++i)
4607     VFPRegs[i] = 0;
4608 }
4609
4610 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
4611                                             bool &IsCPRC) const {
4612   // We update the number of allocated VFPs according to
4613   // 6.1.2.1 The following argument types are VFP CPRCs:
4614   //   A single-precision floating-point type (including promoted
4615   //   half-precision types); A double-precision floating-point type;
4616   //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
4617   //   with a Base Type of a single- or double-precision floating-point type,
4618   //   64-bit containerized vectors or 128-bit containerized vectors with one
4619   //   to four Elements.
4620   bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
4621
4622   Ty = useFirstFieldIfTransparentUnion(Ty);
4623
4624   // Handle illegal vector types here.
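  // Such vectors are coerced to i32 (<= 32 bits), <2 x i32> (64 bits), or
  // <4 x i32> (128 bits); anything else is passed indirectly.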
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 32) {
      llvm::Type *ResType =
          llvm::Type::getInt32Ty(getVMContext());
      markAllocatedGPRs(1, 1);
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 64) {
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 2);
      if (getABIKind() == ARMABIInfo::AAPCS || isVariadic) {
        markAllocatedGPRs(2, 2);
      } else {
        markAllocatedVFPs(2, 2);
        IsCPRC = true;
      }
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 128) {
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);
      if (getABIKind() == ARMABIInfo::AAPCS || isVariadic) {
        markAllocatedGPRs(2, 4);
      } else {
        markAllocatedVFPs(4, 4);
        IsCPRC = true;
      }
      return ABIArgInfo::getDirect(ResType);
    }
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }
  // Update VFPRegs for legal vector types.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
    if (const VectorType *VT = Ty->getAs<VectorType>()) {
      uint64_t Size = getContext().getTypeSize(VT);
      // The size of a legal vector is a power of 2 and at least 64 bits.
      markAllocatedVFPs(Size >= 128 ? 4 : 2, Size / 32);
      IsCPRC = true;
    }
  }
  // Update VFPRegs for floating point types.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
    if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::Half ||
          BT->getKind() == BuiltinType::Float) {
        markAllocatedVFPs(1, 1);
        IsCPRC = true;
      }
      if (BT->getKind() == BuiltinType::Double ||
          BT->getKind() == BuiltinType::LongDouble) {
        markAllocatedVFPs(2, 2);
        IsCPRC = true;
      }
    }
  }

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
      Ty = EnumTy->getDecl()->getIntegerType();
    }

    unsigned Size = getContext().getTypeSize(Ty);
    if (!IsCPRC)
      markAllocatedGPRs(Size > 32 ? 2 : 1, (Size + 31) / 32);
    return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
                                          : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (IsEffectivelyAAPCS_VFP) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Base can be a floating-point or a vector.
      if (Base->isVectorType()) {
        // ElementSize is in number of floats.
        unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4;
        markAllocatedVFPs(ElementSize, Members * ElementSize);
      } else if (Base->isSpecificBuiltinType(BuiltinType::Float))
        markAllocatedVFPs(1, Members);
      else {
        assert(Base->isSpecificBuiltinType(BuiltinType::Double) ||
               Base->isSpecificBuiltinType(BuiltinType::LongDouble));
        markAllocatedVFPs(2, Members * 2);
      }
      IsCPRC = true;
      return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
    }
  }

  // Support byval for ARM.
  // The ABI alignment for APCS is 4 bytes; for AAPCS it is at least 4 bytes
  // and at most 8 bytes. We realign the indirect argument if the type
  // alignment is bigger than the ABI alignment.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    // Update the allocated GPRs. Since this is only used when the size of the
    // argument is greater than 64 bytes, this will always use up any available
    // registers (of which there are 4). We also don't care about getting the
    // alignment right, because general-purpose registers cannot be back-filled.
    markAllocatedGPRs(1, 4);
    return ABIArgInfo::getIndirect(TyAlign, /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type *ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
  if (getContext().getTypeAlign(Ty) <= 32) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
    markAllocatedGPRs(1, SizeRegs);
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
    markAllocatedGPRs(2, SizeRegs * 2);
  }

  return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}

static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
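  // (For example, struct S { int n; char data[]; }; has a flexible array
  // member and is therefore never integer-like, even though its fixed part
  // fits in a word.)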
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable; we only need to verify they are
    // "integer like". We still have to disallow a subsequent non-bitfield,
    // for example:
    //   struct { int : 0; int x; }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}

ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
                                          bool isVariadic) const {
  bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
                                            : ABIArgInfo::getDirect();
  }

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
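  // For example, struct Vec3 { float x, y, z; }; is a Homogeneous Aggregate
  // with Base = float and Members = 3, and is returned directly in s0-s2
  // when AAPCS-VFP is in effect.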
  if (IsEffectivelyAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members;
    if (isHomogeneousAggregate(RetTy, Base, Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Homogeneous Aggregates are returned directly.
      return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
    }
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  }

  markAllocatedGPRs(1, 1);
  return ABIArgInfo::getIndirect(0);
}

/// isIllegalVectorType - Check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be a power of 2.
    if ((NumElements & (NumElements - 1)) != 0)
      return true;
    // Size should be greater than 32 bits.
    return Size <= 32;
  }
  return false;
}

bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, or 64-bit or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                   uint64_t Members) const {
  return Members <= 4;
}

llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  if (isEmptyRecord(getContext(), Ty, true)) {
    // These are ignored for parameter passing purposes.
    llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
    return Builder.CreateBitCast(Addr, PTy);
  }

  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
  uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
  bool IsIndirect = false;

  // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
  // APCS. For AAPCS, the ABI alignment is at least 4 bytes and at most 8 bytes.
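  // For example, a type with 16-byte natural alignment is clamped to an
  // 8-byte va_list slot alignment under AAPCS, while a 1-byte-aligned type
  // is rounded up to 4 bytes.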
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  else
    TyAlign = 4;
  // Use indirect if the size of the illegal vector is bigger than 16 bytes.
  if (isIllegalVectorType(Ty) && Size > 16) {
    IsIndirect = true;
    Size = 4;
    TyAlign = 4;
  }

  // Handle address alignment for ABI alignment > 4 bytes.
  if (TyAlign > 4) {
    assert((TyAlign & (TyAlign - 1)) == 0 &&
           "Alignment is not a power of 2!");
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
  }

  uint64_t Offset =
      llvm::RoundUpToAlignment(Size, 4);
  llvm::Value *NextAddr =
      Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                        "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  if (IsIndirect)
    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
  else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
    // We can't directly cast ap.cur to a pointer to a vector type, since
    // ap.cur may not be correctly aligned for the vector type. We create an
    // aligned temporary space and copy the content over from ap.cur to the
    // temporary space. This is necessary if the natural alignment of the type
    // is greater than the ABI alignment.
    llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
    CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
    llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
                                                    "var.align");
    llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
    llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
    Builder.CreateMemCpy(Dst, Src,
        llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
        TyAlign, false);
    Addr = AlignedTemp; // The content is now in an aligned location.
  }
  llvm::Type *PTy =
      llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  return AddrTyped;
}

namespace {

class NaClARMABIInfo : public ABIInfo {
public:
  NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
      : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
private:
  PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
  ARMABIInfo NInfo;   // Used for everything else.
};

class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                           ARMABIInfo::ABIKind Kind)
      : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
};

}

void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (FI.getASTCallingConvention() == CC_PnaclCall)
    PInfo.computeInfo(FI);
  else
    static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
}

llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // Always use the native convention; calling pnacl-style varargs functions
  // is unsupported.
  return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
}

//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class NVPTXABIInfo : public ABIInfo {
public:
  NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
private:
  // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
  // resulting MDNode to the nvvm.annotations MDNode.
  static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
};

ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Note: this is different from the default ABI.
  if (!RetTy->isScalarType())
    return ABIArgInfo::getDirect();

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Return aggregate types as indirect by value.
  if (isAggregateTypeForABI(Ty))
    return ABIArgInfo::getIndirect(0, /* byval */ true);

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  FI.setEffectiveCallingConvention(getRuntimeCC());
}

llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  llvm_unreachable("NVPTX does not support varargs");
}

void NVPTXTargetCodeGenInfo::
SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  // Perform special handling in OpenCL mode.
  if (M.getLangOpts().OpenCL) {
    // Use OpenCL function attributes to check for kernel functions.
    // By default, all functions are device functions.
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL __kernel functions get kernel metadata.
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
      // And kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);
    }
  }

  // Perform special handling in CUDA mode.
  if (M.getLangOpts().CUDA) {
    // CUDA __global__ functions get a kernel metadata entry. Since
    // __global__ functions cannot be called from the device, we do not
    // need to set the noinline attribute.
    if (FD->hasAttr<CUDAGlobalAttr>()) {
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
    }
    if (FD->hasAttr<CUDALaunchBoundsAttr>()) {
      // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node.
      addNVVMMetadata(F, "maxntidx",
                      FD->getAttr<CUDALaunchBoundsAttr>()->getMaxThreads());
      // Min blocks is a default argument for CUDALaunchBoundsAttr, so getting
      // a zero value from getMinBlocks either means it was not specified in
      // __launch_bounds__ or the user specified a 0 value. In both cases, we
      // don't have to add a PTX directive.
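      // For example, __launch_bounds__(256) emits only the maxntidx entry
      // above, while __launch_bounds__(256, 2) also emits the minctasm
      // entry below.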
      int MinCTASM = FD->getAttr<CUDALaunchBoundsAttr>()->getMinBlocks();
      if (MinCTASM > 0) {
        // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node.
        addNVVMMetadata(F, "minctasm", MinCTASM);
      }
    }
  }
}

void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
                                             int Operand) {
  llvm::Module *M = F->getParent();
  llvm::LLVMContext &Ctx = M->getContext();

  // Get the "nvvm.annotations" metadata node.
  llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
  // Append metadata to nvvm.annotations.
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
}

//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class SystemZABIInfo : public ABIInfo {
public:
  SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  bool isPromotableIntegerType(QualType Ty) const;
  bool isCompoundType(QualType Ty) const;
  bool isFPArgumentType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType ArgTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
};

}

bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (Ty->isPromotableIntegerType())
    return true;

  // 32-bit values must also be promoted.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      return false;
    }
  return false;
}

bool SystemZABIInfo::isCompoundType(QualType Ty) const {
  return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty);
}

bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Float:
    case BuiltinType::Double:
      return true;
    default:
      return false;
    }

  if (const RecordType *RT = Ty->getAsStructureType()) {
    const RecordDecl *RD = RT->getDecl();
    bool Found = false;

    // If this is a C++ record, check the bases first.
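    // (For example, struct D : B {}; with struct B { double d; }; still
    // counts as an FP argument: the single non-empty base is classified
    // recursively by the loop below.)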
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      for (const auto &I : CXXRD->bases()) {
        QualType Base = I.getType();

        // Empty bases don't affect things either way.
        if (isEmptyRecord(getContext(), Base, true))
          continue;

        if (Found)
          return false;
        Found = isFPArgumentType(Base);
        if (!Found)
          return false;
      }

    // Check the fields.
    for (const auto *FD : RD->fields()) {
      // Empty bitfields don't affect things either way.
      // Unlike isSingleElementStruct(), empty structure and array fields
      // do count. So do anonymous bitfields that aren't zero-sized.
      if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
        continue;

      // Unlike isSingleElementStruct(), arrays do not count.
      // Nested isFPArgumentType structures still do though.
      if (Found)
        return false;
      Found = isFPArgumentType(FD->getType());
      if (!Found)
        return false;
    }

    // Unlike isSingleElementStruct(), trailing padding is allowed.
    // An 8-byte aligned struct s { float f; } is passed as a double.
    return Found;
  }

  return false;
}

llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i64 __gpr;
  //   i64 __fpr;
  //   i8 *__overflow_arg_area;
  //   i8 *__reg_save_area;
  // };

  // Every argument occupies 8 bytes and is passed by preference in either
  // GPRs or FPRs.
  Ty = CGF.getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool InFPRs = isFPArgumentType(Ty);

  llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
  bool IsIndirect = AI.isIndirect();
  unsigned UnpaddedBitSize;
  if (IsIndirect) {
    APTy = llvm::PointerType::getUnqual(APTy);
    UnpaddedBitSize = 64;
  } else
    UnpaddedBitSize = getContext().getTypeSize(Ty);
  unsigned PaddedBitSize = 64;
  assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");

  unsigned PaddedSize = PaddedBitSize / 8;
  unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;

  unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
  if (InFPRs) {
    MaxRegs = 4;          // Maximum of 4 FPR arguments
    RegCountField = 1;    // __fpr
    RegSaveIndex = 16;    // save offset for f0
    RegPadding = 0;       // floats are passed in the high bits of an FPR
  } else {
    MaxRegs = 5;          // Maximum of 5 GPR arguments
    RegCountField = 0;    // __gpr
    RegSaveIndex = 2;     // save offset for r2
    RegPadding = Padding; // values are passed in the low bits of a GPR
  }

  llvm::Value *RegCountPtr =
      CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
  llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
  llvm::Type *IndexTy = RegCount->getType();
  llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
  llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
                                                  "fits_in_regs");

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.
  CGF.EmitBlock(InRegBlock);

  // Work out the address of an argument register.
  llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
  llvm::Value *ScaledRegCount =
      CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
  llvm::Value *RegBase =
      llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding);
  llvm::Value *RegOffset =
      CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
  llvm::Value *RegSaveAreaPtr =
      CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
  llvm::Value *RegSaveArea =
      CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
  llvm::Value *RawRegAddr =
      CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr");
  llvm::Value *RegAddr =
      CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr");

  // Update the register count.
  llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
  llvm::Value *NewRegCount =
      CGF.Builder.CreateAdd(RegCount, One, "reg_count");
  CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.
  CGF.EmitBlock(InMemBlock);

  // Work out the address of a stack argument.
  llvm::Value *OverflowArgAreaPtr =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
  llvm::Value *OverflowArgArea =
      CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
  llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
  llvm::Value *RawMemAddr =
      CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr");
  llvm::Value *MemAddr =
      CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr");

  // Update the overflow_arg_area pointer.
  llvm::Value *NewOverflowArgArea =
      CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
  CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
  CGF.EmitBranch(ContBlock);

  // Return the appropriate result.
  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);

  if (IsIndirect)
    return CGF.Builder.CreateLoad(ResAddr, "indirect_arg");

  return ResAddr;
}

ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();
  if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
    return ABIArgInfo::getIndirect(0);
  return (isPromotableIntegerType(RetTy) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
  // Handle the generic C++ ABI.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

  // Integers and enums are extended to full register width.
  if (isPromotableIntegerType(Ty))
    return ABIArgInfo::getExtend();

  // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Handle small structures.
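  // For example, struct { float f; } (32 bits) is passed as a float in an
  // FPR, while struct { short a, b; } (also 32 bits, but not FP-like) is
  // passed as an unextended i32.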
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // Structures with flexible arrays have variable length, so they really
    // fail the size test above.
    const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    // The structure is passed as an unextended integer, a float, or a double.
    llvm::Type *PassTy;
    if (isFPArgumentType(Ty)) {
      assert(Size == 32 || Size == 64);
      if (Size == 32)
        PassTy = llvm::Type::getFloatTy(getVMContext());
      else
        PassTy = llvm::Type::getDoubleTy(getVMContext());
    } else
      PassTy = llvm::IntegerType::get(getVMContext(), Size);
    return ABIArgInfo::getDirect(PassTy);
  }

  // Non-structure compounds are passed indirectly.
  if (isCompoundType(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  return ABIArgInfo::getDirect(nullptr);
}

//===----------------------------------------------------------------------===//
// MSP430 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

}

void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                                  CodeGen::CodeGenModule &M) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
      // Handle 'interrupt' attribute:
      llvm::Function *F = cast<llvm::Function>(GV);

      // Step 1: Set ISR calling convention.
      F->setCallingConv(llvm::CallingConv::MSP430_INTR);

      // Step 2: Add the NoInline attribute.
      F->addFnAttr(llvm::Attribute::NoInline);

      // Step 3: Emit ISR vector alias.
      unsigned Num = attr->getNumber() / 2;
      llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
                                "__isr_" + Twine(Num), F);
    }
  }
}

//===----------------------------------------------------------------------===//
// MIPS ABI Implementation. This works for both little-endian and
// big-endian variants.
//===----------------------------------------------------------------------===//

namespace {
class MipsABIInfo : public ABIInfo {
  bool IsO32;
  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
  void CoerceToIntArgs(uint64_t TySize,
                       SmallVectorImpl<llvm::Type *> &ArgList) const;
  llvm::Type *HandleAggregates(QualType Ty, uint64_t TySize) const;
  llvm::Type *returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
  llvm::Type *getPaddingType(uint64_t OrigOffset, uint64_t Offset) const;
public:
  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
      ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
      StackAlignInBytes(IsO32 ? 8 : 16) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty, uint64_t &Offset) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
  unsigned SizeOfUnwindException;
public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
      : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
        SizeOfUnwindException(IsO32 ? 24 : 32) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 29;
  }

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
    if (!FD) return;
    llvm::Function *Fn = cast<llvm::Function>(GV);
    if (FD->hasAttr<Mips16Attr>()) {
      Fn->addFnAttr("mips16");
    }
    else if (FD->hasAttr<NoMips16Attr>()) {
      Fn->addFnAttr("nomips16");
    }
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  unsigned getSizeOfUnwindException() const override {
    return SizeOfUnwindException;
  }
};
}

void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
                                  SmallVectorImpl<llvm::Type *> &ArgList) const {
  llvm::IntegerType *IntTy =
      llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);

  // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
  for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
    ArgList.push_back(IntTy);

  // If necessary, add one more integer type to ArgList.
  unsigned R = TySize % (MinABIStackAlignInBytes * 8);

  if (R)
    ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
}

// In N32/64, an aligned double precision floating point field is passed in
// a register.
llvm::Type *MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
  SmallVector<llvm::Type *, 8> ArgList, IntArgList;

  if (IsO32) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  if (Ty->isComplexType())
    return CGT.ConvertType(Ty);

  const RecordType *RT = Ty->getAs<RecordType>();

  // Unions/vectors are passed in integer registers.
  if (!RT || !RT->isStructureOrClassType()) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  const RecordDecl *RD = RT->getDecl();
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  assert(!(TySize % 8) && "Size of structure must be multiple of 8.");

  uint64_t LastOffset = 0;
  unsigned idx = 0;
  llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);

  // Iterate over fields in the struct/class and check if there are any aligned
  // double fields.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const QualType Ty = i->getType();
    const BuiltinType *BT = Ty->getAs<BuiltinType>();

    if (!BT || BT->getKind() != BuiltinType::Double)
      continue;

    uint64_t Offset = Layout.getFieldOffset(idx);
    if (Offset % 64) // Ignore doubles that are not aligned.
      continue;

    // Add ((Offset - LastOffset) / 64) args of type i64.
    for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
      ArgList.push_back(I64);

    // Add double type.
    ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
    LastOffset = Offset + 64;
  }

  CoerceToIntArgs(TySize - LastOffset, IntArgList);
  ArgList.append(IntArgList.begin(), IntArgList.end());

  return llvm::StructType::get(getVMContext(), ArgList);
}

llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
                                        uint64_t Offset) const {
  if (OrigOffset + MinABIStackAlignInBytes > Offset)
    return nullptr;

  return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
}

ABIArgInfo
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  uint64_t OrigOffset = Offset;
  uint64_t TySize = getContext().getTypeSize(Ty);
  uint64_t Align = getContext().getTypeAlign(Ty) / 8;

  Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
                   (uint64_t)StackAlignInBytes);
  unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align);
  Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8;

  if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
    // Ignore empty aggregates.
    if (TySize == 0)
      return ABIArgInfo::getIgnore();

    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
      Offset = OrigOffset + MinABIStackAlignInBytes;
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    }

    // If we have reached here, aggregates are passed directly by coercing to
    // another structure type. Padding is inserted if the offset of the
    // aggregate is unaligned.
    ABIArgInfo ArgInfo =
        ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
                              getPaddingType(OrigOffset, CurrOffset));
    ArgInfo.setInReg(true);
    return ArgInfo;
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // All integral types are promoted to the GPR width.
  if (Ty->isIntegralOrEnumerationType())
    return ABIArgInfo::getExtend();

  return ABIArgInfo::getDirect(
      nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
}

llvm::Type *
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
  const RecordType *RT = RetTy->getAs<RecordType>();
  SmallVector<llvm::Type *, 8> RTList;

  if (RT && RT->isStructureOrClassType()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    unsigned FieldCnt = Layout.getFieldCount();

    // N32/64 returns struct/classes in floating point registers if the
    // following conditions are met:
    //   1. The size of the struct/class is no larger than 128-bit.
    //   2. The struct/class has one or two fields all of which are floating
    //      point types.
    //   3. The offset of the first field is zero (this follows what gcc does).
    //
    // Any other composite results are returned in integer registers.
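    // For instance, struct { double d; float f; } meets conditions 1-3 and
    // is returned in floating point registers ($f0/$f2), while
    // struct { double d; int i; } fails condition 2 and is returned in
    // integer registers.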
    //
    if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
      RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
      for (; b != e; ++b) {
        const BuiltinType *BT = b->getType()->getAs<BuiltinType>();

        if (!BT || !BT->isFloatingPoint())
          break;

        RTList.push_back(CGT.ConvertType(b->getType()));
      }

      if (b == e)
        return llvm::StructType::get(getVMContext(), RTList,
                                     RD->hasAttr<PackedAttr>());

      RTList.clear();
    }
  }

  CoerceToIntArgs(Size, RTList);
  return llvm::StructType::get(getVMContext(), RTList);
}

ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
  uint64_t Size = getContext().getTypeSize(RetTy);

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // O32 doesn't treat zero-sized structs differently from other structs.
  // However, N32/N64 ignores zero sized return values.
  if (!IsO32 && Size == 0)
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
    if (Size <= 128) {
      if (RetTy->isAnyComplexType())
        return ABIArgInfo::getDirect();

      // O32 returns integer vectors in registers and N32/N64 returns all small
      // aggregates in registers.
      if (!IsO32 ||
          (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
        ABIArgInfo ArgInfo =
            ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
        ArgInfo.setInReg(true);
        return ArgInfo;
      }
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
  ABIArgInfo &RetInfo = FI.getReturnInfo();
  if (!getCXXABI().classifyReturnType(FI))
    RetInfo = classifyReturnType(FI.getReturnType());

  // Check if a pointer to an aggregate is passed as a hidden argument.
  uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, Offset);
}

llvm::Value *MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                    CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
  // Pointers are also promoted in the same way but this only matters for N32.
  unsigned SlotSizeInBits = IsO32 ? 32 : 64;
  unsigned PtrWidth = getTarget().getPointerWidth(0);
  if ((Ty->isIntegerType() &&
       CGF.getContext().getIntWidth(Ty) < SlotSizeInBits) ||
      (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
    Ty = CGF.getContext().getIntTypeForBitwidth(SlotSizeInBits,
                                                Ty->isSignedIntegerType());
  }

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  int64_t TypeAlign =
      std::min(getContext().getTypeAlign(Ty) / 8, StackAlignInBytes);
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped;
  llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;

  if (TypeAlign > MinABIStackAlignInBytes) {
    llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
    llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
    llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
    llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
    llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
    AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
  }
  else
    AddrTyped = Builder.CreateBitCast(Addr, PTy);

  llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
  TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
  unsigned ArgSizeInBits = CGF.getContext().getTypeSize(Ty);
  uint64_t Offset = llvm::RoundUpToAlignment(ArgSizeInBits / 8, TypeAlign);
  llvm::Value *NextAddr =
      Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
                        "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be as
  // canonical as it gets.

  // Everything on MIPS is 4 bytes. Double-precision FP registers
  // are aliased to pairs of single-precision FP registers.
  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-31 are the general purpose registers, $0 - $31.
  // 32-63 are the floating-point registers, $f0 - $f31.
  // 64 and 65 are the multiply/divide registers, $hi and $lo.
  // 66 is the (notional, I think) register for signal-handler return.
  AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);

  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
  // They are one bit wide and ignored here.

  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
  // (coprocessor 1 is the FP unit)
  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
  // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
  return false;
}

//===----------------------------------------------------------------------===//
// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
//===----------------------------------------------------------------------===//

namespace {

class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
      : DefaultTargetCodeGenInfo(CGT) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                               llvm::GlobalValue *GV,
                                               CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  if (M.getLangOpts().OpenCL) {
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL C kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);
      const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
      if (Attr) {
        // Convert the reqd_work_group_size() attributes to metadata.
        llvm::LLVMContext &Context = F->getContext();
        llvm::NamedMDNode *OpenCLMetadata =
            M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");

        SmallVector<llvm::Metadata *, 5> Operands;
        Operands.push_back(llvm::ConstantAsMetadata::get(F));

        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));

        // Add a boolean constant operand for "required" (true) or "hint"
        // (false) for implementing the work_group_size_hint attr later.
        // Currently always true as the hint is not yet implemented.
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}

}

//===----------------------------------------------------------------------===//
// Hexagon ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class HexagonABIInfo : public ABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 29;
  }
};

}

void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size > 64)
    return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
  // Pass in the smallest viable integer type.
  else if (Size > 32)
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  else if (Size > 16)
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  else if (Size > 8)
    return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
  else
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}

ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
    return ABIArgInfo::getIndirect(0);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Aggregates <= 8 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 64) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    if (Size <= 32)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  }

  return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
}

llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // FIXME: Need to handle alignment.
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
      llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
      llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
      Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                        "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

//===----------------------------------------------------------------------===//
// AMDGPU ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

}

void AMDGPUTargetCodeGenInfo::SetTargetAttributes(
    const Decl *D,
    llvm::GlobalValue *GV,
    CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD)
    return;

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    llvm::Function *F = cast<llvm::Function>(GV);
    uint32_t NumVGPR = Attr->getNumVGPR();
    if (NumVGPR != 0)
      F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    llvm::Function *F = cast<llvm::Function>(GV);
    unsigned NumSGPR = Attr->getNumSGPR();
    if (NumSGPR != 0)
      F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR));
  }
}

//===----------------------------------------------------------------------===//
// SPARC v9 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Function arguments are mapped to a nominal "parameter array" and promoted to
// registers depending on their type. Each argument occupies 8 or 16 bytes in
// the array; structs larger than 16 bytes are passed indirectly.
//===----------------------------------------------------------------------===//
// SPARC v9 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Function arguments are mapped to a nominal "parameter array" and promoted to
// registers depending on their type. Each argument occupies 8 or 16 bytes in
// the array; structs larger than 16 bytes are passed indirectly.
//
// One case requires special care:
//
//   struct mixed {
//     int i;
//     float f;
//   };
//
// When a struct mixed is passed by value, it only occupies 8 bytes in the
// parameter array, but the int is passed in an integer register, and the float
// is passed in a floating point register. This is represented as two arguments
// with the LLVM IR inreg attribute:
//
//   declare void @f(i32 inreg %i, float inreg %f)
//
// The code generator will only allocate 4 bytes from the parameter array for
// the inreg arguments. All other arguments are allocated a multiple of 8
// bytes.
//
namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyType(QualType Ty, unsigned SizeLimit) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  // Coercion type builder for structs passed in registers. The coercion type
  // serves two purposes:
  //
  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
  //    in registers.
  // 2. Expose aligned floating point elements as first-level elements, so the
  //    code generator knows to pass them in floating point registers.
  //
  // We also compute the InReg flag which indicates that the struct contains
  // aligned 32-bit floats.
  //
  struct CoerceBuilder {
    llvm::LLVMContext &Context;
    const llvm::DataLayout &DL;
    SmallVector<llvm::Type*, 8> Elems;
    uint64_t Size;
    bool InReg;

    CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
      : Context(c), DL(dl), Size(0), InReg(false) {}

    // Pad Elems with integers until Size is ToSize.
    void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;

      // Finish the current 64-bit word.
      uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
      if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }

      // Add whole 64-bit words.
      while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }

      // Final in-word padding.
      if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }

    // Add a floating point element at Offset.
    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }
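
    // Worked example (illustrative only): starting from Size == 32,
    // pad(192) first finishes the current word by appending an i32
    // (Size -> 64), then appends two whole i64 words (Size -> 192),
    // so Elems ends in { ..., i32, i64, i64 }.
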
    // Add a struct type to the coercion type, starting at Offset (in bits).
    void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
      const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
      for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
        llvm::Type *ElemTy = StrTy->getElementType(i);
        uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
        switch (ElemTy->getTypeID()) {
        case llvm::Type::StructTyID:
          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
          break;
        case llvm::Type::FloatTyID:
          addFloat(ElemOffset, ElemTy, 32);
          break;
        case llvm::Type::DoubleTyID:
          addFloat(ElemOffset, ElemTy, 64);
          break;
        case llvm::Type::FP128TyID:
          addFloat(ElemOffset, ElemTy, 128);
          break;
        case llvm::Type::PointerTyID:
          if (ElemOffset % 64 == 0) {
            pad(ElemOffset);
            Elems.push_back(ElemTy);
            Size += 64;
          }
          break;
        default:
          break;
        }
      }
    }

    // Check if Ty is a usable substitute for the coercion type.
    bool isUsableType(llvm::StructType *Ty) const {
      if (Ty->getNumElements() != Elems.size())
        return false;
      for (unsigned i = 0, e = Elems.size(); i != e; ++i)
        if (Elems[i] != Ty->getElementType(i))
          return false;
      return true;
    }

    // Get the coercion type as a literal struct type.
    llvm::Type *getType() const {
      if (Elems.size() == 1)
        return Elems.front();
      else
        return llvm::StructType::get(Context, Elems);
    }
  };
};
} // end anonymous namespace

ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Anything too big to fit in registers is passed with an explicit indirect
  // pointer / sret pointer.
  if (Size > SizeLimit)
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Integer types smaller than a register are extended.
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend();

  // Other non-aggregates go in registers.
  if (!isAggregateTypeForABI(Ty))
    return ABIArgInfo::getDirect();

  // If a C++ object has either a non-trivial copy constructor or a non-trivial
  // destructor, it is passed with an explicit indirect pointer / sret pointer.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

  // This is a small aggregate type that should be passed in registers.
  // Build a coercion type from the LLVM struct type.
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy)
    return ABIArgInfo::getDirect();

  CoerceBuilder CB(getVMContext(), getDataLayout());
  CB.addStruct(0, StrTy);
  CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));

  // Try to use the original type for coercion.
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();

  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  else
    return ABIArgInfo::getDirect(CoerceTy);
}
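
// Tying this back to the example in the header comment (illustrative): for
// struct mixed { int i; float f; }, CoerceBuilder produces the literal type
// { i32, float } with InReg set (an aligned 32-bit float is present), so the
// value is classified getDirectInReg({ i32, float }) and both pieces are
// promoted to registers as described above.
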
llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyType(Ty, 16 * 8);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);

  llvm::Type *BPP = CGF.Int8PtrPtrTy;
  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  llvm::Value *ArgAddr;
  unsigned Stride;

  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");

  case ABIArgInfo::Extend:
    Stride = 8;
    ArgAddr = Builder
      .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
                          "extend");
    break;

  case ABIArgInfo::Direct:
    Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    ArgAddr = Addr;
    break;

  case ABIArgInfo::Indirect:
    Stride = 8;
    ArgAddr = Builder.CreateBitCast(Addr,
                                    llvm::PointerType::getUnqual(ArgPtrTy),
                                    "indirect");
    ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
    break;

  case ABIArgInfo::Ignore:
    return llvm::UndefValue::get(ArgPtrTy);
  }

  // Update VAList.
  Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
  Builder.CreateStore(Addr, VAListAddrAsBPP);

  return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
}

void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
  for (auto &I : FI.arguments())
    I.info = classifyType(I.type, 16 * 8);
}
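
// Note the asymmetric size limits above (an observation, not new policy):
// return values may occupy up to 32 bytes of registers while arguments get
// at most 16, so e.g. a 24-byte struct is returned directly via a coercion
// type but is passed as an argument indirectly, through a pointer.
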
namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 14;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // end anonymous namespace

bool
SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                  llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

  // 0-31: the 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Eight8, 0, 31);

  // 32-63: f0-31, the 4-byte floating-point registers
  AssignToArrayRange(Builder, Address, Four8, 32, 63);

  //   Y = 64
  //   PSR = 65
  //   WIM = 66
  //   TBR = 67
  //   PC = 68
  //   NPC = 69
  //   FSR = 70
  //   CSR = 71
  AssignToArrayRange(Builder, Address, Eight8, 64, 71);

  // 72-87: d0-15, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 72, 87);

  return false;
}


//===----------------------------------------------------------------------===//
// XCore ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

/// A SmallStringEnc instance is used to build up the TypeString by passing
/// it by reference between functions that append to it.
typedef llvm::SmallString<128> SmallStringEnc;

/// TypeStringCache caches the meta encodings of Types.
///
/// The reason for caching TypeStrings is twofold:
///   1. To cache a type's encoding for later uses;
///   2. As a means to break recursive member type inclusion.
///
/// A cache Entry can have a Status of:
///   NonRecursive:   The type encoding is not recursive;
///   Recursive:      The type encoding is recursive;
///   Incomplete:     An incomplete TypeString;
///   IncompleteUsed: An incomplete TypeString that has been used in a
///                   Recursive type encoding.
///
/// A NonRecursive entry will have all of its sub-members expanded as fully
/// as possible. Whilst it may contain types which are recursive, the type
/// itself is not recursive and thus its encoding may be safely used whenever
/// the type is encountered.
///
/// A Recursive entry will have all of its sub-members expanded as fully as
/// possible. The type itself is recursive and it may contain other types which
/// are recursive. The Recursive encoding must not be used during the expansion
/// of a recursive type's recursive branch. For simplicity the code uses
/// IncompleteCount to reject all usage of Recursive encodings for member types.
///
/// An Incomplete entry is always a RecordType and only encodes its
/// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
/// are placed into the cache during type expansion as a means to identify and
/// handle recursive inclusion of types as sub-members. If there is recursion
/// the entry becomes IncompleteUsed.
///
/// During the expansion of a RecordType's members:
///
///   If the cache contains a NonRecursive encoding for the member type, the
///   cached encoding is used;
///
///   If the cache contains a Recursive encoding for the member type, the
///   cached encoding is 'Swapped' out, as it may be incorrect, and...
///
///   If the member is a RecordType, an Incomplete encoding is placed into the
///   cache to break potential recursive inclusion of itself as a sub-member;
///
///   Once a member RecordType has been expanded, its temporary incomplete
///   entry is removed from the cache.
///   If a Recursive encoding was swapped out, it is swapped back in;
///
///   If an incomplete entry is used to expand a sub-member, the incomplete
///   entry is marked as IncompleteUsed. The cache keeps count of how many
///   IncompleteUsed entries it currently contains in IncompleteUsedCount;
///
///   If a member's encoding is found to be NonRecursive or Recursive (i.e.
///   IncompleteUsedCount == 0), the member's encoding is added to the cache.
///   Else the member is part of a recursive type and thus the recursion has
///   been exited too soon for the encoding to be correct for the member.
///
class TypeStringCache {
  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary place holder for a Recursive encoding
                         // during the expansion of RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
  bool removeIncomplete(const IdentifierInfo *ID);
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  StringRef lookupStr(const IdentifierInfo *ID);
};
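
// An illustrative walk-through (no additional machinery involved): encoding
//
//   struct A { struct A *next; };
//
// first places the stub "s(A){}" into the cache via addIncomplete(). When
// the encoder reaches the 'next' member it re-encounters A; lookupStr()
// returns the stub and marks the entry IncompleteUsed. removeIncomplete()
// then reports IsRecursive == true, and addIfComplete() records the full
// encoding as Recursive rather than NonRecursive.
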
/// TypeString encodings for enum & union fields must be ordered.
/// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
  bool HasName;
  std::string Enc;
public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc.c_str(); }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};

class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;
public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
};

} // End anonymous namespace.

llvm::Value *XCoreABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr,
                                                       CGF.Int8PtrPtrTy);
  llvm::Value *AP = Builder.CreateLoad(VAListAddrAsBPP);

  // Handle the argument.
  ABIArgInfo AI = classifyArgumentType(Ty);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  llvm::Value *Val;
  uint64_t ArgSize = 0;
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = llvm::UndefValue::get(ArgPtrTy);
    ArgSize = 0;
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreatePointerCast(AP, ArgPtrTy);
    ArgSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    if (ArgSize < 4)
      ArgSize = 4;
    break;
  case ABIArgInfo::Indirect:
    llvm::Value *ArgAddr;
    ArgAddr = Builder.CreateBitCast(AP, llvm::PointerType::getUnqual(ArgPtrTy));
    ArgAddr = Builder.CreateLoad(ArgAddr);
    Val = Builder.CreatePointerCast(ArgAddr, ArgPtrTy);
    ArgSize = 4;
    break;
  }

  // Increment the VAList.
  if (ArgSize) {
    llvm::Value *APN = Builder.CreateConstGEP1_32(AP, ArgSize);
    Builder.CreateStore(APN, VAListAddrAsBPP);
  }
  return Val;
}
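
// In effect every va_list slot on XCore is at least one 4-byte word: for
// example (illustrative), va_arg(ap, char) reads the char through a pointer
// into the current slot but still advances ap by 4, and indirect arguments
// consume a 4-byte slot holding a pointer to the real argument.
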
/// During the expansion of a RecordType, an incomplete TypeString is placed
/// into the cache as a means to identify and break recursion.
/// If there is a Recursive encoding in the cache, it is swapped out and will
/// be reinserted by removeIncomplete().
/// All other types of encoding should have been used rather than arriving here.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert((E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // swap out the Recursive
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}

/// Once the RecordType has been expanded, the temporary incomplete TypeString
/// must be removed from the cache.
/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
/// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert((E.State == Incomplete ||
          E.State == IncompleteUsed) &&
         "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive back.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}

/// Add the encoded TypeString to the cache only if it is NonRecursive or
/// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key or it is an incomplete sub-type so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State == Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry after all, but we assumed the worst when
    // we started (viz: IncompleteCount != 0).
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive ? Recursive : NonRecursive;
}

/// Return a cached TypeString encoding for the ID. If there isn't one, or we
/// are recursively expanding a type (IncompleteCount != 0) and the cached
/// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef(); // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef(); // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef(); // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str.c_str();
}

/// The XCore ABI includes a type information section that communicates symbol
/// type information to the linker. The linker uses this information to verify
/// the safety/correctness of things such as array bounds and pointers.
/// The ABI only requires C (and XC) language modules to emit TypeStrings.
/// This type information (TypeString) is emitted into metadata for all global
/// symbols: definitions, declarations, functions & variables.
///
/// The TypeString carries type, qualifier, name, size & value details.
/// Please see 'Tools Development Guide' section 2.16.2 for format details:
/// <https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf>
/// The output is tested by test/CodeGen/xcore-stringtype.c.
///
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);

/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                                          CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
    llvm::LLVMContext &Ctx = CGM.getModule().getContext();
    llvm::SmallVector<llvm::Metadata *, 2> MDVals;
    MDVals.push_back(llvm::ConstantAsMetadata::get(GV));
    MDVals.push_back(llvm::MDString::get(Ctx, Enc.str()));
    llvm::NamedMDNode *MD =
      CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}
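
// For a C function "void f(void)" the resulting module-level metadata would
// look roughly like this (illustrative IR, following the encoding rules
// implemented below):
//
//   !xcore.typestrings = !{!0}
//   !0 = !{void ()* @f, !"f{0}(0)"}
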
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC);

/// Helper function for appendRecordType().
/// Builds a SmallVector containing the encoded field types in declaration
/// order.
static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
                             const RecordDecl *RD,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    SmallStringEnc Enc;
    Enc += "m(";
    Enc += Field->getName();
    Enc += "){";
    if (Field->isBitField()) {
      Enc += "b(";
      llvm::raw_svector_ostream OS(Enc);
      OS.resync();
      OS << Field->getBitWidthValue(CGM.getContext());
      OS.flush();
      Enc += ':';
    }
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      return false;
    if (Field->isBitField())
      Enc += ')';
    Enc += '}';
    FE.push_back(FieldEncoding(!Field->getName().empty(), Enc));
  }
  return true;
}

/// Appends structure and union types to Enc and adds encoding to cache.
/// Recursively calls appendType (via extractFieldType) for each field.
/// Union types have their fields ordered according to the ABI.
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC, const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  // Start to emit an incomplete TypeString.
  size_t Start = Enc.size();
  Enc += (RT->isUnionType() ? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded fields and order them as necessary.
  bool IsRecursive = false;
  const RecordDecl *RD = RT->getDecl()->getDefinition();
  if (RD && !RD->field_empty()) {
    // An incomplete TypeString stub is placed in the cache for this RecordType
    // so that recursive calls to this RecordType will use it whilst building a
    // complete TypeString for this RecordType.
    SmallVector<FieldEncoding, 16> FE;
    std::string StubEnc(Enc.substr(Start).str());
    StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
    TSC.addIncomplete(ID, std::move(StubEnc));
    if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void) TSC.removeIncomplete(ID);
      return false;
    }
    IsRecursive = TSC.removeIncomplete(ID);
    // The ABI requires unions to be sorted but not structures.
    // See FieldEncoding::operator< for the sort algorithm.
    if (RT->isUnionType())
      std::sort(FE.begin(), FE.end());
    // We can now complete the TypeString.
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}
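
// Putting the pieces together (illustrative): "struct S { int a; unsigned b:4; }"
// encodes as
//
//   s(S){m(a){si},m(b){b(4:ui)}}
//
// ('s' for struct, 'm' per member, 'b' for a bit-field with its width, and
// the builtin encodings "si"/"ui" from appendBuiltinType below). A union's
// members would be sorted; a struct's keep declaration order.
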
/// Appends enum types to Enc and adds the encoding to the cache.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
                           TypeStringCache &TSC,
                           const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded enumerations and order them alphanumerically.
  if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
    SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    std::sort(FE.begin(), FE.end());
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}
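
// For example (illustrative): "enum E { B = 1, A = 0 };" encodes as
//
//   e(E){m(A){0},m(B){1}}
//
// with the enumerators sorted alphanumerically rather than kept in
// declaration order.
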
/// Appends type's qualifier to Enc.
/// This is done prior to appending the type's encoding.
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
  // Qualifiers are emitted in alphabetical order.
  static const char *Table[] = {"","c:","r:","cr:","v:","cv:","rv:","crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1<<0;
  if (QT.isRestrictQualified())
    Lookup += 1<<1;
  if (QT.isVolatileQualified())
    Lookup += 1<<2;
  Enc += Table[Lookup];
}

/// Appends built-in types to Enc.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
  const char *EncType;
  switch (BT->getKind()) {
  case BuiltinType::Void:
    EncType = "0";
    break;
  case BuiltinType::Bool:
    EncType = "b";
    break;
  case BuiltinType::Char_U:
    EncType = "uc";
    break;
  case BuiltinType::UChar:
    EncType = "uc";
    break;
  case BuiltinType::SChar:
    EncType = "sc";
    break;
  case BuiltinType::UShort:
    EncType = "us";
    break;
  case BuiltinType::Short:
    EncType = "ss";
    break;
  case BuiltinType::UInt:
    EncType = "ui";
    break;
  case BuiltinType::Int:
    EncType = "si";
    break;
  case BuiltinType::ULong:
    EncType = "ul";
    break;
  case BuiltinType::Long:
    EncType = "sl";
    break;
  case BuiltinType::ULongLong:
    EncType = "ull";
    break;
  case BuiltinType::LongLong:
    EncType = "sll";
    break;
  case BuiltinType::Float:
    EncType = "ft";
    break;
  case BuiltinType::Double:
    EncType = "d";
    break;
  case BuiltinType::LongDouble:
    EncType = "ld";
    break;
  default:
    return false;
  }
  Enc += EncType;
  return true;
}

/// Appends a pointer encoding to Enc before calling appendType for the pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}

/// Appends array encoding to Enc before calling appendType for the element.
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The Qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}

/// Appends a function encoding to Enc, calling appendType for the return type
/// and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        ++I;
        if (I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else {
      if (FPT->isVariadic())
        Enc += "va";
      else
        Enc += '0';
    }
  }
  Enc += ')';
  return true;
}

/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {
  QualType QT = QType.getCanonicalType();

  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
    // The Qualifiers should be attached to the type rather than the array.
    // Thus we don't call appendQualifier() here.
    return appendArrayType(Enc, QT, AT, CGM, TSC, "");

  appendQualifier(Enc, QT);

  if (const BuiltinType *BT = QT->getAs<BuiltinType>())
    return appendBuiltinType(Enc, BT);

  if (const PointerType *PT = QT->getAs<PointerType>())
    return appendPointerType(Enc, PT, CGM, TSC);

  if (const EnumType *ET = QT->getAs<EnumType>())
    return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsStructureType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsUnionType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}
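
// Combining these handlers (illustrative): a global "const int *p" first
// emits the pointer's (empty) qualifier set, then "p(", then the qualified
// pointee, giving
//
//   p(c:si)
//
// while a global "int a[10]" goes through appendArrayType to give "a(10:si)".
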
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }
  return false;
}


//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//

const llvm::Triple &CodeGenModule::getTriple() const {
  return getTarget().getTriple();
}

bool CodeGenModule::supportsCOMDAT() const {
  return !getTriple().isOSBinFormatMachO();
}

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;

    return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    {
      ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
      if (getTarget().getABI() == "apcs-gnu")
        Kind = ARMABIInfo::APCS;
      else if (CodeGenOpts.FloatABI == "hard" ||
               (CodeGenOpts.FloatABI != "soft" &&
                Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
        Kind = ARMABIInfo::AAPCS_VFP;

      switch (Triple.getOS()) {
      case llvm::Triple::NaCl:
        return *(TheTargetCodeGenInfo =
                 new NaClARMTargetCodeGenInfo(Types, Kind));
      default:
        return *(TheTargetCodeGenInfo =
                 new ARMTargetCodeGenInfo(Types, Kind));
      }
    }

  case llvm::Triple::ppc:
    return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;

      return *(TheTargetCodeGenInfo =
               new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
    } else
      return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;

    return *(TheTargetCodeGenInfo =
             new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::systemz:
    return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
  case llvm::Triple::tce:
    return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool IsSmallStructInRegABI =
      X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return *(TheTargetCodeGenInfo =
               new WinX86_32TargetCodeGenInfo(Types,
                                              IsDarwinVectorABI,
                                              IsSmallStructInRegABI,
                                              IsWin32FloatStructABI,
                                              CodeGenOpts.NumRegisterParameters));
    } else {
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types,
                                           IsDarwinVectorABI,
                                           IsSmallStructInRegABI,
                                           IsWin32FloatStructABI,
                                           CodeGenOpts.NumRegisterParameters));
    }
  }

  case llvm::Triple::x86_64: {
    bool HasAVX = getTarget().getABI() == "avx";

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return *(TheTargetCodeGenInfo =
               new WinX86_64TargetCodeGenInfo(Types, HasAVX));
    case llvm::Triple::NaCl:
      return *(TheTargetCodeGenInfo =
               new NaClX86_64TargetCodeGenInfo(Types, HasAVX));
    default:
      return *(TheTargetCodeGenInfo =
               new X86_64TargetCodeGenInfo(Types, HasAVX));
    }
  }
  case llvm::Triple::hexagon:
    return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types));
  }
}