1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // These classes wrap the information about a call or function 11 // definition used to handle ABI compliancy. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "TargetInfo.h" 16 #include "ABIInfo.h" 17 #include "CGCXXABI.h" 18 #include "CGValue.h" 19 #include "CodeGenFunction.h" 20 #include "clang/AST/RecordLayout.h" 21 #include "clang/CodeGen/CGFunctionInfo.h" 22 #include "clang/Frontend/CodeGenOptions.h" 23 #include "llvm/ADT/StringExtras.h" 24 #include "llvm/ADT/Triple.h" 25 #include "llvm/IR/DataLayout.h" 26 #include "llvm/IR/Type.h" 27 #include "llvm/Support/raw_ostream.h" 28 #include <algorithm> // std::sort 29 30 using namespace clang; 31 using namespace CodeGen; 32 33 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, 34 llvm::Value *Array, 35 llvm::Value *Value, 36 unsigned FirstIndex, 37 unsigned LastIndex) { 38 // Alternatively, we could emit this as a loop in the source. 39 for (unsigned I = FirstIndex; I <= LastIndex; ++I) { 40 llvm::Value *Cell = 41 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I); 42 Builder.CreateAlignedStore(Value, Cell, CharUnits::One()); 43 } 44 } 45 46 static bool isAggregateTypeForABI(QualType T) { 47 return !CodeGenFunction::hasScalarEvaluationKind(T) || 48 T->isMemberFunctionPointerType(); 49 } 50 51 ABIArgInfo 52 ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign, 53 llvm::Type *Padding) const { 54 return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), 55 ByRef, Realign, Padding); 56 } 57 58 ABIArgInfo 59 ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const { 60 return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty), 61 /*ByRef*/ false, Realign); 62 } 63 64 Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 65 QualType Ty) const { 66 return Address::invalid(); 67 } 68 69 ABIInfo::~ABIInfo() {} 70 71 static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, 72 CGCXXABI &CXXABI) { 73 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 74 if (!RD) 75 return CGCXXABI::RAA_Default; 76 return CXXABI.getRecordArgABI(RD); 77 } 78 79 static CGCXXABI::RecordArgABI getRecordArgABI(QualType T, 80 CGCXXABI &CXXABI) { 81 const RecordType *RT = T->getAs<RecordType>(); 82 if (!RT) 83 return CGCXXABI::RAA_Default; 84 return getRecordArgABI(RT, CXXABI); 85 } 86 87 /// Pass transparent unions as if they were the type of the first element. Sema 88 /// should ensure that all elements of the union have the same "machine type". 
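/// For illustration (hypothetical example): a transparent union such as
///   typedef union { int *ip; float *fp; } PtrUnion
///       __attribute__((transparent_union));
/// is passed as if the argument simply had the type of its first field,
/// here 'int *'.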
89 static QualType useFirstFieldIfTransparentUnion(QualType Ty) { 90 if (const RecordType *UT = Ty->getAsUnionType()) { 91 const RecordDecl *UD = UT->getDecl(); 92 if (UD->hasAttr<TransparentUnionAttr>()) { 93 assert(!UD->field_empty() && "sema created an empty transparent union"); 94 return UD->field_begin()->getType(); 95 } 96 } 97 return Ty; 98 } 99 100 CGCXXABI &ABIInfo::getCXXABI() const { 101 return CGT.getCXXABI(); 102 } 103 104 ASTContext &ABIInfo::getContext() const { 105 return CGT.getContext(); 106 } 107 108 llvm::LLVMContext &ABIInfo::getVMContext() const { 109 return CGT.getLLVMContext(); 110 } 111 112 const llvm::DataLayout &ABIInfo::getDataLayout() const { 113 return CGT.getDataLayout(); 114 } 115 116 const TargetInfo &ABIInfo::getTarget() const { 117 return CGT.getTarget(); 118 } 119 120 bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 121 return false; 122 } 123 124 bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 125 uint64_t Members) const { 126 return false; 127 } 128 129 bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const { 130 return false; 131 } 132 133 void ABIArgInfo::dump() const { 134 raw_ostream &OS = llvm::errs(); 135 OS << "(ABIArgInfo Kind="; 136 switch (TheKind) { 137 case Direct: 138 OS << "Direct Type="; 139 if (llvm::Type *Ty = getCoerceToType()) 140 Ty->print(OS); 141 else 142 OS << "null"; 143 break; 144 case Extend: 145 OS << "Extend"; 146 break; 147 case Ignore: 148 OS << "Ignore"; 149 break; 150 case InAlloca: 151 OS << "InAlloca Offset=" << getInAllocaFieldIndex(); 152 break; 153 case Indirect: 154 OS << "Indirect Align=" << getIndirectAlign().getQuantity() 155 << " ByVal=" << getIndirectByVal() 156 << " Realign=" << getIndirectRealign(); 157 break; 158 case Expand: 159 OS << "Expand"; 160 break; 161 } 162 OS << ")\n"; 163 } 164 165 /// Emit va_arg for a platform using the common void* representation, 166 /// where arguments are simply emitted in an array of slots on the stack. 167 /// 168 /// This version implements the core direct-value passing rules. 169 /// 170 /// \param SlotSize - The size and alignment of a stack slot. 171 /// Each argument will be allocated to a multiple of this number of 172 /// slots, and all the slots will be aligned to this value. 173 /// \param AllowHigherAlign - The slot alignment is not a cap; 174 /// an argument type with an alignment greater than the slot size 175 /// will be emitted on a higher-alignment address, potentially 176 /// leaving one or more empty slots behind as padding. If this 177 /// is false, the returned address might be less-aligned than 178 /// DirectAlign. 179 static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, 180 Address VAListAddr, 181 llvm::Type *DirectTy, 182 CharUnits DirectSize, 183 CharUnits DirectAlign, 184 CharUnits SlotSize, 185 bool AllowHigherAlign) { 186 // Cast the element type to i8* if necessary. Some platforms define 187 // va_list as a struct containing an i8* instead of just an i8*. 188 if (VAListAddr.getElementType() != CGF.Int8PtrTy) 189 VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy); 190 191 llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur"); 192 193 // If the CC aligns values higher than the slot size, do so if needed. 
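  // Sketch of the rounding below: the aligned pointer is computed as
  //   aligned = (ptr + align - 1) & ~(align - 1)
  // For a hypothetical argp.cur of 0x1008 and a 16-byte DirectAlign this
  // yields 0x1010, skipping one 8-byte slot of padding on a typical
  // 64-bit stack.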
194 Address Addr = Address::invalid(); 195 if (AllowHigherAlign && DirectAlign > SlotSize) { 196 llvm::Value *PtrAsInt = Ptr; 197 PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy); 198 PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt, 199 llvm::ConstantInt::get(CGF.IntPtrTy, DirectAlign.getQuantity() - 1)); 200 PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt, 201 llvm::ConstantInt::get(CGF.IntPtrTy, -DirectAlign.getQuantity())); 202 Addr = Address(CGF.Builder.CreateIntToPtr(PtrAsInt, Ptr->getType(), 203 "argp.cur.aligned"), 204 DirectAlign); 205 } else { 206 Addr = Address(Ptr, SlotSize); 207 } 208 209 // Advance the pointer past the argument, then store that back. 210 CharUnits FullDirectSize = DirectSize.RoundUpToAlignment(SlotSize); 211 llvm::Value *NextPtr = 212 CGF.Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), FullDirectSize, 213 "argp.next"); 214 CGF.Builder.CreateStore(NextPtr, VAListAddr); 215 216 // If the argument is smaller than a slot, and this is a big-endian 217 // target, the argument will be right-adjusted in its slot. 218 if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian()) { 219 Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize); 220 } 221 222 Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy); 223 return Addr; 224 } 225 226 /// Emit va_arg for a platform using the common void* representation, 227 /// where arguments are simply emitted in an array of slots on the stack. 228 /// 229 /// \param IsIndirect - Values of this type are passed indirectly. 230 /// \param ValueInfo - The size and alignment of this type, generally 231 /// computed with getContext().getTypeInfoInChars(ValueTy). 232 /// \param SlotSizeAndAlign - The size and alignment of a stack slot. 233 /// Each argument will be allocated to a multiple of this number of 234 /// slots, and all the slots will be aligned to this value. 235 /// \param AllowHigherAlign - The slot alignment is not a cap; 236 /// an argument type with an alignment greater than the slot size 237 /// will be emitted on a higher-alignment address, potentially 238 /// leaving one or more empty slots behind as padding. 239 static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, 240 QualType ValueTy, bool IsIndirect, 241 std::pair<CharUnits, CharUnits> ValueInfo, 242 CharUnits SlotSizeAndAlign, 243 bool AllowHigherAlign) { 244 // The size and alignment of the value that was passed directly. 245 CharUnits DirectSize, DirectAlign; 246 if (IsIndirect) { 247 DirectSize = CGF.getPointerSize(); 248 DirectAlign = CGF.getPointerAlign(); 249 } else { 250 DirectSize = ValueInfo.first; 251 DirectAlign = ValueInfo.second; 252 } 253 254 // Cast the address we've calculated to the right type. 
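  // In the indirect case the stack slot holds a pointer to the value rather
  // than the value itself, so DirectTy below becomes a pointer type and the
  // extra load afterwards recovers the address of the actual argument.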
255 llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
256 if (IsIndirect)
257 DirectTy = DirectTy->getPointerTo(0);
258
259 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
260 DirectSize, DirectAlign,
261 SlotSizeAndAlign,
262 AllowHigherAlign);
263
264 if (IsIndirect) {
265 Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
266 }
267
268 return Addr;
269
270 }
271
272 static Address emitMergePHI(CodeGenFunction &CGF,
273 Address Addr1, llvm::BasicBlock *Block1,
274 Address Addr2, llvm::BasicBlock *Block2,
275 const llvm::Twine &Name = "") {
276 assert(Addr1.getType() == Addr2.getType());
277 llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
278 PHI->addIncoming(Addr1.getPointer(), Block1);
279 PHI->addIncoming(Addr2.getPointer(), Block2);
280 CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
281 return Address(PHI, Align);
282 }
283
284 TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
285
286 // If someone can figure out a general rule for this, that would be great.
287 // It's probably just doomed to be platform-dependent, though.
288 unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
289 // Verified for:
290 // x86-64 FreeBSD, Linux, Darwin
291 // x86-32 FreeBSD, Linux, Darwin
292 // PowerPC Linux, Darwin
293 // ARM Darwin (*not* EABI)
294 // AArch64 Linux
295 return 32;
296 }
297
298 bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
299 const FunctionNoProtoType *fnType) const {
300 // The following conventions are known to require this to be false:
301 // x86_stdcall
302 // MIPS
303 // For everything else, we just prefer false unless we opt out.
304 return false;
305 }
306
307 void
308 TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
309 llvm::SmallString<24> &Opt) const {
310 // This assumes the user is passing a library name like "rt" instead of a
311 // filename like "librt.a/so", and that they don't care whether it's static or
312 // dynamic.
313 Opt = "-l";
314 Opt += Lib;
315 }
316
317 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
318
319 /// isEmptyField - Return true iff the field is "empty", that is, it
320 /// is an unnamed bit-field or an (array of) empty record(s).
321 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
322 bool AllowArrays) {
323 if (FD->isUnnamedBitfield())
324 return true;
325
326 QualType FT = FD->getType();
327
328 // Constant arrays of empty records count as empty, strip them off.
329 // Constant arrays of zero length always count as empty.
330 if (AllowArrays)
331 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
332 if (AT->getSize() == 0)
333 return true;
334 FT = AT->getElementType();
335 }
336
337 const RecordType *RT = FT->getAs<RecordType>();
338 if (!RT)
339 return false;
340
341 // C++ record fields are never empty, at least in the Itanium ABI.
342 //
343 // FIXME: We should use a predicate for whether this behavior is true in the
344 // current ABI.
345 if (isa<CXXRecordDecl>(RT->getDecl()))
346 return false;
347
348 return isEmptyRecord(Context, FT, AllowArrays);
349 }
350
351 /// isEmptyRecord - Return true iff a structure contains only empty
352 /// fields. Note that a structure with a flexible array member is not
353 /// considered empty.
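/// For illustration, a struct whose only member is an unnamed zero-width
/// bit-field, e.g. 'struct Pad { int : 0; };', counts as empty under these
/// rules, while any struct with a flexible array member never does.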
354 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
355 const RecordType *RT = T->getAs<RecordType>();
356 if (!RT)
357 return false;
358 const RecordDecl *RD = RT->getDecl();
359 if (RD->hasFlexibleArrayMember())
360 return false;
361
362 // If this is a C++ record, check the bases first.
363 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
364 for (const auto &I : CXXRD->bases())
365 if (!isEmptyRecord(Context, I.getType(), true))
366 return false;
367
368 for (const auto *I : RD->fields())
369 if (!isEmptyField(Context, I, AllowArrays))
370 return false;
371 return true;
372 }
373
374 /// isSingleElementStruct - Determine if a structure is a "single
375 /// element struct", i.e. it has exactly one non-empty field or
376 /// exactly one field which is itself a single element
377 /// struct. Structures with flexible array members are never
378 /// considered single element structs.
379 ///
380 /// \return The type of the single non-empty field, if
381 /// it exists.
382 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
383 const RecordType *RT = T->getAs<RecordType>();
384 if (!RT)
385 return nullptr;
386
387 const RecordDecl *RD = RT->getDecl();
388 if (RD->hasFlexibleArrayMember())
389 return nullptr;
390
391 const Type *Found = nullptr;
392
393 // If this is a C++ record, check the bases first.
394 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
395 for (const auto &I : CXXRD->bases()) {
396 // Ignore empty records.
397 if (isEmptyRecord(Context, I.getType(), true))
398 continue;
399
400 // If we already found an element then this isn't a single-element struct.
401 if (Found)
402 return nullptr;
403
404 // If this is non-empty and not a single element struct, the composite
405 // cannot be a single element struct.
406 Found = isSingleElementStruct(I.getType(), Context);
407 if (!Found)
408 return nullptr;
409 }
410 }
411
412 // Check for single element.
413 for (const auto *FD : RD->fields()) {
414 QualType FT = FD->getType();
415
416 // Ignore empty fields.
417 if (isEmptyField(Context, FD, true))
418 continue;
419
420 // If we already found an element then this isn't a single-element
421 // struct.
422 if (Found)
423 return nullptr;
424
425 // Treat single element arrays as the element.
426 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
427 if (AT->getSize().getZExtValue() != 1)
428 break;
429 FT = AT->getElementType();
430 }
431
432 if (!isAggregateTypeForABI(FT)) {
433 Found = FT.getTypePtr();
434 } else {
435 Found = isSingleElementStruct(FT, Context);
436 if (!Found)
437 return nullptr;
438 }
439 }
440
441 // We don't consider a struct a single-element struct if it has
442 // padding beyond the element type.
443 if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
444 return nullptr;
445
446 return Found;
447 }
448
449 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
450 // Treat complex types as the element type.
451 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
452 Ty = CTy->getElementType();
453
454 // Check for a type which we know has a simple scalar argument-passing
455 // convention without any padding. (We're specifically looking for 32
456 // and 64-bit integer and integer-equivalents, float, and double.)
457 if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() && 458 !Ty->isEnumeralType() && !Ty->isBlockPointerType()) 459 return false; 460 461 uint64_t Size = Context.getTypeSize(Ty); 462 return Size == 32 || Size == 64; 463 } 464 465 /// canExpandIndirectArgument - Test whether an argument type which is to be 466 /// passed indirectly (on the stack) would have the equivalent layout if it was 467 /// expanded into separate arguments. If so, we prefer to do the latter to avoid 468 /// inhibiting optimizations. 469 /// 470 // FIXME: This predicate is missing many cases, currently it just follows 471 // llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We 472 // should probably make this smarter, or better yet make the LLVM backend 473 // capable of handling it. 474 static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) { 475 // We can only expand structure types. 476 const RecordType *RT = Ty->getAs<RecordType>(); 477 if (!RT) 478 return false; 479 480 // We can only expand (C) structures. 481 // 482 // FIXME: This needs to be generalized to handle classes as well. 483 const RecordDecl *RD = RT->getDecl(); 484 if (!RD->isStruct()) 485 return false; 486 487 // We try to expand CLike CXXRecordDecl. 488 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 489 if (!CXXRD->isCLike()) 490 return false; 491 } 492 493 uint64_t Size = 0; 494 495 for (const auto *FD : RD->fields()) { 496 if (!is32Or64BitBasicType(FD->getType(), Context)) 497 return false; 498 499 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know 500 // how to expand them yet, and the predicate for telling if a bitfield still 501 // counts as "basic" is more complicated than what we were doing previously. 502 if (FD->isBitField()) 503 return false; 504 505 Size += Context.getTypeSize(FD->getType()); 506 } 507 508 // Make sure there are not any holes in the struct. 509 if (Size != Context.getTypeSize(Ty)) 510 return false; 511 512 return true; 513 } 514 515 namespace { 516 /// DefaultABIInfo - The default implementation for ABI specific 517 /// details. This implementation provides information which results in 518 /// self-consistent and sensible LLVM IR generation, but does not 519 /// conform to any particular ABI. 520 class DefaultABIInfo : public ABIInfo { 521 public: 522 DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 523 524 ABIArgInfo classifyReturnType(QualType RetTy) const; 525 ABIArgInfo classifyArgumentType(QualType RetTy) const; 526 527 void computeInfo(CGFunctionInfo &FI) const override { 528 if (!getCXXABI().classifyReturnType(FI)) 529 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 530 for (auto &I : FI.arguments()) 531 I.info = classifyArgumentType(I.type); 532 } 533 534 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 535 QualType Ty) const override; 536 }; 537 538 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo { 539 public: 540 DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 541 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 542 }; 543 544 Address DefaultABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 545 QualType Ty) const { 546 return Address::invalid(); 547 } 548 549 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const { 550 Ty = useFirstFieldIfTransparentUnion(Ty); 551 552 if (isAggregateTypeForABI(Ty)) { 553 // Records with non-trivial destructors/copy-constructors should not be 554 // passed by value. 
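    // For example, a C++ class with a user-provided copy constructor or
    // destructor cannot safely be copied field-by-field into registers, so
    // the C++ ABI forces it into memory and we pass its address instead.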
555 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 556 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 557 558 return getNaturalAlignIndirect(Ty); 559 } 560 561 // Treat an enum type as its underlying type. 562 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 563 Ty = EnumTy->getDecl()->getIntegerType(); 564 565 return (Ty->isPromotableIntegerType() ? 566 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 567 } 568 569 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const { 570 if (RetTy->isVoidType()) 571 return ABIArgInfo::getIgnore(); 572 573 if (isAggregateTypeForABI(RetTy)) 574 return getNaturalAlignIndirect(RetTy); 575 576 // Treat an enum type as its underlying type. 577 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 578 RetTy = EnumTy->getDecl()->getIntegerType(); 579 580 return (RetTy->isPromotableIntegerType() ? 581 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 582 } 583 584 //===----------------------------------------------------------------------===// 585 // WebAssembly ABI Implementation 586 // 587 // This is a very simple ABI that relies a lot on DefaultABIInfo. 588 //===----------------------------------------------------------------------===// 589 590 class WebAssemblyABIInfo final : public DefaultABIInfo { 591 public: 592 explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT) 593 : DefaultABIInfo(CGT) {} 594 595 private: 596 ABIArgInfo classifyReturnType(QualType RetTy) const; 597 ABIArgInfo classifyArgumentType(QualType Ty) const; 598 599 // DefaultABIInfo's classifyReturnType and classifyArgumentType are 600 // non-virtual, but computeInfo is virtual, so we overload that. 601 void computeInfo(CGFunctionInfo &FI) const override { 602 if (!getCXXABI().classifyReturnType(FI)) 603 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 604 for (auto &Arg : FI.arguments()) 605 Arg.info = classifyArgumentType(Arg.type); 606 } 607 }; 608 609 class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo { 610 public: 611 explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 612 : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {} 613 }; 614 615 /// \brief Classify argument of given type \p Ty. 616 ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const { 617 Ty = useFirstFieldIfTransparentUnion(Ty); 618 619 if (isAggregateTypeForABI(Ty)) { 620 // Records with non-trivial destructors/copy-constructors should not be 621 // passed by value. 622 if (auto RAA = getRecordArgABI(Ty, getCXXABI())) 623 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 624 // Ignore empty structs/unions. 625 if (isEmptyRecord(getContext(), Ty, true)) 626 return ABIArgInfo::getIgnore(); 627 // Lower single-element structs to just pass a regular value. TODO: We 628 // could do reasonable-size multiple-element structs too, using getExpand(), 629 // though watch out for things like bitfields. 630 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) 631 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 632 } 633 634 // Otherwise just do the default thing. 635 return DefaultABIInfo::classifyArgumentType(Ty); 636 } 637 638 ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const { 639 if (isAggregateTypeForABI(RetTy)) { 640 // Records with non-trivial destructors/copy-constructors should not be 641 // returned by value. 642 if (!getRecordArgABI(RetTy, getCXXABI())) { 643 // Ignore empty structs/unions. 
644 if (isEmptyRecord(getContext(), RetTy, true)) 645 return ABIArgInfo::getIgnore(); 646 // Lower single-element structs to just return a regular value. TODO: We 647 // could do reasonable-size multiple-element structs too, using 648 // ABIArgInfo::getDirect(). 649 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) 650 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 651 } 652 } 653 654 // Otherwise just do the default thing. 655 return DefaultABIInfo::classifyReturnType(RetTy); 656 } 657 658 //===----------------------------------------------------------------------===// 659 // le32/PNaCl bitcode ABI Implementation 660 // 661 // This is a simplified version of the x86_32 ABI. Arguments and return values 662 // are always passed on the stack. 663 //===----------------------------------------------------------------------===// 664 665 class PNaClABIInfo : public ABIInfo { 666 public: 667 PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 668 669 ABIArgInfo classifyReturnType(QualType RetTy) const; 670 ABIArgInfo classifyArgumentType(QualType RetTy) const; 671 672 void computeInfo(CGFunctionInfo &FI) const override; 673 Address EmitVAArg(CodeGenFunction &CGF, 674 Address VAListAddr, QualType Ty) const override; 675 }; 676 677 class PNaClTargetCodeGenInfo : public TargetCodeGenInfo { 678 public: 679 PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 680 : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {} 681 }; 682 683 void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const { 684 if (!getCXXABI().classifyReturnType(FI)) 685 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 686 687 for (auto &I : FI.arguments()) 688 I.info = classifyArgumentType(I.type); 689 } 690 691 Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 692 QualType Ty) const { 693 return Address::invalid(); 694 } 695 696 /// \brief Classify argument of given type \p Ty. 697 ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const { 698 if (isAggregateTypeForABI(Ty)) { 699 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 700 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 701 return getNaturalAlignIndirect(Ty); 702 } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { 703 // Treat an enum type as its underlying type. 704 Ty = EnumTy->getDecl()->getIntegerType(); 705 } else if (Ty->isFloatingType()) { 706 // Floating-point types don't go inreg. 707 return ABIArgInfo::getDirect(); 708 } 709 710 return (Ty->isPromotableIntegerType() ? 711 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 712 } 713 714 ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const { 715 if (RetTy->isVoidType()) 716 return ABIArgInfo::getIgnore(); 717 718 // In the PNaCl ABI we always return records/structures on the stack. 719 if (isAggregateTypeForABI(RetTy)) 720 return getNaturalAlignIndirect(RetTy); 721 722 // Treat an enum type as its underlying type. 723 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 724 RetTy = EnumTy->getDecl()->getIntegerType(); 725 726 return (RetTy->isPromotableIntegerType() ? 727 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 728 } 729 730 /// IsX86_MMXType - Return true if this is an MMX type. 731 bool IsX86_MMXType(llvm::Type *IRType) { 732 // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>. 
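  // In other words, a 64-bit vector of integer elements narrower than 64
  // bits; the last check deliberately excludes <1 x i64>.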
733 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 && 734 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() && 735 IRType->getScalarSizeInBits() != 64; 736 } 737 738 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 739 StringRef Constraint, 740 llvm::Type* Ty) { 741 if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) { 742 if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) { 743 // Invalid MMX constraint 744 return nullptr; 745 } 746 747 return llvm::Type::getX86_MMXTy(CGF.getLLVMContext()); 748 } 749 750 // No operation needed 751 return Ty; 752 } 753 754 /// Returns true if this type can be passed in SSE registers with the 755 /// X86_VectorCall calling convention. Shared between x86_32 and x86_64. 756 static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) { 757 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 758 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) 759 return true; 760 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 761 // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX 762 // registers specially. 763 unsigned VecSize = Context.getTypeSize(VT); 764 if (VecSize == 128 || VecSize == 256 || VecSize == 512) 765 return true; 766 } 767 return false; 768 } 769 770 /// Returns true if this aggregate is small enough to be passed in SSE registers 771 /// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64. 772 static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) { 773 return NumMembers <= 4; 774 } 775 776 //===----------------------------------------------------------------------===// 777 // X86-32 ABI Implementation 778 //===----------------------------------------------------------------------===// 779 780 /// \brief Similar to llvm::CCState, but for Clang. 781 struct CCState { 782 CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {} 783 784 unsigned CC; 785 unsigned FreeRegs; 786 unsigned FreeSSERegs; 787 }; 788 789 /// X86_32ABIInfo - The X86-32 ABI information. 790 class X86_32ABIInfo : public ABIInfo { 791 enum Class { 792 Integer, 793 Float 794 }; 795 796 static const unsigned MinABIStackAlignInBytes = 4; 797 798 bool IsDarwinVectorABI; 799 bool IsRetSmallStructInRegABI; 800 bool IsWin32StructABI; 801 bool IsSoftFloatABI; 802 unsigned DefaultNumRegisterParameters; 803 804 static bool isRegisterSize(unsigned Size) { 805 return (Size == 8 || Size == 16 || Size == 32 || Size == 64); 806 } 807 808 bool isHomogeneousAggregateBaseType(QualType Ty) const override { 809 // FIXME: Assumes vectorcall is in use. 810 return isX86VectorTypeForVectorCall(getContext(), Ty); 811 } 812 813 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 814 uint64_t NumMembers) const override { 815 // FIXME: Assumes vectorcall is in use. 816 return isX86VectorCallAggregateSmallEnough(NumMembers); 817 } 818 819 bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const; 820 821 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 822 /// such that the argument will be passed in memory. 823 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const; 824 825 ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const; 826 827 /// \brief Return the alignment to use for the given type on the stack. 
828 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const; 829 830 Class classify(QualType Ty) const; 831 ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const; 832 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const; 833 bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const; 834 835 /// \brief Rewrite the function info so that all memory arguments use 836 /// inalloca. 837 void rewriteWithInAlloca(CGFunctionInfo &FI) const; 838 839 void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, 840 CharUnits &StackOffset, ABIArgInfo &Info, 841 QualType Type) const; 842 843 public: 844 845 void computeInfo(CGFunctionInfo &FI) const override; 846 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 847 QualType Ty) const override; 848 849 X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI, 850 bool RetSmallStructInRegABI, bool Win32StructABI, 851 unsigned NumRegisterParameters, bool SoftFloatABI) 852 : ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI), 853 IsRetSmallStructInRegABI(RetSmallStructInRegABI), 854 IsWin32StructABI(Win32StructABI), 855 IsSoftFloatABI(SoftFloatABI), 856 DefaultNumRegisterParameters(NumRegisterParameters) {} 857 }; 858 859 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo { 860 public: 861 X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI, 862 bool RetSmallStructInRegABI, bool Win32StructABI, 863 unsigned NumRegisterParameters, bool SoftFloatABI) 864 : TargetCodeGenInfo(new X86_32ABIInfo( 865 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI, 866 NumRegisterParameters, SoftFloatABI)) {} 867 868 static bool isStructReturnInRegABI( 869 const llvm::Triple &Triple, const CodeGenOptions &Opts); 870 871 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 872 CodeGen::CodeGenModule &CGM) const override; 873 874 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 875 // Darwin uses different dwarf register numbers for EH. 876 if (CGM.getTarget().getTriple().isOSDarwin()) return 5; 877 return 4; 878 } 879 880 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 881 llvm::Value *Address) const override; 882 883 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 884 StringRef Constraint, 885 llvm::Type* Ty) const override { 886 return X86AdjustInlineAsmType(CGF, Constraint, Ty); 887 } 888 889 void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue, 890 std::string &Constraints, 891 std::vector<llvm::Type *> &ResultRegTypes, 892 std::vector<llvm::Type *> &ResultTruncRegTypes, 893 std::vector<LValue> &ResultRegDests, 894 std::string &AsmString, 895 unsigned NumOutputs) const override; 896 897 llvm::Constant * 898 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { 899 unsigned Sig = (0xeb << 0) | // jmp rel8 900 (0x06 << 8) | // .+0x08 901 ('F' << 16) | 902 ('T' << 24); 903 return llvm::ConstantInt::get(CGM.Int32Ty, Sig); 904 } 905 }; 906 907 } 908 909 /// Rewrite input constraint references after adding some output constraints. 
910 /// In the case where there is one output and one input and we add one output, 911 /// we need to replace all operand references greater than or equal to 1: 912 /// mov $0, $1 913 /// mov eax, $1 914 /// The result will be: 915 /// mov $0, $2 916 /// mov eax, $2 917 static void rewriteInputConstraintReferences(unsigned FirstIn, 918 unsigned NumNewOuts, 919 std::string &AsmString) { 920 std::string Buf; 921 llvm::raw_string_ostream OS(Buf); 922 size_t Pos = 0; 923 while (Pos < AsmString.size()) { 924 size_t DollarStart = AsmString.find('$', Pos); 925 if (DollarStart == std::string::npos) 926 DollarStart = AsmString.size(); 927 size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart); 928 if (DollarEnd == std::string::npos) 929 DollarEnd = AsmString.size(); 930 OS << StringRef(&AsmString[Pos], DollarEnd - Pos); 931 Pos = DollarEnd; 932 size_t NumDollars = DollarEnd - DollarStart; 933 if (NumDollars % 2 != 0 && Pos < AsmString.size()) { 934 // We have an operand reference. 935 size_t DigitStart = Pos; 936 size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart); 937 if (DigitEnd == std::string::npos) 938 DigitEnd = AsmString.size(); 939 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart); 940 unsigned OperandIndex; 941 if (!OperandStr.getAsInteger(10, OperandIndex)) { 942 if (OperandIndex >= FirstIn) 943 OperandIndex += NumNewOuts; 944 OS << OperandIndex; 945 } else { 946 OS << OperandStr; 947 } 948 Pos = DigitEnd; 949 } 950 } 951 AsmString = std::move(OS.str()); 952 } 953 954 /// Add output constraints for EAX:EDX because they are return registers. 955 void X86_32TargetCodeGenInfo::addReturnRegisterOutputs( 956 CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints, 957 std::vector<llvm::Type *> &ResultRegTypes, 958 std::vector<llvm::Type *> &ResultTruncRegTypes, 959 std::vector<LValue> &ResultRegDests, std::string &AsmString, 960 unsigned NumOutputs) const { 961 uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType()); 962 963 // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is 964 // larger. 965 if (!Constraints.empty()) 966 Constraints += ','; 967 if (RetWidth <= 32) { 968 Constraints += "={eax}"; 969 ResultRegTypes.push_back(CGF.Int32Ty); 970 } else { 971 // Use the 'A' constraint for EAX:EDX. 972 Constraints += "=A"; 973 ResultRegTypes.push_back(CGF.Int64Ty); 974 } 975 976 // Truncate EAX or EAX:EDX to an integer of the appropriate size. 977 llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth); 978 ResultTruncRegTypes.push_back(CoerceTy); 979 980 // Coerce the integer by bitcasting the return slot pointer. 981 ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(), 982 CoerceTy->getPointerTo())); 983 ResultRegDests.push_back(ReturnSlot); 984 985 rewriteInputConstraintReferences(NumOutputs, 1, AsmString); 986 } 987 988 /// shouldReturnTypeInRegister - Determine if the given type should be 989 /// returned in a register (for the Darwin ABI). 990 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, 991 ASTContext &Context) const { 992 uint64_t Size = Context.getTypeSize(Ty); 993 994 // Type must be register sized. 995 if (!isRegisterSize(Size)) 996 return false; 997 998 if (Ty->isVectorType()) { 999 // 64- and 128- bit vectors inside structures are not returned in 1000 // registers. 
1001 if (Size == 64 || Size == 128) 1002 return false; 1003 1004 return true; 1005 } 1006 1007 // If this is a builtin, pointer, enum, complex type, member pointer, or 1008 // member function pointer it is ok. 1009 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() || 1010 Ty->isAnyComplexType() || Ty->isEnumeralType() || 1011 Ty->isBlockPointerType() || Ty->isMemberPointerType()) 1012 return true; 1013 1014 // Arrays are treated like records. 1015 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) 1016 return shouldReturnTypeInRegister(AT->getElementType(), Context); 1017 1018 // Otherwise, it must be a record type. 1019 const RecordType *RT = Ty->getAs<RecordType>(); 1020 if (!RT) return false; 1021 1022 // FIXME: Traverse bases here too. 1023 1024 // Structure types are passed in register if all fields would be 1025 // passed in a register. 1026 for (const auto *FD : RT->getDecl()->fields()) { 1027 // Empty fields are ignored. 1028 if (isEmptyField(Context, FD, true)) 1029 continue; 1030 1031 // Check fields recursively. 1032 if (!shouldReturnTypeInRegister(FD->getType(), Context)) 1033 return false; 1034 } 1035 return true; 1036 } 1037 1038 ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const { 1039 // If the return value is indirect, then the hidden argument is consuming one 1040 // integer register. 1041 if (State.FreeRegs) { 1042 --State.FreeRegs; 1043 return getNaturalAlignIndirectInReg(RetTy); 1044 } 1045 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false); 1046 } 1047 1048 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, 1049 CCState &State) const { 1050 if (RetTy->isVoidType()) 1051 return ABIArgInfo::getIgnore(); 1052 1053 const Type *Base = nullptr; 1054 uint64_t NumElts = 0; 1055 if (State.CC == llvm::CallingConv::X86_VectorCall && 1056 isHomogeneousAggregate(RetTy, Base, NumElts)) { 1057 // The LLVM struct type for such an aggregate should lower properly. 1058 return ABIArgInfo::getDirect(); 1059 } 1060 1061 if (const VectorType *VT = RetTy->getAs<VectorType>()) { 1062 // On Darwin, some vectors are returned in registers. 1063 if (IsDarwinVectorABI) { 1064 uint64_t Size = getContext().getTypeSize(RetTy); 1065 1066 // 128-bit vectors are a special case; they are returned in 1067 // registers and we need to make sure to pick a type the LLVM 1068 // backend will like. 1069 if (Size == 128) 1070 return ABIArgInfo::getDirect(llvm::VectorType::get( 1071 llvm::Type::getInt64Ty(getVMContext()), 2)); 1072 1073 // Always return in register if it fits in a general purpose 1074 // register, or if it is 64 bits and has a single element. 1075 if ((Size == 8 || Size == 16 || Size == 32) || 1076 (Size == 64 && VT->getNumElements() == 1)) 1077 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 1078 Size)); 1079 1080 return getIndirectReturnResult(RetTy, State); 1081 } 1082 1083 return ABIArgInfo::getDirect(); 1084 } 1085 1086 if (isAggregateTypeForABI(RetTy)) { 1087 if (const RecordType *RT = RetTy->getAs<RecordType>()) { 1088 // Structures with flexible arrays are always indirect. 1089 if (RT->getDecl()->hasFlexibleArrayMember()) 1090 return getIndirectReturnResult(RetTy, State); 1091 } 1092 1093 // If specified, structs and unions are always indirect. 1094 if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType()) 1095 return getIndirectReturnResult(RetTy, State); 1096 1097 // Small structures which are register sized are generally returned 1098 // in a register. 
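    // For illustration, when the small-struct-in-register ABI is in effect
    // (e.g. on Darwin or with -freg-struct-return): 'struct { short a, b; }'
    // comes back as an i32 in %eax, while a single-element
    // 'struct { float f; }' is returned directly as a float (outside the
    // Win32 struct ABI) and therefore in an FP register.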
1099 if (shouldReturnTypeInRegister(RetTy, getContext())) {
1100 uint64_t Size = getContext().getTypeSize(RetTy);
1101
1102 // As a special-case, if the struct is a "single-element" struct, and
1103 // the field is of type "float" or "double", return it in a
1104 // floating-point register. (MSVC does not apply this special case.)
1105 // We apply a similar transformation for pointer types to improve the
1106 // quality of the generated IR.
1107 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
1108 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1109 || SeltTy->hasPointerRepresentation())
1110 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
1111
1112 // FIXME: We should be able to narrow this integer in cases with dead
1113 // padding.
1114 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
1115 }
1116
1117 return getIndirectReturnResult(RetTy, State);
1118 }
1119
1120 // Treat an enum type as its underlying type.
1121 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1122 RetTy = EnumTy->getDecl()->getIntegerType();
1123
1124 return (RetTy->isPromotableIntegerType() ?
1125 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1126 }
1127
1128 static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
1129 return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
1130 }
1131
1132 static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
1133 const RecordType *RT = Ty->getAs<RecordType>();
1134 if (!RT)
1135 return false;
1136 const RecordDecl *RD = RT->getDecl();
1137
1138 // If this is a C++ record, check the bases first.
1139 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1140 for (const auto &I : CXXRD->bases())
1141 if (isRecordWithSSEVectorType(Context, I.getType()))
1142 return true;
1143
1144 for (const auto *i : RD->fields()) {
1145 QualType FT = i->getType();
1146
1147 if (isSSEVectorType(Context, FT))
1148 return true;
1149
1150 if (isRecordWithSSEVectorType(Context, FT))
1151 return true;
1152 }
1153
1154 return false;
1155 }
1156
1157 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
1158 unsigned Align) const {
1159 // Otherwise, if the alignment is less than or equal to the minimum ABI
1160 // alignment, just use the default; the backend will handle this.
1161 if (Align <= MinABIStackAlignInBytes)
1162 return 0; // Use default alignment.
1163
1164 // On non-Darwin, the stack type alignment is always 4.
1165 if (!IsDarwinVectorABI) {
1166 // Set explicit alignment, since we may need to realign the top.
1167 return MinABIStackAlignInBytes;
1168 }
1169
1170 // Otherwise, if the type contains an SSE vector type, the alignment is 16.
1171 if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
1172 isRecordWithSSEVectorType(getContext(), Ty)))
1173 return 16;
1174
1175 return MinABIStackAlignInBytes;
1176 }
1177
1178 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
1179 CCState &State) const {
1180 if (!ByVal) {
1181 if (State.FreeRegs) {
1182 --State.FreeRegs; // Non-byval indirects just use one pointer.
1183 return getNaturalAlignIndirectInReg(Ty);
1184 }
1185 return getNaturalAlignIndirect(Ty, false);
1186 }
1187
1188 // Compute the byval alignment.
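  // The byval slot gets at least the 4-byte ABI minimum;
  // getTypeStackAlignInBytes may bump that to 16 for types containing SSE
  // vectors, and if the type's natural alignment is still higher than the
  // stack can guarantee we ask the backend to realign the argument.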
1189 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 1190 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign); 1191 if (StackAlign == 0) 1192 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true); 1193 1194 // If the stack alignment is less than the type alignment, realign the 1195 // argument. 1196 bool Realign = TypeAlign > StackAlign; 1197 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign), 1198 /*ByVal=*/true, Realign); 1199 } 1200 1201 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const { 1202 const Type *T = isSingleElementStruct(Ty, getContext()); 1203 if (!T) 1204 T = Ty.getTypePtr(); 1205 1206 if (const BuiltinType *BT = T->getAs<BuiltinType>()) { 1207 BuiltinType::Kind K = BT->getKind(); 1208 if (K == BuiltinType::Float || K == BuiltinType::Double) 1209 return Float; 1210 } 1211 return Integer; 1212 } 1213 1214 bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State, 1215 bool &NeedsPadding) const { 1216 NeedsPadding = false; 1217 if (!IsSoftFloatABI) { 1218 Class C = classify(Ty); 1219 if (C == Float) 1220 return false; 1221 } 1222 1223 unsigned Size = getContext().getTypeSize(Ty); 1224 unsigned SizeInRegs = (Size + 31) / 32; 1225 1226 if (SizeInRegs == 0) 1227 return false; 1228 1229 if (SizeInRegs > State.FreeRegs) { 1230 State.FreeRegs = 0; 1231 return false; 1232 } 1233 1234 State.FreeRegs -= SizeInRegs; 1235 1236 if (State.CC == llvm::CallingConv::X86_FastCall || 1237 State.CC == llvm::CallingConv::X86_VectorCall) { 1238 if (Size > 32) 1239 return false; 1240 1241 if (Ty->isIntegralOrEnumerationType()) 1242 return true; 1243 1244 if (Ty->isPointerType()) 1245 return true; 1246 1247 if (Ty->isReferenceType()) 1248 return true; 1249 1250 if (State.FreeRegs) 1251 NeedsPadding = true; 1252 1253 return false; 1254 } 1255 1256 return true; 1257 } 1258 1259 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, 1260 CCState &State) const { 1261 // FIXME: Set alignment on indirect arguments. 1262 1263 Ty = useFirstFieldIfTransparentUnion(Ty); 1264 1265 // Check with the C++ ABI first. 1266 const RecordType *RT = Ty->getAs<RecordType>(); 1267 if (RT) { 1268 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); 1269 if (RAA == CGCXXABI::RAA_Indirect) { 1270 return getIndirectResult(Ty, false, State); 1271 } else if (RAA == CGCXXABI::RAA_DirectInMemory) { 1272 // The field index doesn't matter, we'll fix it up later. 1273 return ABIArgInfo::getInAlloca(/*FieldIndex=*/0); 1274 } 1275 } 1276 1277 // vectorcall adds the concept of a homogenous vector aggregate, similar 1278 // to other targets. 1279 const Type *Base = nullptr; 1280 uint64_t NumElts = 0; 1281 if (State.CC == llvm::CallingConv::X86_VectorCall && 1282 isHomogeneousAggregate(Ty, Base, NumElts)) { 1283 if (State.FreeSSERegs >= NumElts) { 1284 State.FreeSSERegs -= NumElts; 1285 if (Ty->isBuiltinType() || Ty->isVectorType()) 1286 return ABIArgInfo::getDirect(); 1287 return ABIArgInfo::getExpand(); 1288 } 1289 return getIndirectResult(Ty, /*ByVal=*/false, State); 1290 } 1291 1292 if (isAggregateTypeForABI(Ty)) { 1293 if (RT) { 1294 // Structs are always byval on win32, regardless of what they contain. 1295 if (IsWin32StructABI) 1296 return getIndirectResult(Ty, true, State); 1297 1298 // Structures with flexible arrays are always indirect. 1299 if (RT->getDecl()->hasFlexibleArrayMember()) 1300 return getIndirectResult(Ty, true, State); 1301 } 1302 1303 // Ignore empty structs/unions. 
1304 if (isEmptyRecord(getContext(), Ty, true)) 1305 return ABIArgInfo::getIgnore(); 1306 1307 llvm::LLVMContext &LLVMContext = getVMContext(); 1308 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 1309 bool NeedsPadding; 1310 if (shouldUseInReg(Ty, State, NeedsPadding)) { 1311 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; 1312 SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32); 1313 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 1314 return ABIArgInfo::getDirectInReg(Result); 1315 } 1316 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr; 1317 1318 // Expand small (<= 128-bit) record types when we know that the stack layout 1319 // of those arguments will match the struct. This is important because the 1320 // LLVM backend isn't smart enough to remove byval, which inhibits many 1321 // optimizations. 1322 if (getContext().getTypeSize(Ty) <= 4*32 && 1323 canExpandIndirectArgument(Ty, getContext())) 1324 return ABIArgInfo::getExpandWithPadding( 1325 State.CC == llvm::CallingConv::X86_FastCall || 1326 State.CC == llvm::CallingConv::X86_VectorCall, 1327 PaddingType); 1328 1329 return getIndirectResult(Ty, true, State); 1330 } 1331 1332 if (const VectorType *VT = Ty->getAs<VectorType>()) { 1333 // On Darwin, some vectors are passed in memory, we handle this by passing 1334 // it as an i8/i16/i32/i64. 1335 if (IsDarwinVectorABI) { 1336 uint64_t Size = getContext().getTypeSize(Ty); 1337 if ((Size == 8 || Size == 16 || Size == 32) || 1338 (Size == 64 && VT->getNumElements() == 1)) 1339 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 1340 Size)); 1341 } 1342 1343 if (IsX86_MMXType(CGT.ConvertType(Ty))) 1344 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64)); 1345 1346 return ABIArgInfo::getDirect(); 1347 } 1348 1349 1350 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1351 Ty = EnumTy->getDecl()->getIntegerType(); 1352 1353 bool NeedsPadding; 1354 bool InReg = shouldUseInReg(Ty, State, NeedsPadding); 1355 1356 if (Ty->isPromotableIntegerType()) { 1357 if (InReg) 1358 return ABIArgInfo::getExtendInReg(); 1359 return ABIArgInfo::getExtend(); 1360 } 1361 if (InReg) 1362 return ABIArgInfo::getDirectInReg(); 1363 return ABIArgInfo::getDirect(); 1364 } 1365 1366 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const { 1367 CCState State(FI.getCallingConvention()); 1368 if (State.CC == llvm::CallingConv::X86_FastCall) 1369 State.FreeRegs = 2; 1370 else if (State.CC == llvm::CallingConv::X86_VectorCall) { 1371 State.FreeRegs = 2; 1372 State.FreeSSERegs = 6; 1373 } else if (FI.getHasRegParm()) 1374 State.FreeRegs = FI.getRegParm(); 1375 else 1376 State.FreeRegs = DefaultNumRegisterParameters; 1377 1378 if (!getCXXABI().classifyReturnType(FI)) { 1379 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State); 1380 } else if (FI.getReturnInfo().isIndirect()) { 1381 // The C++ ABI is not aware of register usage, so we have to check if the 1382 // return value was sret and put it in a register ourselves if appropriate. 1383 if (State.FreeRegs) { 1384 --State.FreeRegs; // The sret parameter consumes a register. 1385 FI.getReturnInfo().setInReg(true); 1386 } 1387 } 1388 1389 // The chain argument effectively gives us another free register. 
1390 if (FI.isChainCall()) 1391 ++State.FreeRegs; 1392 1393 bool UsedInAlloca = false; 1394 for (auto &I : FI.arguments()) { 1395 I.info = classifyArgumentType(I.type, State); 1396 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca); 1397 } 1398 1399 // If we needed to use inalloca for any argument, do a second pass and rewrite 1400 // all the memory arguments to use inalloca. 1401 if (UsedInAlloca) 1402 rewriteWithInAlloca(FI); 1403 } 1404 1405 void 1406 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, 1407 CharUnits &StackOffset, ABIArgInfo &Info, 1408 QualType Type) const { 1409 // Arguments are always 4-byte-aligned. 1410 CharUnits FieldAlign = CharUnits::fromQuantity(4); 1411 1412 assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct"); 1413 Info = ABIArgInfo::getInAlloca(FrameFields.size()); 1414 FrameFields.push_back(CGT.ConvertTypeForMem(Type)); 1415 StackOffset += getContext().getTypeSizeInChars(Type); 1416 1417 // Insert padding bytes to respect alignment. 1418 CharUnits FieldEnd = StackOffset; 1419 StackOffset = FieldEnd.RoundUpToAlignment(FieldAlign); 1420 if (StackOffset != FieldEnd) { 1421 CharUnits NumBytes = StackOffset - FieldEnd; 1422 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext()); 1423 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity()); 1424 FrameFields.push_back(Ty); 1425 } 1426 } 1427 1428 static bool isArgInAlloca(const ABIArgInfo &Info) { 1429 // Leave ignored and inreg arguments alone. 1430 switch (Info.getKind()) { 1431 case ABIArgInfo::InAlloca: 1432 return true; 1433 case ABIArgInfo::Indirect: 1434 assert(Info.getIndirectByVal()); 1435 return true; 1436 case ABIArgInfo::Ignore: 1437 return false; 1438 case ABIArgInfo::Direct: 1439 case ABIArgInfo::Extend: 1440 case ABIArgInfo::Expand: 1441 if (Info.getInReg()) 1442 return false; 1443 return true; 1444 } 1445 llvm_unreachable("invalid enum"); 1446 } 1447 1448 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const { 1449 assert(IsWin32StructABI && "inalloca only supported on win32"); 1450 1451 // Build a packed struct type for all of the arguments in memory. 1452 SmallVector<llvm::Type *, 6> FrameFields; 1453 1454 // The stack alignment is always 4. 1455 CharUnits StackAlign = CharUnits::fromQuantity(4); 1456 1457 CharUnits StackOffset; 1458 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end(); 1459 1460 // Put 'this' into the struct before 'sret', if necessary. 1461 bool IsThisCall = 1462 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall; 1463 ABIArgInfo &Ret = FI.getReturnInfo(); 1464 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall && 1465 isArgInAlloca(I->info)) { 1466 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); 1467 ++I; 1468 } 1469 1470 // Put the sret parameter into the inalloca struct if it's in memory. 1471 if (Ret.isIndirect() && !Ret.getInReg()) { 1472 CanQualType PtrTy = getContext().getPointerType(FI.getReturnType()); 1473 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy); 1474 // On Windows, the hidden sret parameter is always returned in eax. 1475 Ret.setInAllocaSRet(IsWin32StructABI); 1476 } 1477 1478 // Skip the 'this' parameter in ecx. 1479 if (IsThisCall) 1480 ++I; 1481 1482 // Put arguments passed in memory into the struct. 
1483 for (; I != E; ++I) { 1484 if (isArgInAlloca(I->info)) 1485 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); 1486 } 1487 1488 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields, 1489 /*isPacked=*/true), 1490 StackAlign); 1491 } 1492 1493 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, 1494 Address VAListAddr, QualType Ty) const { 1495 1496 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 1497 1498 // x86-32 changes the alignment of certain arguments on the stack. 1499 // 1500 // Just messing with TypeInfo like this works because we never pass 1501 // anything indirectly. 1502 TypeInfo.second = CharUnits::fromQuantity( 1503 getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity())); 1504 1505 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, 1506 TypeInfo, CharUnits::fromQuantity(4), 1507 /*AllowHigherAlign*/ true); 1508 } 1509 1510 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI( 1511 const llvm::Triple &Triple, const CodeGenOptions &Opts) { 1512 assert(Triple.getArch() == llvm::Triple::x86); 1513 1514 switch (Opts.getStructReturnConvention()) { 1515 case CodeGenOptions::SRCK_Default: 1516 break; 1517 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return 1518 return false; 1519 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return 1520 return true; 1521 } 1522 1523 if (Triple.isOSDarwin()) 1524 return true; 1525 1526 switch (Triple.getOS()) { 1527 case llvm::Triple::DragonFly: 1528 case llvm::Triple::FreeBSD: 1529 case llvm::Triple::OpenBSD: 1530 case llvm::Triple::Bitrig: 1531 case llvm::Triple::Win32: 1532 return true; 1533 default: 1534 return false; 1535 } 1536 } 1537 1538 void X86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D, 1539 llvm::GlobalValue *GV, 1540 CodeGen::CodeGenModule &CGM) const { 1541 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 1542 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 1543 // Get the LLVM function. 1544 llvm::Function *Fn = cast<llvm::Function>(GV); 1545 1546 // Now add the 'alignstack' attribute with a value of 16. 1547 llvm::AttrBuilder B; 1548 B.addStackAlignmentAttr(16); 1549 Fn->addAttributes(llvm::AttributeSet::FunctionIndex, 1550 llvm::AttributeSet::get(CGM.getLLVMContext(), 1551 llvm::AttributeSet::FunctionIndex, 1552 B)); 1553 } 1554 } 1555 } 1556 1557 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( 1558 CodeGen::CodeGenFunction &CGF, 1559 llvm::Value *Address) const { 1560 CodeGen::CGBuilderTy &Builder = CGF.Builder; 1561 1562 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 1563 1564 // 0-7 are the eight integer registers; the order is different 1565 // on Darwin (for EH), but the range is the same. 1566 // 8 is %eip. 1567 AssignToArrayRange(Builder, Address, Four8, 0, 8); 1568 1569 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) { 1570 // 12-16 are st(0..4). Not sure why we stop at 4. 1571 // These have size 16, which is sizeof(long double) on 1572 // platforms with 8-byte alignment for that type. 1573 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16); 1574 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16); 1575 1576 } else { 1577 // 9 is %eflags, which doesn't get a size on Darwin for some 1578 // reason. 1579 Builder.CreateAlignedStore( 1580 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9), 1581 CharUnits::One()); 1582 1583 // 11-16 are st(0..5). Not sure why we stop at 5. 
1584 // These have size 12, which is sizeof(long double) on 1585 // platforms with 4-byte alignment for that type. 1586 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12); 1587 AssignToArrayRange(Builder, Address, Twelve8, 11, 16); 1588 } 1589 1590 return false; 1591 } 1592 1593 //===----------------------------------------------------------------------===// 1594 // X86-64 ABI Implementation 1595 //===----------------------------------------------------------------------===// 1596 1597 1598 namespace { 1599 /// The AVX ABI level for X86 targets. 1600 enum class X86AVXABILevel { 1601 None, 1602 AVX, 1603 AVX512 1604 }; 1605 1606 /// \p returns the size in bits of the largest (native) vector for \p AVXLevel. 1607 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) { 1608 switch (AVXLevel) { 1609 case X86AVXABILevel::AVX512: 1610 return 512; 1611 case X86AVXABILevel::AVX: 1612 return 256; 1613 case X86AVXABILevel::None: 1614 return 128; 1615 } 1616 llvm_unreachable("Unknown AVXLevel"); 1617 } 1618 1619 /// X86_64ABIInfo - The X86_64 ABI information. 1620 class X86_64ABIInfo : public ABIInfo { 1621 enum Class { 1622 Integer = 0, 1623 SSE, 1624 SSEUp, 1625 X87, 1626 X87Up, 1627 ComplexX87, 1628 NoClass, 1629 Memory 1630 }; 1631 1632 /// merge - Implement the X86_64 ABI merging algorithm. 1633 /// 1634 /// Merge an accumulating classification \arg Accum with a field 1635 /// classification \arg Field. 1636 /// 1637 /// \param Accum - The accumulating classification. This should 1638 /// always be either NoClass or the result of a previous merge 1639 /// call. In addition, this should never be Memory (the caller 1640 /// should just return Memory for the aggregate). 1641 static Class merge(Class Accum, Class Field); 1642 1643 /// postMerge - Implement the X86_64 ABI post merging algorithm. 1644 /// 1645 /// Post merger cleanup, reduces a malformed Hi and Lo pair to 1646 /// final MEMORY or SSE classes when necessary. 1647 /// 1648 /// \param AggregateSize - The size of the current aggregate in 1649 /// the classification process. 1650 /// 1651 /// \param Lo - The classification for the parts of the type 1652 /// residing in the low word of the containing object. 1653 /// 1654 /// \param Hi - The classification for the parts of the type 1655 /// residing in the higher words of the containing object. 1656 /// 1657 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const; 1658 1659 /// classify - Determine the x86_64 register classes in which the 1660 /// given type T should be passed. 1661 /// 1662 /// \param Lo - The classification for the parts of the type 1663 /// residing in the low word of the containing object. 1664 /// 1665 /// \param Hi - The classification for the parts of the type 1666 /// residing in the high word of the containing object. 1667 /// 1668 /// \param OffsetBase - The bit offset of this type in the 1669 /// containing object. Some parameters are classified different 1670 /// depending on whether they straddle an eightbyte boundary. 1671 /// 1672 /// \param isNamedArg - Whether the argument in question is a "named" 1673 /// argument, as used in AMD64-ABI 3.5.7. 1674 /// 1675 /// If a word is unused its result will be NoClass; if a type should 1676 /// be passed in Memory then at least the classification of \arg Lo 1677 /// will be Memory. 1678 /// 1679 /// The \arg Lo class will be NoClass iff the argument is ignored. 1680 /// 1681 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will 1682 /// also be ComplexX87. 
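  ///
  /// Illustrative example (not exhaustive): for 'struct S { long n; double d; }'
  /// the first eightbyte classifies as INTEGER and the second as SSE, so S
  /// is passed in one general-purpose register and one XMM register.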
1683 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi, 1684 bool isNamedArg) const; 1685 1686 llvm::Type *GetByteVectorType(QualType Ty) const; 1687 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType, 1688 unsigned IROffset, QualType SourceTy, 1689 unsigned SourceOffset) const; 1690 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType, 1691 unsigned IROffset, QualType SourceTy, 1692 unsigned SourceOffset) const; 1693 1694 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 1695 /// such that the argument will be returned in memory. 1696 ABIArgInfo getIndirectReturnResult(QualType Ty) const; 1697 1698 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 1699 /// such that the argument will be passed in memory. 1700 /// 1701 /// \param freeIntRegs - The number of free integer registers remaining 1702 /// available. 1703 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const; 1704 1705 ABIArgInfo classifyReturnType(QualType RetTy) const; 1706 1707 ABIArgInfo classifyArgumentType(QualType Ty, 1708 unsigned freeIntRegs, 1709 unsigned &neededInt, 1710 unsigned &neededSSE, 1711 bool isNamedArg) const; 1712 1713 bool IsIllegalVectorType(QualType Ty) const; 1714 1715 /// The 0.98 ABI revision clarified a lot of ambiguities, 1716 /// unfortunately in ways that were not always consistent with 1717 /// certain previous compilers. In particular, platforms which 1718 /// required strict binary compatibility with older versions of GCC 1719 /// may need to exempt themselves. 1720 bool honorsRevision0_98() const { 1721 return !getTarget().getTriple().isOSDarwin(); 1722 } 1723 1724 X86AVXABILevel AVXLevel; 1725 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on 1726 // 64-bit hardware. 1727 bool Has64BitPointers; 1728 1729 public: 1730 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) : 1731 ABIInfo(CGT), AVXLevel(AVXLevel), 1732 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) { 1733 } 1734 1735 bool isPassedUsingAVXType(QualType type) const { 1736 unsigned neededInt, neededSSE; 1737 // The freeIntRegs argument doesn't matter here. 1738 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE, 1739 /*isNamedArg*/true); 1740 if (info.isDirect()) { 1741 llvm::Type *ty = info.getCoerceToType(); 1742 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty)) 1743 return (vectorTy->getBitWidth() > 128); 1744 } 1745 return false; 1746 } 1747 1748 void computeInfo(CGFunctionInfo &FI) const override; 1749 1750 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 1751 QualType Ty) const override; 1752 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 1753 QualType Ty) const override; 1754 1755 bool has64BitPointers() const { 1756 return Has64BitPointers; 1757 } 1758 }; 1759 1760 /// WinX86_64ABIInfo - The Windows X86_64 ABI information. 1761 class WinX86_64ABIInfo : public ABIInfo { 1762 1763 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, 1764 bool IsReturnType) const; 1765 1766 public: 1767 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 1768 1769 void computeInfo(CGFunctionInfo &FI) const override; 1770 1771 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 1772 QualType Ty) const override; 1773 1774 bool isHomogeneousAggregateBaseType(QualType Ty) const override { 1775 // FIXME: Assumes vectorcall is in use. 
1776 return isX86VectorTypeForVectorCall(getContext(), Ty); 1777 } 1778 1779 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 1780 uint64_t NumMembers) const override { 1781 // FIXME: Assumes vectorcall is in use. 1782 return isX86VectorCallAggregateSmallEnough(NumMembers); 1783 } 1784 }; 1785 1786 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo { 1787 public: 1788 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) 1789 : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {} 1790 1791 const X86_64ABIInfo &getABIInfo() const { 1792 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo()); 1793 } 1794 1795 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 1796 return 7; 1797 } 1798 1799 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 1800 llvm::Value *Address) const override { 1801 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 1802 1803 // 0-15 are the 16 integer registers. 1804 // 16 is %rip. 1805 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 1806 return false; 1807 } 1808 1809 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 1810 StringRef Constraint, 1811 llvm::Type* Ty) const override { 1812 return X86AdjustInlineAsmType(CGF, Constraint, Ty); 1813 } 1814 1815 bool isNoProtoCallVariadic(const CallArgList &args, 1816 const FunctionNoProtoType *fnType) const override { 1817 // The default CC on x86-64 sets %al to the number of SSA 1818 // registers used, and GCC sets this when calling an unprototyped 1819 // function, so we override the default behavior. However, don't do 1820 // that when AVX types are involved: the ABI explicitly states it is 1821 // undefined, and it doesn't work in practice because of how the ABI 1822 // defines varargs anyway. 1823 if (fnType->getCallConv() == CC_C) { 1824 bool HasAVXType = false; 1825 for (CallArgList::const_iterator 1826 it = args.begin(), ie = args.end(); it != ie; ++it) { 1827 if (getABIInfo().isPassedUsingAVXType(it->Ty)) { 1828 HasAVXType = true; 1829 break; 1830 } 1831 } 1832 1833 if (!HasAVXType) 1834 return true; 1835 } 1836 1837 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType); 1838 } 1839 1840 llvm::Constant * 1841 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { 1842 unsigned Sig; 1843 if (getABIInfo().has64BitPointers()) 1844 Sig = (0xeb << 0) | // jmp rel8 1845 (0x0a << 8) | // .+0x0c 1846 ('F' << 16) | 1847 ('T' << 24); 1848 else 1849 Sig = (0xeb << 0) | // jmp rel8 1850 (0x06 << 8) | // .+0x08 1851 ('F' << 16) | 1852 ('T' << 24); 1853 return llvm::ConstantInt::get(CGM.Int32Ty, Sig); 1854 } 1855 }; 1856 1857 class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo { 1858 public: 1859 PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) 1860 : X86_64TargetCodeGenInfo(CGT, AVXLevel) {} 1861 1862 void getDependentLibraryOption(llvm::StringRef Lib, 1863 llvm::SmallString<24> &Opt) const override { 1864 Opt = "\01"; 1865 // If the argument contains a space, enclose it in quotes. 1866 if (Lib.find(" ") != StringRef::npos) 1867 Opt += "\"" + Lib.str() + "\""; 1868 else 1869 Opt += Lib; 1870 } 1871 }; 1872 1873 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) { 1874 // If the argument does not end in .lib, automatically add the suffix. 1875 // If the argument contains a space, enclose it in quotes. 1876 // This matches the behavior of MSVC. 1877 bool Quote = (Lib.find(" ") != StringRef::npos); 1878 std::string ArgStr = Quote ? 
"\"" : ""; 1879 ArgStr += Lib; 1880 if (!Lib.endswith_lower(".lib")) 1881 ArgStr += ".lib"; 1882 ArgStr += Quote ? "\"" : ""; 1883 return ArgStr; 1884 } 1885 1886 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo { 1887 public: 1888 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 1889 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI, 1890 unsigned NumRegisterParameters) 1891 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI, 1892 Win32StructABI, NumRegisterParameters, false) {} 1893 1894 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 1895 CodeGen::CodeGenModule &CGM) const override; 1896 1897 void getDependentLibraryOption(llvm::StringRef Lib, 1898 llvm::SmallString<24> &Opt) const override { 1899 Opt = "/DEFAULTLIB:"; 1900 Opt += qualifyWindowsLibrary(Lib); 1901 } 1902 1903 void getDetectMismatchOption(llvm::StringRef Name, 1904 llvm::StringRef Value, 1905 llvm::SmallString<32> &Opt) const override { 1906 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 1907 } 1908 }; 1909 1910 static void addStackProbeSizeTargetAttribute(const Decl *D, 1911 llvm::GlobalValue *GV, 1912 CodeGen::CodeGenModule &CGM) { 1913 if (D && isa<FunctionDecl>(D)) { 1914 if (CGM.getCodeGenOpts().StackProbeSize != 4096) { 1915 llvm::Function *Fn = cast<llvm::Function>(GV); 1916 1917 Fn->addFnAttr("stack-probe-size", 1918 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize)); 1919 } 1920 } 1921 } 1922 1923 void WinX86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D, 1924 llvm::GlobalValue *GV, 1925 CodeGen::CodeGenModule &CGM) const { 1926 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 1927 1928 addStackProbeSizeTargetAttribute(D, GV, CGM); 1929 } 1930 1931 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 1932 public: 1933 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 1934 X86AVXABILevel AVXLevel) 1935 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {} 1936 1937 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 1938 CodeGen::CodeGenModule &CGM) const override; 1939 1940 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 1941 return 7; 1942 } 1943 1944 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 1945 llvm::Value *Address) const override { 1946 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 1947 1948 // 0-15 are the 16 integer registers. 1949 // 16 is %rip. 1950 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 1951 return false; 1952 } 1953 1954 void getDependentLibraryOption(llvm::StringRef Lib, 1955 llvm::SmallString<24> &Opt) const override { 1956 Opt = "/DEFAULTLIB:"; 1957 Opt += qualifyWindowsLibrary(Lib); 1958 } 1959 1960 void getDetectMismatchOption(llvm::StringRef Name, 1961 llvm::StringRef Value, 1962 llvm::SmallString<32> &Opt) const override { 1963 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 1964 } 1965 }; 1966 1967 void WinX86_64TargetCodeGenInfo::setTargetAttributes(const Decl *D, 1968 llvm::GlobalValue *GV, 1969 CodeGen::CodeGenModule &CGM) const { 1970 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 1971 1972 addStackProbeSizeTargetAttribute(D, GV, CGM); 1973 } 1974 } 1975 1976 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, 1977 Class &Hi) const { 1978 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: 1979 // 1980 // (a) If one of the classes is Memory, the whole argument is passed in 1981 // memory. 
1982 // 1983 // (b) If X87UP is not preceded by X87, the whole argument is passed in 1984 // memory. 1985 // 1986 // (c) If the size of the aggregate exceeds two eightbytes and the first 1987 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole 1988 // argument is passed in memory. NOTE: This is necessary to keep the 1989 // ABI working for processors that don't support the __m256 type. 1990 // 1991 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. 1992 // 1993 // Some of these are enforced by the merging logic. Others can arise 1994 // only with unions; for example: 1995 // union { _Complex double; unsigned; } 1996 // 1997 // Note that clauses (b) and (c) were added in 0.98. 1998 // 1999 if (Hi == Memory) 2000 Lo = Memory; 2001 if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) 2002 Lo = Memory; 2003 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) 2004 Lo = Memory; 2005 if (Hi == SSEUp && Lo != SSE) 2006 Hi = SSE; 2007 } 2008 2009 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { 2010 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 2011 // classified recursively so that always two fields are 2012 // considered. The resulting class is calculated according to 2013 // the classes of the fields in the eightbyte: 2014 // 2015 // (a) If both classes are equal, this is the resulting class. 2016 // 2017 // (b) If one of the classes is NO_CLASS, the resulting class is 2018 // the other class. 2019 // 2020 // (c) If one of the classes is MEMORY, the result is the MEMORY 2021 // class. 2022 // 2023 // (d) If one of the classes is INTEGER, the result is the 2024 // INTEGER. 2025 // 2026 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 2027 // MEMORY is used as class. 2028 // 2029 // (f) Otherwise class SSE is used. 2030 2031 // Accum should never be memory (we should have returned) or 2032 // ComplexX87 (because this cannot be passed in a structure). 2033 assert((Accum != Memory && Accum != ComplexX87) && 2034 "Invalid accumulated classification during merge."); 2035 if (Accum == Field || Field == NoClass) 2036 return Accum; 2037 if (Field == Memory) 2038 return Memory; 2039 if (Accum == NoClass) 2040 return Field; 2041 if (Accum == Integer || Field == Integer) 2042 return Integer; 2043 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 2044 Accum == X87 || Accum == X87Up) 2045 return Memory; 2046 return SSE; 2047 } 2048 2049 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, 2050 Class &Lo, Class &Hi, bool isNamedArg) const { 2051 // FIXME: This code can be simplified by introducing a simple value class for 2052 // Class pairs with appropriate constructor methods for the various 2053 // situations. 2054 2055 // FIXME: Some of the split computations are wrong; unaligned vectors 2056 // shouldn't be passed in registers for example, so there is no chance they 2057 // can straddle an eightbyte. Verify & simplify. 2058 2059 Lo = Hi = NoClass; 2060 2061 Class &Current = OffsetBase < 64 ? 
Lo : Hi; 2062 Current = Memory; 2063 2064 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 2065 BuiltinType::Kind k = BT->getKind(); 2066 2067 if (k == BuiltinType::Void) { 2068 Current = NoClass; 2069 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 2070 Lo = Integer; 2071 Hi = Integer; 2072 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 2073 Current = Integer; 2074 } else if (k == BuiltinType::Float || k == BuiltinType::Double) { 2075 Current = SSE; 2076 } else if (k == BuiltinType::LongDouble) { 2077 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 2078 if (LDF == &llvm::APFloat::IEEEquad) { 2079 Lo = SSE; 2080 Hi = SSEUp; 2081 } else if (LDF == &llvm::APFloat::x87DoubleExtended) { 2082 Lo = X87; 2083 Hi = X87Up; 2084 } else if (LDF == &llvm::APFloat::IEEEdouble) { 2085 Current = SSE; 2086 } else 2087 llvm_unreachable("unexpected long double representation!"); 2088 } 2089 // FIXME: _Decimal32 and _Decimal64 are SSE. 2090 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 2091 return; 2092 } 2093 2094 if (const EnumType *ET = Ty->getAs<EnumType>()) { 2095 // Classify the underlying integer type. 2096 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg); 2097 return; 2098 } 2099 2100 if (Ty->hasPointerRepresentation()) { 2101 Current = Integer; 2102 return; 2103 } 2104 2105 if (Ty->isMemberPointerType()) { 2106 if (Ty->isMemberFunctionPointerType()) { 2107 if (Has64BitPointers) { 2108 // If Has64BitPointers, this is an {i64, i64}, so classify both 2109 // Lo and Hi now. 2110 Lo = Hi = Integer; 2111 } else { 2112 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that 2113 // straddles an eightbyte boundary, Hi should be classified as well. 2114 uint64_t EB_FuncPtr = (OffsetBase) / 64; 2115 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64; 2116 if (EB_FuncPtr != EB_ThisAdj) { 2117 Lo = Hi = Integer; 2118 } else { 2119 Current = Integer; 2120 } 2121 } 2122 } else { 2123 Current = Integer; 2124 } 2125 return; 2126 } 2127 2128 if (const VectorType *VT = Ty->getAs<VectorType>()) { 2129 uint64_t Size = getContext().getTypeSize(VT); 2130 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) { 2131 // gcc passes the following as integer: 2132 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float> 2133 // 2 bytes - <2 x char>, <1 x short> 2134 // 1 byte - <1 x char> 2135 Current = Integer; 2136 2137 // If this type crosses an eightbyte boundary, it should be 2138 // split. 2139 uint64_t EB_Lo = (OffsetBase) / 64; 2140 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64; 2141 if (EB_Lo != EB_Hi) 2142 Hi = Lo; 2143 } else if (Size == 64) { 2144 // gcc passes <1 x double> in memory. :( 2145 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) 2146 return; 2147 2148 // gcc passes <1 x long long> as INTEGER. 2149 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) || 2150 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) || 2151 VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) || 2152 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong)) 2153 Current = Integer; 2154 else 2155 Current = SSE; 2156 2157 // If this type crosses an eightbyte boundary, it should be 2158 // split. 2159 if (OffsetBase && OffsetBase != 64) 2160 Hi = Lo; 2161 } else if (Size == 128 || 2162 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) { 2163 // Arguments of 256-bits are split into four eightbyte chunks. 
The 2164 // least significant one belongs to class SSE and all the others to class 2165 // SSEUP. The original Lo and Hi design considers that types can't be 2166 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. 2167 // This design isn't correct for 256-bits, but since there're no cases 2168 // where the upper parts would need to be inspected, avoid adding 2169 // complexity and just consider Hi to match the 64-256 part. 2170 // 2171 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in 2172 // registers if they are "named", i.e. not part of the "..." of a 2173 // variadic function. 2174 // 2175 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are 2176 // split into eight eightbyte chunks, one SSE and seven SSEUP. 2177 Lo = SSE; 2178 Hi = SSEUp; 2179 } 2180 return; 2181 } 2182 2183 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 2184 QualType ET = getContext().getCanonicalType(CT->getElementType()); 2185 2186 uint64_t Size = getContext().getTypeSize(Ty); 2187 if (ET->isIntegralOrEnumerationType()) { 2188 if (Size <= 64) 2189 Current = Integer; 2190 else if (Size <= 128) 2191 Lo = Hi = Integer; 2192 } else if (ET == getContext().FloatTy) { 2193 Current = SSE; 2194 } else if (ET == getContext().DoubleTy) { 2195 Lo = Hi = SSE; 2196 } else if (ET == getContext().LongDoubleTy) { 2197 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 2198 if (LDF == &llvm::APFloat::IEEEquad) 2199 Current = Memory; 2200 else if (LDF == &llvm::APFloat::x87DoubleExtended) 2201 Current = ComplexX87; 2202 else if (LDF == &llvm::APFloat::IEEEdouble) 2203 Lo = Hi = SSE; 2204 else 2205 llvm_unreachable("unexpected long double representation!"); 2206 } 2207 2208 // If this complex type crosses an eightbyte boundary then it 2209 // should be split. 2210 uint64_t EB_Real = (OffsetBase) / 64; 2211 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; 2212 if (Hi == NoClass && EB_Real != EB_Imag) 2213 Hi = Lo; 2214 2215 return; 2216 } 2217 2218 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 2219 // Arrays are treated like structures. 2220 2221 uint64_t Size = getContext().getTypeSize(Ty); 2222 2223 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 2224 // than four eightbytes, ..., it has class MEMORY. 2225 if (Size > 256) 2226 return; 2227 2228 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 2229 // fields, it has class MEMORY. 2230 // 2231 // Only need to check alignment of array base. 2232 if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) 2233 return; 2234 2235 // Otherwise implement simplified merge. We could be smarter about 2236 // this, but it isn't worth it and would be harder to verify. 2237 Current = NoClass; 2238 uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); 2239 uint64_t ArraySize = AT->getSize().getZExtValue(); 2240 2241 // The only case a 256-bit wide vector could be used is when the array 2242 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 2243 // to work for sizes wider than 128, early check and fallback to memory. 
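    // For example (illustrative), double[4] is four 64-bit elements totalling
    // 256 bits, so it takes this early exit, whereas __m256[1] (a single
    // 256-bit element) proceeds to the merge loop below.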
2244 if (Size > 128 && EltSize != 256) 2245 return; 2246 2247 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 2248 Class FieldLo, FieldHi; 2249 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg); 2250 Lo = merge(Lo, FieldLo); 2251 Hi = merge(Hi, FieldHi); 2252 if (Lo == Memory || Hi == Memory) 2253 break; 2254 } 2255 2256 postMerge(Size, Lo, Hi); 2257 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 2258 return; 2259 } 2260 2261 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2262 uint64_t Size = getContext().getTypeSize(Ty); 2263 2264 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 2265 // than four eightbytes, ..., it has class MEMORY. 2266 if (Size > 256) 2267 return; 2268 2269 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial 2270 // copy constructor or a non-trivial destructor, it is passed by invisible 2271 // reference. 2272 if (getRecordArgABI(RT, getCXXABI())) 2273 return; 2274 2275 const RecordDecl *RD = RT->getDecl(); 2276 2277 // Assume variable sized types are passed in memory. 2278 if (RD->hasFlexibleArrayMember()) 2279 return; 2280 2281 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 2282 2283 // Reset Lo class, this will be recomputed. 2284 Current = NoClass; 2285 2286 // If this is a C++ record, classify the bases first. 2287 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 2288 for (const auto &I : CXXRD->bases()) { 2289 assert(!I.isVirtual() && !I.getType()->isDependentType() && 2290 "Unexpected base class!"); 2291 const CXXRecordDecl *Base = 2292 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 2293 2294 // Classify this field. 2295 // 2296 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 2297 // single eightbyte, each is classified separately. Each eightbyte gets 2298 // initialized to class NO_CLASS. 2299 Class FieldLo, FieldHi; 2300 uint64_t Offset = 2301 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); 2302 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg); 2303 Lo = merge(Lo, FieldLo); 2304 Hi = merge(Hi, FieldHi); 2305 if (Lo == Memory || Hi == Memory) { 2306 postMerge(Size, Lo, Hi); 2307 return; 2308 } 2309 } 2310 } 2311 2312 // Classify the fields one at a time, merging the results. 2313 unsigned idx = 0; 2314 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2315 i != e; ++i, ++idx) { 2316 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 2317 bool BitField = i->isBitField(); 2318 2319 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than 2320 // four eightbytes, or it contains unaligned fields, it has class MEMORY. 2321 // 2322 // The only case a 256-bit wide vector could be used is when the struct 2323 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 2324 // to work for sizes wider than 128, early check and fallback to memory. 2325 // 2326 if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) { 2327 Lo = Memory; 2328 postMerge(Size, Lo, Hi); 2329 return; 2330 } 2331 // Note, skip this test for bit-fields, see below. 2332 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { 2333 Lo = Memory; 2334 postMerge(Size, Lo, Hi); 2335 return; 2336 } 2337 2338 // Classify this field. 2339 // 2340 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate 2341 // exceeds a single eightbyte, each is classified 2342 // separately. 
Each eightbyte gets initialized to class 2343 // NO_CLASS. 2344 Class FieldLo, FieldHi; 2345 2346 // Bit-fields require special handling, they do not force the 2347 // structure to be passed in memory even if unaligned, and 2348 // therefore they can straddle an eightbyte. 2349 if (BitField) { 2350 // Ignore padding bit-fields. 2351 if (i->isUnnamedBitfield()) 2352 continue; 2353 2354 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 2355 uint64_t Size = i->getBitWidthValue(getContext()); 2356 2357 uint64_t EB_Lo = Offset / 64; 2358 uint64_t EB_Hi = (Offset + Size - 1) / 64; 2359 2360 if (EB_Lo) { 2361 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 2362 FieldLo = NoClass; 2363 FieldHi = Integer; 2364 } else { 2365 FieldLo = Integer; 2366 FieldHi = EB_Hi ? Integer : NoClass; 2367 } 2368 } else 2369 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg); 2370 Lo = merge(Lo, FieldLo); 2371 Hi = merge(Hi, FieldHi); 2372 if (Lo == Memory || Hi == Memory) 2373 break; 2374 } 2375 2376 postMerge(Size, Lo, Hi); 2377 } 2378 } 2379 2380 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { 2381 // If this is a scalar LLVM value then assume LLVM will pass it in the right 2382 // place naturally. 2383 if (!isAggregateTypeForABI(Ty)) { 2384 // Treat an enum type as its underlying type. 2385 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2386 Ty = EnumTy->getDecl()->getIntegerType(); 2387 2388 return (Ty->isPromotableIntegerType() ? 2389 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2390 } 2391 2392 return getNaturalAlignIndirect(Ty); 2393 } 2394 2395 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { 2396 if (const VectorType *VecTy = Ty->getAs<VectorType>()) { 2397 uint64_t Size = getContext().getTypeSize(VecTy); 2398 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel); 2399 if (Size <= 64 || Size > LargestVector) 2400 return true; 2401 } 2402 2403 return false; 2404 } 2405 2406 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, 2407 unsigned freeIntRegs) const { 2408 // If this is a scalar LLVM value then assume LLVM will pass it in the right 2409 // place naturally. 2410 // 2411 // This assumption is optimistic, as there could be free registers available 2412 // when we need to pass this argument in memory, and LLVM could try to pass 2413 // the argument in the free register. This does not seem to happen currently, 2414 // but this code would be much safer if we could mark the argument with 2415 // 'onstack'. See PR12193. 2416 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) { 2417 // Treat an enum type as its underlying type. 2418 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2419 Ty = EnumTy->getDecl()->getIntegerType(); 2420 2421 return (Ty->isPromotableIntegerType() ? 2422 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2423 } 2424 2425 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 2426 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 2427 2428 // Compute the byval alignment. We specify the alignment of the byval in all 2429 // cases so that the mid-level optimizer knows the alignment of the byval. 2430 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); 2431 2432 // Attempt to avoid passing indirect results using byval when possible. This 2433 // is important for good codegen. 2434 // 2435 // We do this by coercing the value into a scalar type which the backend can 2436 // handle naturally (i.e., without using byval). 
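  // (Illustrative: an 8-byte-aligned value no larger than eight bytes can be
  // coerced to a plain i64 rather than passed byval, which is what the code
  // below does.)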
2437 //
2438 // For simplicity, we currently only do this when we have exhausted all of the
2439 // free integer registers. Doing this when there are free integer registers
2440 // would require more care, as we would have to ensure that the coerced value
2441 // did not claim the unused register. That would require either reordering the
2442 // arguments to the function (so that any subsequent inreg values came first),
2443 // or only doing this optimization when there were no following arguments that
2444 // might be inreg.
2445 //
2446 // We currently expect it to be rare (particularly in well-written code) for
2447 // arguments to be passed on the stack when there are still free integer
2448 // registers available (this would typically imply large structs being passed
2449 // by value), so this seems like a fair tradeoff for now.
2450 //
2451 // We can revisit this if the backend grows support for 'onstack' parameter
2452 // attributes. See PR12193.
2453 if (freeIntRegs == 0) {
2454 uint64_t Size = getContext().getTypeSize(Ty);
2455 
2456 // If this type fits in an eightbyte, coerce it into the matching integral
2457 // type, which will end up on the stack (with alignment 8).
2458 if (Align == 8 && Size <= 64)
2459 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2460 Size));
2461 }
2462 
2463 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
2464 }
2465 
2466 /// The ABI specifies that a value should be passed in a full vector XMM/YMM
2467 /// register. Pick an LLVM IR type that will be passed as a vector register.
2468 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
2469 // Wrapper structs/arrays that only contain vectors are passed just like
2470 // vectors; strip them off if present.
2471 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
2472 Ty = QualType(InnerTy, 0);
2473 
2474 llvm::Type *IRType = CGT.ConvertType(Ty);
2475 if (isa<llvm::VectorType>(IRType) ||
2476 IRType->getTypeID() == llvm::Type::FP128TyID)
2477 return IRType;
2478 
2479 // We couldn't find the preferred IR vector type for 'Ty'.
2480 uint64_t Size = getContext().getTypeSize(Ty);
2481 assert((Size == 128 || Size == 256) && "Invalid type found!");
2482 
2483 // Return an LLVM IR vector type based on the size of 'Ty'.
2484 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
2485 Size / 64);
2486 }
2487 
2488 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
2489 /// is known to either be off the end of the specified type or be in
2490 /// alignment padding. The user type specified is known to be at most 128 bits
2491 /// in size, and to have passed through X86_64ABIInfo::classify with a successful
2492 /// classification that put one of the two halves in the INTEGER class.
2493 ///
2494 /// It is conservatively correct to return false.
2495 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
2496 unsigned EndBit, ASTContext &Context) {
2497 // If the bytes being queried are off the end of the type, there is no user
2498 // data hiding here. This handles analysis of builtins, vectors and other
2499 // types that don't contain interesting padding.
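  // Illustrative example: for struct { float a, b, c; } (12 bytes), a query
  // over bits [96, 128) lies entirely past the end of the type, so it reports
  // no user data; this is what allows the second eightbyte of such a struct
  // to be passed as a lone float rather than a double.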
2500 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 2501 if (TySize <= StartBit) 2502 return true; 2503 2504 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 2505 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 2506 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 2507 2508 // Check each element to see if the element overlaps with the queried range. 2509 for (unsigned i = 0; i != NumElts; ++i) { 2510 // If the element is after the span we care about, then we're done.. 2511 unsigned EltOffset = i*EltSize; 2512 if (EltOffset >= EndBit) break; 2513 2514 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0; 2515 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 2516 EndBit-EltOffset, Context)) 2517 return false; 2518 } 2519 // If it overlaps no elements, then it is safe to process as padding. 2520 return true; 2521 } 2522 2523 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2524 const RecordDecl *RD = RT->getDecl(); 2525 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 2526 2527 // If this is a C++ record, check the bases first. 2528 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 2529 for (const auto &I : CXXRD->bases()) { 2530 assert(!I.isVirtual() && !I.getType()->isDependentType() && 2531 "Unexpected base class!"); 2532 const CXXRecordDecl *Base = 2533 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 2534 2535 // If the base is after the span we care about, ignore it. 2536 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 2537 if (BaseOffset >= EndBit) continue; 2538 2539 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 2540 if (!BitsContainNoUserData(I.getType(), BaseStart, 2541 EndBit-BaseOffset, Context)) 2542 return false; 2543 } 2544 } 2545 2546 // Verify that no field has data that overlaps the region of interest. Yes 2547 // this could be sped up a lot by being smarter about queried fields, 2548 // however we're only looking at structs up to 16 bytes, so we don't care 2549 // much. 2550 unsigned idx = 0; 2551 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2552 i != e; ++i, ++idx) { 2553 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 2554 2555 // If we found a field after the region we care about, then we're done. 2556 if (FieldOffset >= EndBit) break; 2557 2558 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 2559 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 2560 Context)) 2561 return false; 2562 } 2563 2564 // If nothing in this record overlapped the area of interest, then we're 2565 // clean. 2566 return true; 2567 } 2568 2569 return false; 2570 } 2571 2572 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 2573 /// float member at the specified offset. For example, {int,{float}} has a 2574 /// float at offset 4. It is conservatively correct for this routine to return 2575 /// false. 2576 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 2577 const llvm::DataLayout &TD) { 2578 // Base case if we find a float. 2579 if (IROffset == 0 && IRType->isFloatTy()) 2580 return true; 2581 2582 // If this is a struct, recurse into the field at the specified offset. 
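  // (Illustrative walk-through: querying { i32, { float } } at offset 4
  // selects element 1, subtracts its offset, and recurses into { float } at
  // offset 0, where the base case finds the float.)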
2583 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 2584 const llvm::StructLayout *SL = TD.getStructLayout(STy); 2585 unsigned Elt = SL->getElementContainingOffset(IROffset); 2586 IROffset -= SL->getElementOffset(Elt); 2587 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 2588 } 2589 2590 // If this is an array, recurse into the field at the specified offset. 2591 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 2592 llvm::Type *EltTy = ATy->getElementType(); 2593 unsigned EltSize = TD.getTypeAllocSize(EltTy); 2594 IROffset -= IROffset/EltSize*EltSize; 2595 return ContainsFloatAtOffset(EltTy, IROffset, TD); 2596 } 2597 2598 return false; 2599 } 2600 2601 2602 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 2603 /// low 8 bytes of an XMM register, corresponding to the SSE class. 2604 llvm::Type *X86_64ABIInfo:: 2605 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 2606 QualType SourceTy, unsigned SourceOffset) const { 2607 // The only three choices we have are either double, <2 x float>, or float. We 2608 // pass as float if the last 4 bytes is just padding. This happens for 2609 // structs that contain 3 floats. 2610 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 2611 SourceOffset*8+64, getContext())) 2612 return llvm::Type::getFloatTy(getVMContext()); 2613 2614 // We want to pass as <2 x float> if the LLVM IR type contains a float at 2615 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 2616 // case. 2617 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && 2618 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) 2619 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 2620 2621 return llvm::Type::getDoubleTy(getVMContext()); 2622 } 2623 2624 2625 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 2626 /// an 8-byte GPR. This means that we either have a scalar or we are talking 2627 /// about the high or low part of an up-to-16-byte struct. This routine picks 2628 /// the best LLVM IR type to represent this, which may be i64 or may be anything 2629 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 2630 /// etc). 2631 /// 2632 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 2633 /// the source type. IROffset is an offset in bytes into the LLVM IR type that 2634 /// the 8-byte value references. PrefType may be null. 2635 /// 2636 /// SourceTy is the source-level type for the entire argument. SourceOffset is 2637 /// an offset into this that we're processing (which is always either 0 or 8). 2638 /// 2639 llvm::Type *X86_64ABIInfo:: 2640 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 2641 QualType SourceTy, unsigned SourceOffset) const { 2642 // If we're dealing with an un-offset LLVM IR type, then it means that we're 2643 // returning an 8-byte unit starting with it. See if we can safely use it. 2644 if (IROffset == 0) { 2645 // Pointers and int64's always fill the 8-byte unit. 2646 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || 2647 IRType->isIntegerTy(64)) 2648 return IRType; 2649 2650 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 2651 // goodness in the source type is just tail padding. This is allowed to 2652 // kick in for struct {double,int} on the int, but not on 2653 // struct{double,int,int} because we wouldn't return the second int. 
We 2654 // have to do this analysis on the source type because we can't depend on 2655 // unions being lowered a specific way etc. 2656 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 2657 IRType->isIntegerTy(32) || 2658 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) { 2659 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 : 2660 cast<llvm::IntegerType>(IRType)->getBitWidth(); 2661 2662 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 2663 SourceOffset*8+64, getContext())) 2664 return IRType; 2665 } 2666 } 2667 2668 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 2669 // If this is a struct, recurse into the field at the specified offset. 2670 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy); 2671 if (IROffset < SL->getSizeInBytes()) { 2672 unsigned FieldIdx = SL->getElementContainingOffset(IROffset); 2673 IROffset -= SL->getElementOffset(FieldIdx); 2674 2675 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, 2676 SourceTy, SourceOffset); 2677 } 2678 } 2679 2680 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 2681 llvm::Type *EltTy = ATy->getElementType(); 2682 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy); 2683 unsigned EltOffset = IROffset/EltSize*EltSize; 2684 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, 2685 SourceOffset); 2686 } 2687 2688 // Okay, we don't have any better idea of what to pass, so we pass this in an 2689 // integer register that isn't too big to fit the rest of the struct. 2690 unsigned TySizeInBytes = 2691 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); 2692 2693 assert(TySizeInBytes != SourceOffset && "Empty field?"); 2694 2695 // It is always safe to classify this as an integer type up to i64 that 2696 // isn't larger than the structure. 2697 return llvm::IntegerType::get(getVMContext(), 2698 std::min(TySizeInBytes-SourceOffset, 8U)*8); 2699 } 2700 2701 2702 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally 2703 /// be used as elements of a two register pair to pass or return, return a 2704 /// first class aggregate to represent them. For example, if the low part of 2705 /// a by-value argument should be passed as i32* and the high part as float, 2706 /// return {i32*, float}. 2707 static llvm::Type * 2708 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, 2709 const llvm::DataLayout &TD) { 2710 // In order to correctly satisfy the ABI, we need to the high part to start 2711 // at offset 8. If the high and low parts we inferred are both 4-byte types 2712 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have 2713 // the second element at offset 8. Check for this: 2714 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo); 2715 unsigned HiAlign = TD.getABITypeAlignment(Hi); 2716 unsigned HiStart = llvm::RoundUpToAlignment(LoSize, HiAlign); 2717 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!"); 2718 2719 // To handle this, we have to increase the size of the low part so that the 2720 // second element will start at an 8 byte offset. We can't increase the size 2721 // of the second element because it might make us access off the end of the 2722 // struct. 2723 if (HiStart != 8) { 2724 // There are usually two sorts of types the ABI generation code can produce 2725 // for the low part of a pair that aren't 8 bytes in size: float or 2726 // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and 2727 // NaCl). 
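    // For instance (illustrative), an i32 low part paired with a float high
    // part would naively produce {i32, float}, leaving the float at offset 4
    // rather than the required offset 8.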
2728 // Promote these to a larger type. 2729 if (Lo->isFloatTy()) 2730 Lo = llvm::Type::getDoubleTy(Lo->getContext()); 2731 else { 2732 assert((Lo->isIntegerTy() || Lo->isPointerTy()) 2733 && "Invalid/unknown lo type"); 2734 Lo = llvm::Type::getInt64Ty(Lo->getContext()); 2735 } 2736 } 2737 2738 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr); 2739 2740 2741 // Verify that the second element is at an 8-byte offset. 2742 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 && 2743 "Invalid x86-64 argument pair!"); 2744 return Result; 2745 } 2746 2747 ABIArgInfo X86_64ABIInfo:: 2748 classifyReturnType(QualType RetTy) const { 2749 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the 2750 // classification algorithm. 2751 X86_64ABIInfo::Class Lo, Hi; 2752 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true); 2753 2754 // Check some invariants. 2755 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2756 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2757 2758 llvm::Type *ResType = nullptr; 2759 switch (Lo) { 2760 case NoClass: 2761 if (Hi == NoClass) 2762 return ABIArgInfo::getIgnore(); 2763 // If the low part is just padding, it takes no register, leave ResType 2764 // null. 2765 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2766 "Unknown missing lo part"); 2767 break; 2768 2769 case SSEUp: 2770 case X87Up: 2771 llvm_unreachable("Invalid classification for lo word."); 2772 2773 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via 2774 // hidden argument. 2775 case Memory: 2776 return getIndirectReturnResult(RetTy); 2777 2778 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next 2779 // available register of the sequence %rax, %rdx is used. 2780 case Integer: 2781 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 2782 2783 // If we have a sign or zero extended integer, make sure to return Extend 2784 // so that the parameter gets the right LLVM IR attributes. 2785 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2786 // Treat an enum type as its underlying type. 2787 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 2788 RetTy = EnumTy->getDecl()->getIntegerType(); 2789 2790 if (RetTy->isIntegralOrEnumerationType() && 2791 RetTy->isPromotableIntegerType()) 2792 return ABIArgInfo::getExtend(); 2793 } 2794 break; 2795 2796 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 2797 // available SSE register of the sequence %xmm0, %xmm1 is used. 2798 case SSE: 2799 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 2800 break; 2801 2802 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 2803 // returned on the X87 stack in %st0 as 80-bit x87 number. 2804 case X87: 2805 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 2806 break; 2807 2808 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 2809 // part of the value is returned in %st0 and the imaginary part in 2810 // %st1. 2811 case ComplexX87: 2812 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 2813 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), 2814 llvm::Type::getX86_FP80Ty(getVMContext()), 2815 nullptr); 2816 break; 2817 } 2818 2819 llvm::Type *HighPart = nullptr; 2820 switch (Hi) { 2821 // Memory was handled previously and X87 should 2822 // never occur as a hi class. 
2823 case Memory: 2824 case X87: 2825 llvm_unreachable("Invalid classification for hi word."); 2826 2827 case ComplexX87: // Previously handled. 2828 case NoClass: 2829 break; 2830 2831 case Integer: 2832 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2833 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2834 return ABIArgInfo::getDirect(HighPart, 8); 2835 break; 2836 case SSE: 2837 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2838 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2839 return ABIArgInfo::getDirect(HighPart, 8); 2840 break; 2841 2842 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 2843 // is passed in the next available eightbyte chunk if the last used 2844 // vector register. 2845 // 2846 // SSEUP should always be preceded by SSE, just widen. 2847 case SSEUp: 2848 assert(Lo == SSE && "Unexpected SSEUp classification."); 2849 ResType = GetByteVectorType(RetTy); 2850 break; 2851 2852 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 2853 // returned together with the previous X87 value in %st0. 2854 case X87Up: 2855 // If X87Up is preceded by X87, we don't need to do 2856 // anything. However, in some cases with unions it may not be 2857 // preceded by X87. In such situations we follow gcc and pass the 2858 // extra bits in an SSE reg. 2859 if (Lo != X87) { 2860 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 2861 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 2862 return ABIArgInfo::getDirect(HighPart, 8); 2863 } 2864 break; 2865 } 2866 2867 // If a high part was specified, merge it together with the low part. It is 2868 // known to pass in the high eightbyte of the result. We do this by forming a 2869 // first class struct aggregate with the high and low part: {low, high} 2870 if (HighPart) 2871 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 2872 2873 return ABIArgInfo::getDirect(ResType); 2874 } 2875 2876 ABIArgInfo X86_64ABIInfo::classifyArgumentType( 2877 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE, 2878 bool isNamedArg) 2879 const 2880 { 2881 Ty = useFirstFieldIfTransparentUnion(Ty); 2882 2883 X86_64ABIInfo::Class Lo, Hi; 2884 classify(Ty, 0, Lo, Hi, isNamedArg); 2885 2886 // Check some invariants. 2887 // FIXME: Enforce these by construction. 2888 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 2889 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 2890 2891 neededInt = 0; 2892 neededSSE = 0; 2893 llvm::Type *ResType = nullptr; 2894 switch (Lo) { 2895 case NoClass: 2896 if (Hi == NoClass) 2897 return ABIArgInfo::getIgnore(); 2898 // If the low part is just padding, it takes no register, leave ResType 2899 // null. 2900 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 2901 "Unknown missing lo part"); 2902 break; 2903 2904 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 2905 // on the stack. 2906 case Memory: 2907 2908 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 2909 // COMPLEX_X87, it is passed in memory. 2910 case X87: 2911 case ComplexX87: 2912 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect) 2913 ++neededInt; 2914 return getIndirectResult(Ty, freeIntRegs); 2915 2916 case SSEUp: 2917 case X87Up: 2918 llvm_unreachable("Invalid classification for lo word."); 2919 2920 // AMD64-ABI 3.2.3p3: Rule 2. 
If the class is INTEGER, the next 2921 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 2922 // and %r9 is used. 2923 case Integer: 2924 ++neededInt; 2925 2926 // Pick an 8-byte type based on the preferred type. 2927 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 2928 2929 // If we have a sign or zero extended integer, make sure to return Extend 2930 // so that the parameter gets the right LLVM IR attributes. 2931 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 2932 // Treat an enum type as its underlying type. 2933 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2934 Ty = EnumTy->getDecl()->getIntegerType(); 2935 2936 if (Ty->isIntegralOrEnumerationType() && 2937 Ty->isPromotableIntegerType()) 2938 return ABIArgInfo::getExtend(); 2939 } 2940 2941 break; 2942 2943 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 2944 // available SSE register is used, the registers are taken in the 2945 // order from %xmm0 to %xmm7. 2946 case SSE: { 2947 llvm::Type *IRType = CGT.ConvertType(Ty); 2948 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 2949 ++neededSSE; 2950 break; 2951 } 2952 } 2953 2954 llvm::Type *HighPart = nullptr; 2955 switch (Hi) { 2956 // Memory was handled previously, ComplexX87 and X87 should 2957 // never occur as hi classes, and X87Up must be preceded by X87, 2958 // which is passed in memory. 2959 case Memory: 2960 case X87: 2961 case ComplexX87: 2962 llvm_unreachable("Invalid classification for hi word."); 2963 2964 case NoClass: break; 2965 2966 case Integer: 2967 ++neededInt; 2968 // Pick an 8-byte type based on the preferred type. 2969 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2970 2971 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2972 return ABIArgInfo::getDirect(HighPart, 8); 2973 break; 2974 2975 // X87Up generally doesn't occur here (long double is passed in 2976 // memory), except in situations involving unions. 2977 case X87Up: 2978 case SSE: 2979 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 2980 2981 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 2982 return ABIArgInfo::getDirect(HighPart, 8); 2983 2984 ++neededSSE; 2985 break; 2986 2987 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 2988 // eightbyte is passed in the upper half of the last used SSE 2989 // register. This only happens when 128-bit vectors are passed. 2990 case SSEUp: 2991 assert(Lo == SSE && "Unexpected SSEUp classification"); 2992 ResType = GetByteVectorType(Ty); 2993 break; 2994 } 2995 2996 // If a high part was specified, merge it together with the low part. It is 2997 // known to pass in the high eightbyte of the result. We do this by forming a 2998 // first class struct aggregate with the high and low part: {low, high} 2999 if (HighPart) 3000 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 3001 3002 return ABIArgInfo::getDirect(ResType); 3003 } 3004 3005 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 3006 3007 if (!getCXXABI().classifyReturnType(FI)) 3008 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3009 3010 // Keep track of the number of assigned registers. 3011 unsigned freeIntRegs = 6, freeSSERegs = 8; 3012 3013 // If the return value is indirect, then the hidden argument is consuming one 3014 // integer register. 3015 if (FI.getReturnInfo().isIndirect()) 3016 --freeIntRegs; 3017 3018 // The chain argument effectively gives us another free register. 
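  // (Illustrative: for a chain call that also returns a large struct
  // indirectly, the running budget at this point works out to 6 - 1 + 1 = 6
  // free GPRs.)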
3019 if (FI.isChainCall()) 3020 ++freeIntRegs; 3021 3022 unsigned NumRequiredArgs = FI.getNumRequiredArgs(); 3023 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 3024 // get assigned (in left-to-right order) for passing as follows... 3025 unsigned ArgNo = 0; 3026 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3027 it != ie; ++it, ++ArgNo) { 3028 bool IsNamedArg = ArgNo < NumRequiredArgs; 3029 3030 unsigned neededInt, neededSSE; 3031 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt, 3032 neededSSE, IsNamedArg); 3033 3034 // AMD64-ABI 3.2.3p3: If there are no registers available for any 3035 // eightbyte of an argument, the whole argument is passed on the 3036 // stack. If registers have already been assigned for some 3037 // eightbytes of such an argument, the assignments get reverted. 3038 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 3039 freeIntRegs -= neededInt; 3040 freeSSERegs -= neededSSE; 3041 } else { 3042 it->info = getIndirectResult(it->type, freeIntRegs); 3043 } 3044 } 3045 } 3046 3047 static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, 3048 Address VAListAddr, QualType Ty) { 3049 Address overflow_arg_area_p = CGF.Builder.CreateStructGEP( 3050 VAListAddr, 2, CharUnits::fromQuantity(8), "overflow_arg_area_p"); 3051 llvm::Value *overflow_arg_area = 3052 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 3053 3054 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 3055 // byte boundary if alignment needed by type exceeds 8 byte boundary. 3056 // It isn't stated explicitly in the standard, but in practice we use 3057 // alignment greater than 16 where necessary. 3058 uint64_t Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity(); 3059 if (Align > 8) { 3060 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 3061 llvm::Value *Offset = 3062 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 3063 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); 3064 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, 3065 CGF.Int64Ty); 3066 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align); 3067 overflow_arg_area = 3068 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), 3069 overflow_arg_area->getType(), 3070 "overflow_arg_area.align"); 3071 } 3072 3073 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 3074 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 3075 llvm::Value *Res = 3076 CGF.Builder.CreateBitCast(overflow_arg_area, 3077 llvm::PointerType::getUnqual(LTy)); 3078 3079 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 3080 // l->overflow_arg_area + sizeof(type). 3081 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 3082 // an 8 byte boundary. 3083 3084 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 3085 llvm::Value *Offset = 3086 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 3087 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 3088 "overflow_arg_area.next"); 3089 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 3090 3091 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 
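  // (Re steps 9-10, an illustrative case: a 12-byte argument advances
  // overflow_arg_area by (12 + 7) & ~7 == 16 bytes, keeping the pointer
  // 8-byte aligned.)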
3092 return Address(Res, CharUnits::fromQuantity(Align)); 3093 } 3094 3095 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 3096 QualType Ty) const { 3097 // Assume that va_list type is correct; should be pointer to LLVM type: 3098 // struct { 3099 // i32 gp_offset; 3100 // i32 fp_offset; 3101 // i8* overflow_arg_area; 3102 // i8* reg_save_area; 3103 // }; 3104 unsigned neededInt, neededSSE; 3105 3106 Ty = getContext().getCanonicalType(Ty); 3107 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE, 3108 /*isNamedArg*/false); 3109 3110 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 3111 // in the registers. If not go to step 7. 3112 if (!neededInt && !neededSSE) 3113 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty); 3114 3115 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 3116 // general purpose registers needed to pass type and num_fp to hold 3117 // the number of floating point registers needed. 3118 3119 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 3120 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 3121 // l->fp_offset > 304 - num_fp * 16 go to step 7. 3122 // 3123 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of 3124 // register save space). 3125 3126 llvm::Value *InRegs = nullptr; 3127 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid(); 3128 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr; 3129 if (neededInt) { 3130 gp_offset_p = 3131 CGF.Builder.CreateStructGEP(VAListAddr, 0, CharUnits::Zero(), 3132 "gp_offset_p"); 3133 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); 3134 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); 3135 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); 3136 } 3137 3138 if (neededSSE) { 3139 fp_offset_p = 3140 CGF.Builder.CreateStructGEP(VAListAddr, 1, CharUnits::fromQuantity(4), 3141 "fp_offset_p"); 3142 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); 3143 llvm::Value *FitsInFP = 3144 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); 3145 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); 3146 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; 3147 } 3148 3149 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 3150 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 3151 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 3152 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 3153 3154 // Emit code to load the value if it was passed in registers. 3155 3156 CGF.EmitBlock(InRegBlock); 3157 3158 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with 3159 // an offset of l->gp_offset and/or l->fp_offset. This may require 3160 // copying to a temporary location in case the parameter is passed 3161 // in different register classes or requires an alignment greater 3162 // than 8 for general purpose registers and 16 for XMM registers. 3163 // 3164 // FIXME: This really results in shameful code when we end up needing to 3165 // collect arguments from different places; often what should result in a 3166 // simple assembling of a structure from scattered addresses has many more 3167 // loads than necessary. Can we clean this up? 
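  // Illustrative case: struct { double d; int i; } classifies as SSE +
  // INTEGER, so the double is fetched from reg_save_area + fp_offset and the
  // int from reg_save_area + gp_offset, and the two pieces are reassembled
  // into a stack temporary below.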
3168 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 3169 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad( 3170 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(16)), 3171 "reg_save_area"); 3172 3173 Address RegAddr = Address::invalid(); 3174 if (neededInt && neededSSE) { 3175 // FIXME: Cleanup. 3176 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 3177 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 3178 Address Tmp = CGF.CreateMemTemp(Ty); 3179 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); 3180 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 3181 llvm::Type *TyLo = ST->getElementType(0); 3182 llvm::Type *TyHi = ST->getElementType(1); 3183 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 3184 "Unexpected ABI info for mixed regs"); 3185 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 3186 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 3187 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset); 3188 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset); 3189 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr; 3190 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr; 3191 3192 // Copy the first element. 3193 llvm::Value *V = 3194 CGF.Builder.CreateDefaultAlignedLoad( 3195 CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 3196 CGF.Builder.CreateStore(V, 3197 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero())); 3198 3199 // Copy the second element. 3200 V = CGF.Builder.CreateDefaultAlignedLoad( 3201 CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 3202 CharUnits Offset = CharUnits::fromQuantity( 3203 getDataLayout().getStructLayout(ST)->getElementOffset(1)); 3204 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1, Offset)); 3205 3206 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); 3207 } else if (neededInt) { 3208 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset), 3209 CharUnits::fromQuantity(8)); 3210 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); 3211 3212 // Copy to a temporary if necessary to ensure the appropriate alignment. 3213 std::pair<CharUnits, CharUnits> SizeAlign = 3214 getContext().getTypeInfoInChars(Ty); 3215 uint64_t TySize = SizeAlign.first.getQuantity(); 3216 CharUnits TyAlign = SizeAlign.second; 3217 3218 // Copy into a temporary if the type is more aligned than the 3219 // register save area. 3220 if (TyAlign.getQuantity() > 8) { 3221 Address Tmp = CGF.CreateMemTemp(Ty); 3222 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false); 3223 RegAddr = Tmp; 3224 } 3225 3226 } else if (neededSSE == 1) { 3227 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset), 3228 CharUnits::fromQuantity(16)); 3229 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); 3230 } else { 3231 assert(neededSSE == 2 && "Invalid number of needed registers!"); 3232 // SSE registers are spaced 16 bytes apart in the register save 3233 // area, we need to collect the two eightbytes together. 3234 // The ABI isn't explicit about this, but it seems reasonable 3235 // to assume that the slots are 16-byte aligned, since the stack is 3236 // naturally 16-byte aligned and the prologue is expected to store 3237 // all the SSE registers to the RSA. 
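    // For example (illustrative), a _Complex double or a struct of two
    // doubles classified as two SSE eightbytes occupies two consecutive
    // 16-byte slots here and is repacked into an adjacent pair of doubles
    // below.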
3238 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset), 3239 CharUnits::fromQuantity(16)); 3240 Address RegAddrHi = 3241 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo, 3242 CharUnits::fromQuantity(16)); 3243 llvm::Type *DoubleTy = CGF.DoubleTy; 3244 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr); 3245 llvm::Value *V; 3246 Address Tmp = CGF.CreateMemTemp(Ty); 3247 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); 3248 V = CGF.Builder.CreateLoad( 3249 CGF.Builder.CreateElementBitCast(RegAddrLo, DoubleTy)); 3250 CGF.Builder.CreateStore(V, 3251 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero())); 3252 V = CGF.Builder.CreateLoad( 3253 CGF.Builder.CreateElementBitCast(RegAddrHi, DoubleTy)); 3254 CGF.Builder.CreateStore(V, 3255 CGF.Builder.CreateStructGEP(Tmp, 1, CharUnits::fromQuantity(8))); 3256 3257 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); 3258 } 3259 3260 // AMD64-ABI 3.5.7p5: Step 5. Set: 3261 // l->gp_offset = l->gp_offset + num_gp * 8 3262 // l->fp_offset = l->fp_offset + num_fp * 16. 3263 if (neededInt) { 3264 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 3265 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 3266 gp_offset_p); 3267 } 3268 if (neededSSE) { 3269 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 3270 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 3271 fp_offset_p); 3272 } 3273 CGF.EmitBranch(ContBlock); 3274 3275 // Emit code to load the value if it was passed in memory. 3276 3277 CGF.EmitBlock(InMemBlock); 3278 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty); 3279 3280 // Return the appropriate result. 3281 3282 CGF.EmitBlock(ContBlock); 3283 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock, 3284 "vaarg.addr"); 3285 return ResAddr; 3286 } 3287 3288 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 3289 QualType Ty) const { 3290 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 3291 CGF.getContext().getTypeInfoInChars(Ty), 3292 CharUnits::fromQuantity(8), 3293 /*allowHigherAlign*/ false); 3294 } 3295 3296 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs, 3297 bool IsReturnType) const { 3298 3299 if (Ty->isVoidType()) 3300 return ABIArgInfo::getIgnore(); 3301 3302 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3303 Ty = EnumTy->getDecl()->getIntegerType(); 3304 3305 TypeInfo Info = getContext().getTypeInfo(Ty); 3306 uint64_t Width = Info.Width; 3307 unsigned Align = getContext().toCharUnitsFromBits(Info.Align).getQuantity(); 3308 3309 const RecordType *RT = Ty->getAs<RecordType>(); 3310 if (RT) { 3311 if (!IsReturnType) { 3312 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI())) 3313 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 3314 } 3315 3316 if (RT->getDecl()->hasFlexibleArrayMember()) 3317 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 3318 3319 // FIXME: mingw-w64-gcc emits 128-bit struct as i128 3320 if (Width == 128 && getTarget().getTriple().isWindowsGNUEnvironment()) 3321 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 3322 Width)); 3323 } 3324 3325 // vectorcall adds the concept of a homogenous vector aggregate, similar to 3326 // other targets. 
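  // For instance (illustrative), under vectorcall a struct of four __m128
  // members forms a homogeneous vector aggregate: if enough SSE registers
  // remain it is expanded so each element can use its own register,
  // otherwise it is passed indirectly.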
3327 const Type *Base = nullptr; 3328 uint64_t NumElts = 0; 3329 if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) { 3330 if (FreeSSERegs >= NumElts) { 3331 FreeSSERegs -= NumElts; 3332 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType()) 3333 return ABIArgInfo::getDirect(); 3334 return ABIArgInfo::getExpand(); 3335 } 3336 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align), 3337 /*ByVal=*/false); 3338 } 3339 3340 3341 if (Ty->isMemberPointerType()) { 3342 // If the member pointer is represented by an LLVM int or ptr, pass it 3343 // directly. 3344 llvm::Type *LLTy = CGT.ConvertType(Ty); 3345 if (LLTy->isPointerTy() || LLTy->isIntegerTy()) 3346 return ABIArgInfo::getDirect(); 3347 } 3348 3349 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) { 3350 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 3351 // not 1, 2, 4, or 8 bytes, must be passed by reference." 3352 if (Width > 64 || !llvm::isPowerOf2_64(Width)) 3353 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 3354 3355 // Otherwise, coerce it to a small integer. 3356 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width)); 3357 } 3358 3359 // Bool type is always extended to the ABI, other builtin types are not 3360 // extended. 3361 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 3362 if (BT && BT->getKind() == BuiltinType::Bool) 3363 return ABIArgInfo::getExtend(); 3364 3365 return ABIArgInfo::getDirect(); 3366 } 3367 3368 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 3369 bool IsVectorCall = 3370 FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall; 3371 3372 // We can use up to 4 SSE return registers with vectorcall. 3373 unsigned FreeSSERegs = IsVectorCall ? 4 : 0; 3374 if (!getCXXABI().classifyReturnType(FI)) 3375 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true); 3376 3377 // We can use up to 6 SSE register parameters with vectorcall. 3378 FreeSSERegs = IsVectorCall ? 6 : 0; 3379 for (auto &I : FI.arguments()) 3380 I.info = classify(I.type, FreeSSERegs, false); 3381 } 3382 3383 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 3384 QualType Ty) const { 3385 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 3386 CGF.getContext().getTypeInfoInChars(Ty), 3387 CharUnits::fromQuantity(8), 3388 /*allowHigherAlign*/ false); 3389 } 3390 3391 // PowerPC-32 3392 namespace { 3393 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information. 3394 class PPC32_SVR4_ABIInfo : public DefaultABIInfo { 3395 public: 3396 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 3397 3398 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 3399 QualType Ty) const override; 3400 }; 3401 3402 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo { 3403 public: 3404 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) 3405 : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT)) {} 3406 3407 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 3408 // This is recovered from gcc output. 3409 return 1; // r1 is the dedicated stack pointer 3410 } 3411 3412 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3413 llvm::Value *Address) const override; 3414 }; 3415 3416 } 3417 3418 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList, 3419 QualType Ty) const { 3420 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 3421 // TODO: Implement this. For now ignore. 
3422 (void)CTy; 3423 return Address::invalid(); 3424 } 3425 3426 // struct __va_list_tag { 3427 // unsigned char gpr; 3428 // unsigned char fpr; 3429 // unsigned short reserved; 3430 // void *overflow_arg_area; 3431 // void *reg_save_area; 3432 // }; 3433 3434 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64; 3435 bool isInt = 3436 Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType(); 3437 3438 // All aggregates are passed indirectly? That doesn't seem consistent 3439 // with the argument-lowering code. 3440 bool isIndirect = Ty->isAggregateType(); 3441 3442 CGBuilderTy &Builder = CGF.Builder; 3443 3444 // The calling convention either uses 1-2 GPRs or 1 FPR. 3445 Address NumRegsAddr = Address::invalid(); 3446 if (isInt) { 3447 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, CharUnits::Zero(), "gpr"); 3448 } else { 3449 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, CharUnits::One(), "fpr"); 3450 } 3451 3452 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs"); 3453 3454 // "Align" the register count when TY is i64. 3455 if (isI64) { 3456 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1)); 3457 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U)); 3458 } 3459 3460 llvm::Value *CC = 3461 Builder.CreateICmpULT(NumRegs, Builder.getInt8(8), "cond"); 3462 3463 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs"); 3464 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow"); 3465 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 3466 3467 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow); 3468 3469 llvm::Type *DirectTy = CGF.ConvertType(Ty); 3470 if (isIndirect) DirectTy = DirectTy->getPointerTo(0); 3471 3472 // Case 1: consume registers. 3473 Address RegAddr = Address::invalid(); 3474 { 3475 CGF.EmitBlock(UsingRegs); 3476 3477 Address RegSaveAreaPtr = 3478 Builder.CreateStructGEP(VAList, 4, CharUnits::fromQuantity(8)); 3479 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), 3480 CharUnits::fromQuantity(8)); 3481 assert(RegAddr.getElementType() == CGF.Int8Ty); 3482 3483 // Floating-point registers start after the general-purpose registers. 3484 if (!isInt) { 3485 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr, 3486 CharUnits::fromQuantity(32)); 3487 } 3488 3489 // Get the address of the saved value by scaling the number of 3490 // registers we've used by the number of 3491 CharUnits RegSize = CharUnits::fromQuantity(isInt ? 4 : 8); 3492 llvm::Value *RegOffset = 3493 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity())); 3494 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty, 3495 RegAddr.getPointer(), RegOffset), 3496 RegAddr.getAlignment().alignmentOfArrayElement(RegSize)); 3497 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy); 3498 3499 // Increase the used-register count. 3500 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(isI64 ? 2 : 1)); 3501 Builder.CreateStore(NumRegs, NumRegsAddr); 3502 3503 CGF.EmitBranch(Cont); 3504 } 3505 3506 // Case 2: consume space in the overflow area. 3507 Address MemAddr = Address::invalid(); 3508 { 3509 CGF.EmitBlock(UsingOverflow); 3510 3511 // Everything in the overflow area is rounded up to a size of at least 4. 
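    // (Illustrative) a 1-byte char therefore consumes a full 4-byte slot,
    // while an indirectly-passed aggregate only consumes the size of a
    // pointer here.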
3512 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4); 3513 3514 CharUnits Size; 3515 if (!isIndirect) { 3516 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty); 3517 Size = TypeInfo.first.RoundUpToAlignment(OverflowAreaAlign); 3518 } else { 3519 Size = CGF.getPointerSize(); 3520 } 3521 3522 Address OverflowAreaAddr = 3523 Builder.CreateStructGEP(VAList, 3, CharUnits::fromQuantity(4)); 3524 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr), 3525 OverflowAreaAlign); 3526 3527 // The current address is the address of the varargs element. 3528 // FIXME: do we not need to round up to alignment? 3529 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy); 3530 3531 // Increase the overflow area. 3532 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size); 3533 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr); 3534 CGF.EmitBranch(Cont); 3535 } 3536 3537 CGF.EmitBlock(Cont); 3538 3539 // Merge the cases with a phi. 3540 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow, 3541 "vaarg.addr"); 3542 3543 // Load the pointer if the argument was passed indirectly. 3544 if (isIndirect) { 3545 Result = Address(Builder.CreateLoad(Result, "aggr"), 3546 getContext().getTypeAlignInChars(Ty)); 3547 } 3548 3549 return Result; 3550 } 3551 3552 bool 3553 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3554 llvm::Value *Address) const { 3555 // This is calculated from the LLVM and GCC tables and verified 3556 // against gcc output. AFAIK all ABIs use the same encoding. 3557 3558 CodeGen::CGBuilderTy &Builder = CGF.Builder; 3559 3560 llvm::IntegerType *i8 = CGF.Int8Ty; 3561 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 3562 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 3563 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 3564 3565 // 0-31: r0-31, the 4-byte general-purpose registers 3566 AssignToArrayRange(Builder, Address, Four8, 0, 31); 3567 3568 // 32-63: fp0-31, the 8-byte floating-point registers 3569 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 3570 3571 // 64-76 are various 4-byte special-purpose registers: 3572 // 64: mq 3573 // 65: lr 3574 // 66: ctr 3575 // 67: ap 3576 // 68-75 cr0-7 3577 // 76: xer 3578 AssignToArrayRange(Builder, Address, Four8, 64, 76); 3579 3580 // 77-108: v0-31, the 16-byte vector registers 3581 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 3582 3583 // 109: vrsave 3584 // 110: vscr 3585 // 111: spe_acc 3586 // 112: spefscr 3587 // 113: sfp 3588 AssignToArrayRange(Builder, Address, Four8, 109, 113); 3589 3590 return false; 3591 } 3592 3593 // PowerPC-64 3594 3595 namespace { 3596 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. 3597 class PPC64_SVR4_ABIInfo : public DefaultABIInfo { 3598 public: 3599 enum ABIKind { 3600 ELFv1 = 0, 3601 ELFv2 3602 }; 3603 3604 private: 3605 static const unsigned GPRBits = 64; 3606 ABIKind Kind; 3607 bool HasQPX; 3608 3609 // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and 3610 // will be passed in a QPX register. 
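  // (Illustrative) a 256-bit vector of double or a 128-bit vector of float
  // qualifies; single-element vectors and anything wider do not.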
3611 bool IsQPXVectorTy(const Type *Ty) const { 3612 if (!HasQPX) 3613 return false; 3614 3615 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3616 unsigned NumElements = VT->getNumElements(); 3617 if (NumElements == 1) 3618 return false; 3619 3620 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) { 3621 if (getContext().getTypeSize(Ty) <= 256) 3622 return true; 3623 } else if (VT->getElementType()-> 3624 isSpecificBuiltinType(BuiltinType::Float)) { 3625 if (getContext().getTypeSize(Ty) <= 128) 3626 return true; 3627 } 3628 } 3629 3630 return false; 3631 } 3632 3633 bool IsQPXVectorTy(QualType Ty) const { 3634 return IsQPXVectorTy(Ty.getTypePtr()); 3635 } 3636 3637 public: 3638 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX) 3639 : DefaultABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {} 3640 3641 bool isPromotableTypeForABI(QualType Ty) const; 3642 CharUnits getParamTypeAlignment(QualType Ty) const; 3643 3644 ABIArgInfo classifyReturnType(QualType RetTy) const; 3645 ABIArgInfo classifyArgumentType(QualType Ty) const; 3646 3647 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 3648 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 3649 uint64_t Members) const override; 3650 3651 // TODO: We can add more logic to computeInfo to improve performance. 3652 // Example: For aggregate arguments that fit in a register, we could 3653 // use getDirectInReg (as is done below for structs containing a single 3654 // floating-point value) to avoid pushing them to memory on function 3655 // entry. This would require changing the logic in PPCISelLowering 3656 // when lowering the parameters in the caller and args in the callee. 3657 void computeInfo(CGFunctionInfo &FI) const override { 3658 if (!getCXXABI().classifyReturnType(FI)) 3659 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3660 for (auto &I : FI.arguments()) { 3661 // We rely on the default argument classification for the most part. 3662 // One exception: An aggregate containing a single floating-point 3663 // or vector item must be passed in a register if one is available. 3664 const Type *T = isSingleElementStruct(I.type, getContext()); 3665 if (T) { 3666 const BuiltinType *BT = T->getAs<BuiltinType>(); 3667 if (IsQPXVectorTy(T) || 3668 (T->isVectorType() && getContext().getTypeSize(T) == 128) || 3669 (BT && BT->isFloatingPoint())) { 3670 QualType QT(T, 0); 3671 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); 3672 continue; 3673 } 3674 } 3675 I.info = classifyArgumentType(I.type); 3676 } 3677 } 3678 3679 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 3680 QualType Ty) const override; 3681 }; 3682 3683 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { 3684 3685 public: 3686 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, 3687 PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX) 3688 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX)) {} 3689 3690 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 3691 // This is recovered from gcc output. 3692 return 1; // r1 is the dedicated stack pointer 3693 } 3694 3695 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3696 llvm::Value *Address) const override; 3697 }; 3698 3699 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 3700 public: 3701 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 3702 3703 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 3704 // This is recovered from gcc output. 
3705 return 1; // r1 is the dedicated stack pointer 3706 } 3707 3708 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3709 llvm::Value *Address) const override; 3710 }; 3711 3712 } 3713 3714 // Return true if the ABI requires Ty to be passed sign- or zero- 3715 // extended to 64 bits. 3716 bool 3717 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const { 3718 // Treat an enum type as its underlying type. 3719 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3720 Ty = EnumTy->getDecl()->getIntegerType(); 3721 3722 // Promotable integer types are required to be promoted by the ABI. 3723 if (Ty->isPromotableIntegerType()) 3724 return true; 3725 3726 // In addition to the usual promotable integer types, we also need to 3727 // extend all 32-bit types, since the ABI requires promotion to 64 bits. 3728 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 3729 switch (BT->getKind()) { 3730 case BuiltinType::Int: 3731 case BuiltinType::UInt: 3732 return true; 3733 default: 3734 break; 3735 } 3736 3737 return false; 3738 } 3739 3740 /// isAlignedParamType - Determine whether a type requires 16-byte or 3741 /// higher alignment in the parameter area. Always returns at least 8. 3742 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const { 3743 // Complex types are passed just like their elements. 3744 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 3745 Ty = CTy->getElementType(); 3746 3747 // Only vector types of size 16 bytes need alignment (larger types are 3748 // passed via reference, smaller types are not aligned). 3749 if (IsQPXVectorTy(Ty)) { 3750 if (getContext().getTypeSize(Ty) > 128) 3751 return CharUnits::fromQuantity(32); 3752 3753 return CharUnits::fromQuantity(16); 3754 } else if (Ty->isVectorType()) { 3755 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8); 3756 } 3757 3758 // For single-element float/vector structs, we consider the whole type 3759 // to have the same alignment requirements as its single element. 3760 const Type *AlignAsType = nullptr; 3761 const Type *EltType = isSingleElementStruct(Ty, getContext()); 3762 if (EltType) { 3763 const BuiltinType *BT = EltType->getAs<BuiltinType>(); 3764 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() && 3765 getContext().getTypeSize(EltType) == 128) || 3766 (BT && BT->isFloatingPoint())) 3767 AlignAsType = EltType; 3768 } 3769 3770 // Likewise for ELFv2 homogeneous aggregates. 3771 const Type *Base = nullptr; 3772 uint64_t Members = 0; 3773 if (!AlignAsType && Kind == ELFv2 && 3774 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members)) 3775 AlignAsType = Base; 3776 3777 // With special case aggregates, only vector base types need alignment. 3778 if (AlignAsType && IsQPXVectorTy(AlignAsType)) { 3779 if (getContext().getTypeSize(AlignAsType) > 128) 3780 return CharUnits::fromQuantity(32); 3781 3782 return CharUnits::fromQuantity(16); 3783 } else if (AlignAsType) { 3784 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8); 3785 } 3786 3787 // Otherwise, we only need alignment for any aggregate type that 3788 // has an alignment requirement of >= 16 bytes. 3789 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) { 3790 if (HasQPX && getContext().getTypeAlign(Ty) >= 256) 3791 return CharUnits::fromQuantity(32); 3792 return CharUnits::fromQuantity(16); 3793 } 3794 3795 return CharUnits::fromQuantity(8); 3796 } 3797 3798 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous 3799 /// aggregate. 
Base is set to the base element type, and Members is set 3800 /// to the number of base elements. 3801 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base, 3802 uint64_t &Members) const { 3803 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 3804 uint64_t NElements = AT->getSize().getZExtValue(); 3805 if (NElements == 0) 3806 return false; 3807 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members)) 3808 return false; 3809 Members *= NElements; 3810 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 3811 const RecordDecl *RD = RT->getDecl(); 3812 if (RD->hasFlexibleArrayMember()) 3813 return false; 3814 3815 Members = 0; 3816 3817 // If this is a C++ record, check the bases first. 3818 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 3819 for (const auto &I : CXXRD->bases()) { 3820 // Ignore empty records. 3821 if (isEmptyRecord(getContext(), I.getType(), true)) 3822 continue; 3823 3824 uint64_t FldMembers; 3825 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers)) 3826 return false; 3827 3828 Members += FldMembers; 3829 } 3830 } 3831 3832 for (const auto *FD : RD->fields()) { 3833 // Ignore (non-zero arrays of) empty records. 3834 QualType FT = FD->getType(); 3835 while (const ConstantArrayType *AT = 3836 getContext().getAsConstantArrayType(FT)) { 3837 if (AT->getSize().getZExtValue() == 0) 3838 return false; 3839 FT = AT->getElementType(); 3840 } 3841 if (isEmptyRecord(getContext(), FT, true)) 3842 continue; 3843 3844 // For compatibility with GCC, ignore empty bitfields in C++ mode. 3845 if (getContext().getLangOpts().CPlusPlus && 3846 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0) 3847 continue; 3848 3849 uint64_t FldMembers; 3850 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers)) 3851 return false; 3852 3853 Members = (RD->isUnion() ? 3854 std::max(Members, FldMembers) : Members + FldMembers); 3855 } 3856 3857 if (!Base) 3858 return false; 3859 3860 // Ensure there is no padding. 3861 if (getContext().getTypeSize(Base) * Members != 3862 getContext().getTypeSize(Ty)) 3863 return false; 3864 } else { 3865 Members = 1; 3866 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 3867 Members = 2; 3868 Ty = CT->getElementType(); 3869 } 3870 3871 // Most ABIs only support float, double, and some vector type widths. 3872 if (!isHomogeneousAggregateBaseType(Ty)) 3873 return false; 3874 3875 // The base type must be the same for all members. Types that 3876 // agree in both total size and mode (float vs. vector) are 3877 // treated as being equivalent here. 3878 const Type *TyPtr = Ty.getTypePtr(); 3879 if (!Base) 3880 Base = TyPtr; 3881 3882 if (Base->isVectorType() != TyPtr->isVectorType() || 3883 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr)) 3884 return false; 3885 } 3886 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members); 3887 } 3888 3889 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 3890 // Homogeneous aggregates for ELFv2 must have base types of float, 3891 // double, long double, or 128-bit vectors. 
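  // (Illustrative) struct { double x, y, z; } is a homogeneous aggregate
  // with Base = double and Members = 3; adding an int member would break
  // the pattern and disqualify it.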
3892 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 3893 if (BT->getKind() == BuiltinType::Float || 3894 BT->getKind() == BuiltinType::Double || 3895 BT->getKind() == BuiltinType::LongDouble) 3896 return true; 3897 } 3898 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3899 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty)) 3900 return true; 3901 } 3902 return false; 3903 } 3904 3905 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough( 3906 const Type *Base, uint64_t Members) const { 3907 // Vector types require one register, floating point types require one 3908 // or two registers depending on their size. 3909 uint32_t NumRegs = 3910 Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64; 3911 3912 // Homogeneous Aggregates may occupy at most 8 registers. 3913 return Members * NumRegs <= 8; 3914 } 3915 3916 ABIArgInfo 3917 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { 3918 Ty = useFirstFieldIfTransparentUnion(Ty); 3919 3920 if (Ty->isAnyComplexType()) 3921 return ABIArgInfo::getDirect(); 3922 3923 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes) 3924 // or via reference (larger than 16 bytes). 3925 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) { 3926 uint64_t Size = getContext().getTypeSize(Ty); 3927 if (Size > 128) 3928 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 3929 else if (Size < 128) { 3930 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 3931 return ABIArgInfo::getDirect(CoerceTy); 3932 } 3933 } 3934 3935 if (isAggregateTypeForABI(Ty)) { 3936 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 3937 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 3938 3939 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity(); 3940 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity(); 3941 3942 // ELFv2 homogeneous aggregates are passed as array types. 3943 const Type *Base = nullptr; 3944 uint64_t Members = 0; 3945 if (Kind == ELFv2 && 3946 isHomogeneousAggregate(Ty, Base, Members)) { 3947 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 3948 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 3949 return ABIArgInfo::getDirect(CoerceTy); 3950 } 3951 3952 // If an aggregate may end up fully in registers, we do not 3953 // use the ByVal method, but pass the aggregate as array. 3954 // This is usually beneficial since we avoid forcing the 3955 // back-end to store the argument to memory. 3956 uint64_t Bits = getContext().getTypeSize(Ty); 3957 if (Bits > 0 && Bits <= 8 * GPRBits) { 3958 llvm::Type *CoerceTy; 3959 3960 // Types up to 8 bytes are passed as integer type (which will be 3961 // properly aligned in the argument save area doubleword). 3962 if (Bits <= GPRBits) 3963 CoerceTy = llvm::IntegerType::get(getVMContext(), 3964 llvm::RoundUpToAlignment(Bits, 8)); 3965 // Larger types are passed as arrays, with the base type selected 3966 // according to the required alignment in the save area. 3967 else { 3968 uint64_t RegBits = ABIAlign * 8; 3969 uint64_t NumRegs = llvm::RoundUpToAlignment(Bits, RegBits) / RegBits; 3970 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits); 3971 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs); 3972 } 3973 3974 return ABIArgInfo::getDirect(CoerceTy); 3975 } 3976 3977 // All other aggregates are passed ByVal. 
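    // (Illustrative) an aggregate larger than 64 bytes, i.e. more than
    // eight doublewords, falls through to this indirect case.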
3978 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), 3979 /*ByVal=*/true, 3980 /*Realign=*/TyAlign > ABIAlign); 3981 } 3982 3983 return (isPromotableTypeForABI(Ty) ? 3984 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 3985 } 3986 3987 ABIArgInfo 3988 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { 3989 if (RetTy->isVoidType()) 3990 return ABIArgInfo::getIgnore(); 3991 3992 if (RetTy->isAnyComplexType()) 3993 return ABIArgInfo::getDirect(); 3994 3995 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes) 3996 // or via reference (larger than 16 bytes). 3997 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) { 3998 uint64_t Size = getContext().getTypeSize(RetTy); 3999 if (Size > 128) 4000 return getNaturalAlignIndirect(RetTy); 4001 else if (Size < 128) { 4002 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 4003 return ABIArgInfo::getDirect(CoerceTy); 4004 } 4005 } 4006 4007 if (isAggregateTypeForABI(RetTy)) { 4008 // ELFv2 homogeneous aggregates are returned as array types. 4009 const Type *Base = nullptr; 4010 uint64_t Members = 0; 4011 if (Kind == ELFv2 && 4012 isHomogeneousAggregate(RetTy, Base, Members)) { 4013 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 4014 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 4015 return ABIArgInfo::getDirect(CoerceTy); 4016 } 4017 4018 // ELFv2 small aggregates are returned in up to two registers. 4019 uint64_t Bits = getContext().getTypeSize(RetTy); 4020 if (Kind == ELFv2 && Bits <= 2 * GPRBits) { 4021 if (Bits == 0) 4022 return ABIArgInfo::getIgnore(); 4023 4024 llvm::Type *CoerceTy; 4025 if (Bits > GPRBits) { 4026 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits); 4027 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr); 4028 } else 4029 CoerceTy = llvm::IntegerType::get(getVMContext(), 4030 llvm::RoundUpToAlignment(Bits, 8)); 4031 return ABIArgInfo::getDirect(CoerceTy); 4032 } 4033 4034 // All other aggregates are returned indirectly. 4035 return getNaturalAlignIndirect(RetTy); 4036 } 4037 4038 return (isPromotableTypeForABI(RetTy) ? 4039 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4040 } 4041 4042 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 4043 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4044 QualType Ty) const { 4045 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 4046 TypeInfo.second = getParamTypeAlignment(Ty); 4047 4048 CharUnits SlotSize = CharUnits::fromQuantity(8); 4049 4050 // If we have a complex type and the base type is smaller than 8 bytes, 4051 // the ABI calls for the real and imaginary parts to be right-adjusted 4052 // in separate doublewords. However, Clang expects us to produce a 4053 // pointer to a structure with the two parts packed tightly. So generate 4054 // loads of the real and imaginary parts relative to the va_list pointer, 4055 // and store them to a temporary structure. 
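  // (Illustrative) for _Complex float each 4-byte part occupies its own
  // 8-byte doubleword and, on big-endian targets, sits right-adjusted in
  // that doubleword; the code below loads both parts and repacks them
  // contiguously.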
4056 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 4057 CharUnits EltSize = TypeInfo.first / 2; 4058 if (EltSize < SlotSize) { 4059 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, 4060 SlotSize * 2, SlotSize, 4061 SlotSize, /*AllowHigher*/ true); 4062 4063 Address RealAddr = Addr; 4064 Address ImagAddr = RealAddr; 4065 if (CGF.CGM.getDataLayout().isBigEndian()) { 4066 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, 4067 SlotSize - EltSize); 4068 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr, 4069 2 * SlotSize - EltSize); 4070 } else { 4071 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize); 4072 } 4073 4074 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType()); 4075 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy); 4076 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy); 4077 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal"); 4078 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag"); 4079 4080 Address Temp = CGF.CreateMemTemp(Ty, "vacplx"); 4081 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty), 4082 /*init*/ true); 4083 return Temp; 4084 } 4085 } 4086 4087 // Otherwise, just use the general rule. 4088 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, 4089 TypeInfo, SlotSize, /*AllowHigher*/ true); 4090 } 4091 4092 static bool 4093 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4094 llvm::Value *Address) { 4095 // This is calculated from the LLVM and GCC tables and verified 4096 // against gcc output. AFAIK all ABIs use the same encoding. 4097 4098 CodeGen::CGBuilderTy &Builder = CGF.Builder; 4099 4100 llvm::IntegerType *i8 = CGF.Int8Ty; 4101 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 4102 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 4103 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 4104 4105 // 0-31: r0-31, the 8-byte general-purpose registers 4106 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 4107 4108 // 32-63: fp0-31, the 8-byte floating-point registers 4109 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 4110 4111 // 64-76 are various 4-byte special-purpose registers: 4112 // 64: mq 4113 // 65: lr 4114 // 66: ctr 4115 // 67: ap 4116 // 68-75 cr0-7 4117 // 76: xer 4118 AssignToArrayRange(Builder, Address, Four8, 64, 76); 4119 4120 // 77-108: v0-31, the 16-byte vector registers 4121 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 4122 4123 // 109: vrsave 4124 // 110: vscr 4125 // 111: spe_acc 4126 // 112: spefscr 4127 // 113: sfp 4128 AssignToArrayRange(Builder, Address, Four8, 109, 113); 4129 4130 return false; 4131 } 4132 4133 bool 4134 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( 4135 CodeGen::CodeGenFunction &CGF, 4136 llvm::Value *Address) const { 4137 4138 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 4139 } 4140 4141 bool 4142 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4143 llvm::Value *Address) const { 4144 4145 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 4146 } 4147 4148 //===----------------------------------------------------------------------===// 4149 // AArch64 ABI Implementation 4150 //===----------------------------------------------------------------------===// 4151 4152 namespace { 4153 4154 class AArch64ABIInfo : public ABIInfo { 4155 public: 4156 enum ABIKind { 4157 AAPCS = 0, 4158 DarwinPCS 4159 }; 4160 4161 private: 4162 ABIKind Kind; 4163 4164 public: 4165 
AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {} 4166 4167 private: 4168 ABIKind getABIKind() const { return Kind; } 4169 bool isDarwinPCS() const { return Kind == DarwinPCS; } 4170 4171 ABIArgInfo classifyReturnType(QualType RetTy) const; 4172 ABIArgInfo classifyArgumentType(QualType RetTy) const; 4173 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 4174 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 4175 uint64_t Members) const override; 4176 4177 bool isIllegalVectorType(QualType Ty) const; 4178 4179 void computeInfo(CGFunctionInfo &FI) const override { 4180 if (!getCXXABI().classifyReturnType(FI)) 4181 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4182 4183 for (auto &it : FI.arguments()) 4184 it.info = classifyArgumentType(it.type); 4185 } 4186 4187 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty, 4188 CodeGenFunction &CGF) const; 4189 4190 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty, 4191 CodeGenFunction &CGF) const; 4192 4193 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4194 QualType Ty) const override { 4195 return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF) 4196 : EmitAAPCSVAArg(VAListAddr, Ty, CGF); 4197 } 4198 }; 4199 4200 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo { 4201 public: 4202 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind) 4203 : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {} 4204 4205 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 4206 return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue"; 4207 } 4208 4209 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4210 return 31; 4211 } 4212 4213 bool doesReturnSlotInterfereWithArgs() const override { return false; } 4214 }; 4215 } 4216 4217 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const { 4218 Ty = useFirstFieldIfTransparentUnion(Ty); 4219 4220 // Handle illegal vector types here. 4221 if (isIllegalVectorType(Ty)) { 4222 uint64_t Size = getContext().getTypeSize(Ty); 4223 if (Size <= 32) { 4224 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext()); 4225 return ABIArgInfo::getDirect(ResType); 4226 } 4227 if (Size == 64) { 4228 llvm::Type *ResType = 4229 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2); 4230 return ABIArgInfo::getDirect(ResType); 4231 } 4232 if (Size == 128) { 4233 llvm::Type *ResType = 4234 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4); 4235 return ABIArgInfo::getDirect(ResType); 4236 } 4237 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 4238 } 4239 4240 if (!isAggregateTypeForABI(Ty)) { 4241 // Treat an enum type as its underlying type. 4242 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4243 Ty = EnumTy->getDecl()->getIntegerType(); 4244 4245 return (Ty->isPromotableIntegerType() && isDarwinPCS() 4246 ? ABIArgInfo::getExtend() 4247 : ABIArgInfo::getDirect()); 4248 } 4249 4250 // Structures with either a non-trivial destructor or a non-trivial 4251 // copy constructor are always indirect. 4252 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 4253 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA == 4254 CGCXXABI::RAA_DirectInMemory); 4255 } 4256 4257 // Empty records are always ignored on Darwin, but actually passed in C++ mode 4258 // elsewhere for GNU compatibility. 
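  // (Illustrative) an empty struct argument lowers to Ignore under Darwin
  // or in C, but is materialized as a direct i8 for C++ on AAPCS targets.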
4259 if (isEmptyRecord(getContext(), Ty, true)) { 4260 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS()) 4261 return ABIArgInfo::getIgnore(); 4262 4263 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 4264 } 4265 4266 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded. 4267 const Type *Base = nullptr; 4268 uint64_t Members = 0; 4269 if (isHomogeneousAggregate(Ty, Base, Members)) { 4270 return ABIArgInfo::getDirect( 4271 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members)); 4272 } 4273 4274 // Aggregates <= 16 bytes are passed directly in registers or on the stack. 4275 uint64_t Size = getContext().getTypeSize(Ty); 4276 if (Size <= 128) { 4277 unsigned Alignment = getContext().getTypeAlign(Ty); 4278 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes 4279 4280 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. 4281 // For aggregates with 16-byte alignment, we use i128. 4282 if (Alignment < 128 && Size == 128) { 4283 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); 4284 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); 4285 } 4286 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 4287 } 4288 4289 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 4290 } 4291 4292 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const { 4293 if (RetTy->isVoidType()) 4294 return ABIArgInfo::getIgnore(); 4295 4296 // Large vector types should be returned via memory. 4297 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 4298 return getNaturalAlignIndirect(RetTy); 4299 4300 if (!isAggregateTypeForABI(RetTy)) { 4301 // Treat an enum type as its underlying type. 4302 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 4303 RetTy = EnumTy->getDecl()->getIntegerType(); 4304 4305 return (RetTy->isPromotableIntegerType() && isDarwinPCS() 4306 ? ABIArgInfo::getExtend() 4307 : ABIArgInfo::getDirect()); 4308 } 4309 4310 if (isEmptyRecord(getContext(), RetTy, true)) 4311 return ABIArgInfo::getIgnore(); 4312 4313 const Type *Base = nullptr; 4314 uint64_t Members = 0; 4315 if (isHomogeneousAggregate(RetTy, Base, Members)) 4316 // Homogeneous Floating-point Aggregates (HFAs) are returned directly. 4317 return ABIArgInfo::getDirect(); 4318 4319 // Aggregates <= 16 bytes are returned directly in registers or on the stack. 4320 uint64_t Size = getContext().getTypeSize(RetTy); 4321 if (Size <= 128) { 4322 unsigned Alignment = getContext().getTypeAlign(RetTy); 4323 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes 4324 4325 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. 4326 // For aggregates with 16-byte alignment, we use i128. 4327 if (Alignment < 128 && Size == 128) { 4328 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); 4329 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); 4330 } 4331 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 4332 } 4333 4334 return getNaturalAlignIndirect(RetTy); 4335 } 4336 4337 /// isIllegalVectorType - check whether the vector type is legal for AArch64. 4338 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const { 4339 if (const VectorType *VT = Ty->getAs<VectorType>()) { 4340 // Check whether VT is legal. 4341 unsigned NumElements = VT->getNumElements(); 4342 uint64_t Size = getContext().getTypeSize(VT); 4343 // NumElements should be power of 2 between 1 and 16. 
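    // (Illustrative) a 3-element vector or a 256-bit vector is treated as
    // illegal here and is coerced to an integer vector or passed indirectly
    // by the classification code above.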
4344 if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16) 4345 return true; 4346 return Size != 64 && (Size != 128 || NumElements == 1); 4347 } 4348 return false; 4349 } 4350 4351 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 4352 // Homogeneous aggregates for AAPCS64 must have base types of a floating 4353 // point type or a short-vector type. This is the same as the 32-bit ABI, 4354 // but with the difference that any floating-point type is allowed, 4355 // including __fp16. 4356 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 4357 if (BT->isFloatingPoint()) 4358 return true; 4359 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 4360 unsigned VecSize = getContext().getTypeSize(VT); 4361 if (VecSize == 64 || VecSize == 128) 4362 return true; 4363 } 4364 return false; 4365 } 4366 4367 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 4368 uint64_t Members) const { 4369 return Members <= 4; 4370 } 4371 4372 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, 4373 QualType Ty, 4374 CodeGenFunction &CGF) const { 4375 ABIArgInfo AI = classifyArgumentType(Ty); 4376 bool IsIndirect = AI.isIndirect(); 4377 4378 llvm::Type *BaseTy = CGF.ConvertType(Ty); 4379 if (IsIndirect) 4380 BaseTy = llvm::PointerType::getUnqual(BaseTy); 4381 else if (AI.getCoerceToType()) 4382 BaseTy = AI.getCoerceToType(); 4383 4384 unsigned NumRegs = 1; 4385 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) { 4386 BaseTy = ArrTy->getElementType(); 4387 NumRegs = ArrTy->getNumElements(); 4388 } 4389 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy(); 4390 4391 // The AArch64 va_list type and handling is specified in the Procedure Call 4392 // Standard, section B.4: 4393 // 4394 // struct { 4395 // void *__stack; 4396 // void *__gr_top; 4397 // void *__vr_top; 4398 // int __gr_offs; 4399 // int __vr_offs; 4400 // }; 4401 4402 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg"); 4403 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 4404 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack"); 4405 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 4406 4407 auto TyInfo = getContext().getTypeInfoInChars(Ty); 4408 CharUnits TyAlign = TyInfo.second; 4409 4410 Address reg_offs_p = Address::invalid(); 4411 llvm::Value *reg_offs = nullptr; 4412 int reg_top_index; 4413 CharUnits reg_top_offset; 4414 int RegSize = IsIndirect ? 8 : TyInfo.first.getQuantity(); 4415 if (!IsFPR) { 4416 // 3 is the field number of __gr_offs 4417 reg_offs_p = 4418 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24), 4419 "gr_offs_p"); 4420 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs"); 4421 reg_top_index = 1; // field number for __gr_top 4422 reg_top_offset = CharUnits::fromQuantity(8); 4423 RegSize = llvm::RoundUpToAlignment(RegSize, 8); 4424 } else { 4425 // 4 is the field number of __vr_offs. 4426 reg_offs_p = 4427 CGF.Builder.CreateStructGEP(VAListAddr, 4, CharUnits::fromQuantity(28), 4428 "vr_offs_p"); 4429 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs"); 4430 reg_top_index = 2; // field number for __vr_top 4431 reg_top_offset = CharUnits::fromQuantity(16); 4432 RegSize = 16 * NumRegs; 4433 } 4434 4435 //======================================= 4436 // Find out where argument was passed 4437 //======================================= 4438 4439 // If reg_offs >= 0 we're already using the stack for this type of 4440 // argument. 
We don't want to keep updating reg_offs (in case it overflows, 4441 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves 4442 // whatever they get). 4443 llvm::Value *UsingStack = nullptr; 4444 UsingStack = CGF.Builder.CreateICmpSGE( 4445 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0)); 4446 4447 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock); 4448 4449 // Otherwise, at least some kind of argument could go in these registers, the 4450 // question is whether this particular type is too big. 4451 CGF.EmitBlock(MaybeRegBlock); 4452 4453 // Integer arguments may need to correct register alignment (for example a 4454 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we 4455 // align __gr_offs to calculate the potential address. 4456 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) { 4457 int Align = TyAlign.getQuantity(); 4458 4459 reg_offs = CGF.Builder.CreateAdd( 4460 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1), 4461 "align_regoffs"); 4462 reg_offs = CGF.Builder.CreateAnd( 4463 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align), 4464 "aligned_regoffs"); 4465 } 4466 4467 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list. 4468 // The fact that this is done unconditionally reflects the fact that 4469 // allocating an argument to the stack also uses up all the remaining 4470 // registers of the appropriate kind. 4471 llvm::Value *NewOffset = nullptr; 4472 NewOffset = CGF.Builder.CreateAdd( 4473 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs"); 4474 CGF.Builder.CreateStore(NewOffset, reg_offs_p); 4475 4476 // Now we're in a position to decide whether this argument really was in 4477 // registers or not. 4478 llvm::Value *InRegs = nullptr; 4479 InRegs = CGF.Builder.CreateICmpSLE( 4480 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg"); 4481 4482 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock); 4483 4484 //======================================= 4485 // Argument was in registers 4486 //======================================= 4487 4488 // Now we emit the code for if the argument was originally passed in 4489 // registers. First start the appropriate block: 4490 CGF.EmitBlock(InRegBlock); 4491 4492 llvm::Value *reg_top = nullptr; 4493 Address reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, 4494 reg_top_offset, "reg_top_p"); 4495 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top"); 4496 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs), 4497 CharUnits::fromQuantity(IsFPR ? 16 : 8)); 4498 Address RegAddr = Address::invalid(); 4499 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty); 4500 4501 if (IsIndirect) { 4502 // If it's been passed indirectly (actually a struct), whatever we find from 4503 // stored registers or on the stack will actually be a struct **. 4504 MemTy = llvm::PointerType::getUnqual(MemTy); 4505 } 4506 4507 const Type *Base = nullptr; 4508 uint64_t NumMembers = 0; 4509 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers); 4510 if (IsHFA && NumMembers > 1) { 4511 // Homogeneous aggregates passed in registers will have their elements split 4512 // and stored 16-bytes apart regardless of size (they're notionally in qN, 4513 // qN+1, ...). We reload and store into a temporary local variable 4514 // contiguously. 
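    // (Illustrative) an HFA of four floats notionally occupies q0-q3; each
    // element is loaded from its own 16-byte register slot below and stored
    // 4 bytes apart in the temporary.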
4515 assert(!IsIndirect && "Homogeneous aggregates should be passed directly"); 4516 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0)); 4517 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0)); 4518 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers); 4519 Address Tmp = CGF.CreateTempAlloca(HFATy, 4520 std::max(TyAlign, BaseTyInfo.second)); 4521 4522 // On big-endian platforms, the value will be right-aligned in its slot. 4523 int Offset = 0; 4524 if (CGF.CGM.getDataLayout().isBigEndian() && 4525 BaseTyInfo.first.getQuantity() < 16) 4526 Offset = 16 - BaseTyInfo.first.getQuantity(); 4527 4528 for (unsigned i = 0; i < NumMembers; ++i) { 4529 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset); 4530 Address LoadAddr = 4531 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset); 4532 LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy); 4533 4534 Address StoreAddr = 4535 CGF.Builder.CreateConstArrayGEP(Tmp, i, BaseTyInfo.first); 4536 4537 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr); 4538 CGF.Builder.CreateStore(Elem, StoreAddr); 4539 } 4540 4541 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy); 4542 } else { 4543 // Otherwise the object is contiguous in memory. 4544 4545 // It might be right-aligned in its slot. 4546 CharUnits SlotSize = BaseAddr.getAlignment(); 4547 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect && 4548 (IsHFA || !isAggregateTypeForABI(Ty)) && 4549 TyInfo.first < SlotSize) { 4550 CharUnits Offset = SlotSize - TyInfo.first; 4551 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset); 4552 } 4553 4554 RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy); 4555 } 4556 4557 CGF.EmitBranch(ContBlock); 4558 4559 //======================================= 4560 // Argument was on the stack 4561 //======================================= 4562 CGF.EmitBlock(OnStackBlock); 4563 4564 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, 4565 CharUnits::Zero(), "stack_p"); 4566 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack"); 4567 4568 // Again, stack arguments may need realignment. In this case both integer and 4569 // floating-point ones might be affected. 4570 if (!IsIndirect && TyAlign.getQuantity() > 8) { 4571 int Align = TyAlign.getQuantity(); 4572 4573 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty); 4574 4575 OnStackPtr = CGF.Builder.CreateAdd( 4576 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1), 4577 "align_stack"); 4578 OnStackPtr = CGF.Builder.CreateAnd( 4579 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align), 4580 "align_stack"); 4581 4582 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy); 4583 } 4584 Address OnStackAddr(OnStackPtr, 4585 std::max(CharUnits::fromQuantity(8), TyAlign)); 4586 4587 // All stack slots are multiples of 8 bytes. 
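  // (Illustrative) a 12-byte struct therefore consumes a 16-byte stack
  // slot, while an indirectly-passed argument consumes a single 8-byte
  // pointer slot.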
4588 CharUnits StackSlotSize = CharUnits::fromQuantity(8); 4589 CharUnits StackSize; 4590 if (IsIndirect) 4591 StackSize = StackSlotSize; 4592 else 4593 StackSize = TyInfo.first.RoundUpToAlignment(StackSlotSize); 4594 4595 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize); 4596 llvm::Value *NewStack = 4597 CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack"); 4598 4599 // Write the new value of __stack for the next call to va_arg 4600 CGF.Builder.CreateStore(NewStack, stack_p); 4601 4602 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) && 4603 TyInfo.first < StackSlotSize) { 4604 CharUnits Offset = StackSlotSize - TyInfo.first; 4605 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset); 4606 } 4607 4608 OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy); 4609 4610 CGF.EmitBranch(ContBlock); 4611 4612 //======================================= 4613 // Tidy up 4614 //======================================= 4615 CGF.EmitBlock(ContBlock); 4616 4617 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, 4618 OnStackAddr, OnStackBlock, "vaargs.addr"); 4619 4620 if (IsIndirect) 4621 return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), 4622 TyInfo.second); 4623 4624 return ResAddr; 4625 } 4626 4627 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty, 4628 CodeGenFunction &CGF) const { 4629 // The backend's lowering doesn't support va_arg for aggregates or 4630 // illegal vector types. Lower VAArg here for these cases and use 4631 // the LLVM va_arg instruction for everything else. 4632 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty)) 4633 return Address::invalid(); 4634 4635 CharUnits SlotSize = CharUnits::fromQuantity(8); 4636 4637 // Empty records are ignored for parameter passing purposes. 4638 if (isEmptyRecord(getContext(), Ty, true)) { 4639 Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize); 4640 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); 4641 return Addr; 4642 } 4643 4644 // The size of the actual thing passed, which might end up just 4645 // being a pointer for indirect types. 4646 auto TyInfo = getContext().getTypeInfoInChars(Ty); 4647 4648 // Arguments bigger than 16 bytes which aren't homogeneous 4649 // aggregates should be passed indirectly. 
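  // (Illustrative) a 24-byte non-HFA struct is passed via a pointer in the
  // Darwin variadic area, whereas a 24-byte HFA of three doubles is still
  // passed by value.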
4650 bool IsIndirect = false; 4651 if (TyInfo.first.getQuantity() > 16) { 4652 const Type *Base = nullptr; 4653 uint64_t Members = 0; 4654 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members); 4655 } 4656 4657 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, 4658 TyInfo, SlotSize, /*AllowHigherAlign*/ true); 4659 } 4660 4661 //===----------------------------------------------------------------------===// 4662 // ARM ABI Implementation 4663 //===----------------------------------------------------------------------===// 4664 4665 namespace { 4666 4667 class ARMABIInfo : public ABIInfo { 4668 public: 4669 enum ABIKind { 4670 APCS = 0, 4671 AAPCS = 1, 4672 AAPCS_VFP 4673 }; 4674 4675 private: 4676 ABIKind Kind; 4677 4678 public: 4679 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) { 4680 setCCs(); 4681 } 4682 4683 bool isEABI() const { 4684 switch (getTarget().getTriple().getEnvironment()) { 4685 case llvm::Triple::Android: 4686 case llvm::Triple::EABI: 4687 case llvm::Triple::EABIHF: 4688 case llvm::Triple::GNUEABI: 4689 case llvm::Triple::GNUEABIHF: 4690 return true; 4691 default: 4692 return false; 4693 } 4694 } 4695 4696 bool isEABIHF() const { 4697 switch (getTarget().getTriple().getEnvironment()) { 4698 case llvm::Triple::EABIHF: 4699 case llvm::Triple::GNUEABIHF: 4700 return true; 4701 default: 4702 return false; 4703 } 4704 } 4705 4706 ABIKind getABIKind() const { return Kind; } 4707 4708 private: 4709 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const; 4710 ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const; 4711 bool isIllegalVectorType(QualType Ty) const; 4712 4713 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 4714 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 4715 uint64_t Members) const override; 4716 4717 void computeInfo(CGFunctionInfo &FI) const override; 4718 4719 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4720 QualType Ty) const override; 4721 4722 llvm::CallingConv::ID getLLVMDefaultCC() const; 4723 llvm::CallingConv::ID getABIDefaultCC() const; 4724 void setCCs(); 4725 }; 4726 4727 class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 4728 public: 4729 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 4730 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 4731 4732 const ARMABIInfo &getABIInfo() const { 4733 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 4734 } 4735 4736 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4737 return 13; 4738 } 4739 4740 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 4741 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; 4742 } 4743 4744 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4745 llvm::Value *Address) const override { 4746 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 4747 4748 // 0-15 are the 16 integer registers. 
4749 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); 4750 return false; 4751 } 4752 4753 unsigned getSizeOfUnwindException() const override { 4754 if (getABIInfo().isEABI()) return 88; 4755 return TargetCodeGenInfo::getSizeOfUnwindException(); 4756 } 4757 4758 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4759 CodeGen::CodeGenModule &CGM) const override { 4760 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 4761 if (!FD) 4762 return; 4763 4764 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>(); 4765 if (!Attr) 4766 return; 4767 4768 const char *Kind; 4769 switch (Attr->getInterrupt()) { 4770 case ARMInterruptAttr::Generic: Kind = ""; break; 4771 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break; 4772 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break; 4773 case ARMInterruptAttr::SWI: Kind = "SWI"; break; 4774 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break; 4775 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break; 4776 } 4777 4778 llvm::Function *Fn = cast<llvm::Function>(GV); 4779 4780 Fn->addFnAttr("interrupt", Kind); 4781 4782 if (cast<ARMABIInfo>(getABIInfo()).getABIKind() == ARMABIInfo::APCS) 4783 return; 4784 4785 // AAPCS guarantees that sp will be 8-byte aligned on any public interface, 4786 // however this is not necessarily true on taking any interrupt. Instruct 4787 // the backend to perform a realignment as part of the function prologue. 4788 llvm::AttrBuilder B; 4789 B.addStackAlignmentAttr(8); 4790 Fn->addAttributes(llvm::AttributeSet::FunctionIndex, 4791 llvm::AttributeSet::get(CGM.getLLVMContext(), 4792 llvm::AttributeSet::FunctionIndex, 4793 B)); 4794 } 4795 }; 4796 4797 class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo { 4798 void addStackProbeSizeTargetAttribute(const Decl *D, llvm::GlobalValue *GV, 4799 CodeGen::CodeGenModule &CGM) const; 4800 4801 public: 4802 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 4803 : ARMTargetCodeGenInfo(CGT, K) {} 4804 4805 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 4806 CodeGen::CodeGenModule &CGM) const override; 4807 }; 4808 4809 void WindowsARMTargetCodeGenInfo::addStackProbeSizeTargetAttribute( 4810 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 4811 if (!isa<FunctionDecl>(D)) 4812 return; 4813 if (CGM.getCodeGenOpts().StackProbeSize == 4096) 4814 return; 4815 4816 llvm::Function *F = cast<llvm::Function>(GV); 4817 F->addFnAttr("stack-probe-size", 4818 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize)); 4819 } 4820 4821 void WindowsARMTargetCodeGenInfo::setTargetAttributes( 4822 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 4823 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 4824 addStackProbeSizeTargetAttribute(D, GV, CGM); 4825 } 4826 } 4827 4828 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 4829 if (!getCXXABI().classifyReturnType(FI)) 4830 FI.getReturnInfo() = 4831 classifyReturnType(FI.getReturnType(), FI.isVariadic()); 4832 4833 for (auto &I : FI.arguments()) 4834 I.info = classifyArgumentType(I.type, FI.isVariadic()); 4835 4836 // Always honor user-specified calling convention. 4837 if (FI.getCallingConvention() != llvm::CallingConv::C) 4838 return; 4839 4840 llvm::CallingConv::ID cc = getRuntimeCC(); 4841 if (cc != llvm::CallingConv::C) 4842 FI.setEffectiveCallingConvention(cc); 4843 } 4844 4845 /// Return the default calling convention that LLVM will use. 
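/// This is the convention LLVM would infer from the target triple on its own;
/// setCCs() below only records an explicit runtime CC when the ABI-mandated
/// convention (getABIDefaultCC()) differs from this default, to avoid
/// redundant annotations in the IR.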
4846 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const { 4847 // The default calling convention that LLVM will infer. 4848 if (isEABIHF()) 4849 return llvm::CallingConv::ARM_AAPCS_VFP; 4850 else if (isEABI()) 4851 return llvm::CallingConv::ARM_AAPCS; 4852 else 4853 return llvm::CallingConv::ARM_APCS; 4854 } 4855 4856 /// Return the calling convention that our ABI would like us to use 4857 /// as the C calling convention. 4858 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const { 4859 switch (getABIKind()) { 4860 case APCS: return llvm::CallingConv::ARM_APCS; 4861 case AAPCS: return llvm::CallingConv::ARM_AAPCS; 4862 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 4863 } 4864 llvm_unreachable("bad ABI kind"); 4865 } 4866 4867 void ARMABIInfo::setCCs() { 4868 assert(getRuntimeCC() == llvm::CallingConv::C); 4869 4870 // Don't muddy up the IR with a ton of explicit annotations if 4871 // they'd just match what LLVM will infer from the triple. 4872 llvm::CallingConv::ID abiCC = getABIDefaultCC(); 4873 if (abiCC != getLLVMDefaultCC()) 4874 RuntimeCC = abiCC; 4875 4876 BuiltinCC = (getABIKind() == APCS ? 4877 llvm::CallingConv::ARM_APCS : llvm::CallingConv::ARM_AAPCS); 4878 } 4879 4880 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, 4881 bool isVariadic) const { 4882 // 6.1.2.1 The following argument types are VFP CPRCs: 4883 // A single-precision floating-point type (including promoted 4884 // half-precision types); A double-precision floating-point type; 4885 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate 4886 // with a Base Type of a single- or double-precision floating-point type, 4887 // 64-bit containerized vectors or 128-bit containerized vectors with one 4888 // to four Elements. 4889 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic; 4890 4891 Ty = useFirstFieldIfTransparentUnion(Ty); 4892 4893 // Handle illegal vector types here. 4894 if (isIllegalVectorType(Ty)) { 4895 uint64_t Size = getContext().getTypeSize(Ty); 4896 if (Size <= 32) { 4897 llvm::Type *ResType = 4898 llvm::Type::getInt32Ty(getVMContext()); 4899 return ABIArgInfo::getDirect(ResType); 4900 } 4901 if (Size == 64) { 4902 llvm::Type *ResType = llvm::VectorType::get( 4903 llvm::Type::getInt32Ty(getVMContext()), 2); 4904 return ABIArgInfo::getDirect(ResType); 4905 } 4906 if (Size == 128) { 4907 llvm::Type *ResType = llvm::VectorType::get( 4908 llvm::Type::getInt32Ty(getVMContext()), 4); 4909 return ABIArgInfo::getDirect(ResType); 4910 } 4911 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 4912 } 4913 4914 // __fp16 gets passed as if it were an int or float, but with the top 16 bits 4915 // unspecified. This is not done for OpenCL as it handles the half type 4916 // natively, and does not need to interwork with AAPCS code. 4917 if (Ty->isHalfType() && !getContext().getLangOpts().OpenCL) { 4918 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ? 4919 llvm::Type::getFloatTy(getVMContext()) : 4920 llvm::Type::getInt32Ty(getVMContext()); 4921 return ABIArgInfo::getDirect(ResType); 4922 } 4923 4924 if (!isAggregateTypeForABI(Ty)) { 4925 // Treat an enum type as its underlying type. 4926 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { 4927 Ty = EnumTy->getDecl()->getIntegerType(); 4928 } 4929 4930 return (Ty->isPromotableIntegerType() ? 
ABIArgInfo::getExtend() 4931 : ABIArgInfo::getDirect()); 4932 } 4933 4934 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 4935 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 4936 } 4937 4938 // Ignore empty records. 4939 if (isEmptyRecord(getContext(), Ty, true)) 4940 return ABIArgInfo::getIgnore(); 4941 4942 if (IsEffectivelyAAPCS_VFP) { 4943 // Homogeneous Aggregates need to be expanded when we can fit the aggregate 4944 // into VFP registers. 4945 const Type *Base = nullptr; 4946 uint64_t Members = 0; 4947 if (isHomogeneousAggregate(Ty, Base, Members)) { 4948 assert(Base && "Base class should be set for homogeneous aggregate"); 4949 // Base can be a floating-point or a vector. 4950 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); 4951 } 4952 } 4953 4954 // Support byval for ARM. 4955 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at 4956 // most 8-byte. We realign the indirect argument if type alignment is bigger 4957 // than ABI alignment. 4958 uint64_t ABIAlign = 4; 4959 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8; 4960 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 4961 getABIKind() == ARMABIInfo::AAPCS) 4962 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 4963 4964 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) { 4965 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), 4966 /*ByVal=*/true, 4967 /*Realign=*/TyAlign > ABIAlign); 4968 } 4969 4970 // Otherwise, pass by coercing to a structure of the appropriate size. 4971 llvm::Type* ElemTy; 4972 unsigned SizeRegs; 4973 // FIXME: Try to match the types of the arguments more accurately where 4974 // we can. 4975 if (getContext().getTypeAlign(Ty) <= 32) { 4976 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 4977 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 4978 } else { 4979 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 4980 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 4981 } 4982 4983 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs)); 4984 } 4985 4986 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 4987 llvm::LLVMContext &VMContext) { 4988 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 4989 // is called integer-like if its size is less than or equal to one word, and 4990 // the offset of each of its addressable sub-fields is zero. 4991 4992 uint64_t Size = Context.getTypeSize(Ty); 4993 4994 // Check that the type fits in a word. 4995 if (Size > 32) 4996 return false; 4997 4998 // FIXME: Handle vector types! 4999 if (Ty->isVectorType()) 5000 return false; 5001 5002 // Float types are never treated as "integer like". 5003 if (Ty->isRealFloatingType()) 5004 return false; 5005 5006 // If this is a builtin or pointer type then it is ok. 5007 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 5008 return true; 5009 5010 // Small complex integer types are "integer like". 5011 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 5012 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 5013 5014 // Single element and zero sized arrays should be allowed, by the definition 5015 // above, but they are not. 5016 5017 // Otherwise, it must be a record type. 5018 const RecordType *RT = Ty->getAs<RecordType>(); 5019 if (!RT) return false; 5020 5021 // Ignore records with flexible arrays. 
5022 const RecordDecl *RD = RT->getDecl(); 5023 if (RD->hasFlexibleArrayMember()) 5024 return false; 5025 5026 // Check that all sub-fields are at offset 0, and are themselves "integer 5027 // like". 5028 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 5029 5030 bool HadField = false; 5031 unsigned idx = 0; 5032 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 5033 i != e; ++i, ++idx) { 5034 const FieldDecl *FD = *i; 5035 5036 // Bit-fields are not addressable, we only need to verify they are "integer 5037 // like". We still have to disallow a subsequent non-bitfield, for example: 5038 // struct { int : 0; int x } 5039 // is non-integer like according to gcc. 5040 if (FD->isBitField()) { 5041 if (!RD->isUnion()) 5042 HadField = true; 5043 5044 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 5045 return false; 5046 5047 continue; 5048 } 5049 5050 // Check if this field is at offset 0. 5051 if (Layout.getFieldOffset(idx) != 0) 5052 return false; 5053 5054 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 5055 return false; 5056 5057 // Only allow at most one field in a structure. This doesn't match the 5058 // wording above, but follows gcc in situations with a field following an 5059 // empty structure. 5060 if (!RD->isUnion()) { 5061 if (HadField) 5062 return false; 5063 5064 HadField = true; 5065 } 5066 } 5067 5068 return true; 5069 } 5070 5071 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, 5072 bool isVariadic) const { 5073 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic; 5074 5075 if (RetTy->isVoidType()) 5076 return ABIArgInfo::getIgnore(); 5077 5078 // Large vector types should be returned via memory. 5079 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) { 5080 return getNaturalAlignIndirect(RetTy); 5081 } 5082 5083 // __fp16 gets returned as if it were an int or float, but with the top 16 5084 // bits unspecified. This is not done for OpenCL as it handles the half type 5085 // natively, and does not need to interwork with AAPCS code. 5086 if (RetTy->isHalfType() && !getContext().getLangOpts().OpenCL) { 5087 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ? 5088 llvm::Type::getFloatTy(getVMContext()) : 5089 llvm::Type::getInt32Ty(getVMContext()); 5090 return ABIArgInfo::getDirect(ResType); 5091 } 5092 5093 if (!isAggregateTypeForABI(RetTy)) { 5094 // Treat an enum type as its underlying type. 5095 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 5096 RetTy = EnumTy->getDecl()->getIntegerType(); 5097 5098 return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend() 5099 : ABIArgInfo::getDirect(); 5100 } 5101 5102 // Are we following APCS? 5103 if (getABIKind() == APCS) { 5104 if (isEmptyRecord(getContext(), RetTy, false)) 5105 return ABIArgInfo::getIgnore(); 5106 5107 // Complex types are all returned as packed integers. 5108 // 5109 // FIXME: Consider using 2 x vector types if the back end handles them 5110 // correctly. 5111 if (RetTy->isAnyComplexType()) 5112 return ABIArgInfo::getDirect(llvm::IntegerType::get( 5113 getVMContext(), getContext().getTypeSize(RetTy))); 5114 5115 // Integer like structures are returned in r0. 5116 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 5117 // Return in the smallest viable integer type. 
5118 uint64_t Size = getContext().getTypeSize(RetTy); 5119 if (Size <= 8) 5120 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 5121 if (Size <= 16) 5122 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 5123 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 5124 } 5125 5126 // Otherwise return in memory. 5127 return getNaturalAlignIndirect(RetTy); 5128 } 5129 5130 // Otherwise this is an AAPCS variant. 5131 5132 if (isEmptyRecord(getContext(), RetTy, true)) 5133 return ABIArgInfo::getIgnore(); 5134 5135 // Check for homogeneous aggregates with AAPCS-VFP. 5136 if (IsEffectivelyAAPCS_VFP) { 5137 const Type *Base = nullptr; 5138 uint64_t Members; 5139 if (isHomogeneousAggregate(RetTy, Base, Members)) { 5140 assert(Base && "Base class should be set for homogeneous aggregate"); 5141 // Homogeneous Aggregates are returned directly. 5142 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); 5143 } 5144 } 5145 5146 // Aggregates <= 4 bytes are returned in r0; other aggregates 5147 // are returned indirectly. 5148 uint64_t Size = getContext().getTypeSize(RetTy); 5149 if (Size <= 32) { 5150 if (getDataLayout().isBigEndian()) 5151 // Return in 32 bit integer integer type (as if loaded by LDR, AAPCS 5.4) 5152 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 5153 5154 // Return in the smallest viable integer type. 5155 if (Size <= 8) 5156 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 5157 if (Size <= 16) 5158 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 5159 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 5160 } 5161 5162 return getNaturalAlignIndirect(RetTy); 5163 } 5164 5165 /// isIllegalVector - check whether Ty is an illegal vector type. 5166 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const { 5167 if (const VectorType *VT = Ty->getAs<VectorType>()) { 5168 // Check whether VT is legal. 5169 unsigned NumElements = VT->getNumElements(); 5170 uint64_t Size = getContext().getTypeSize(VT); 5171 // NumElements should be power of 2. 5172 if ((NumElements & (NumElements - 1)) != 0) 5173 return true; 5174 // Size should be greater than 32 bits. 5175 return Size <= 32; 5176 } 5177 return false; 5178 } 5179 5180 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 5181 // Homogeneous aggregates for AAPCS-VFP must have base types of float, 5182 // double, or 64-bit or 128-bit vectors. 5183 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 5184 if (BT->getKind() == BuiltinType::Float || 5185 BT->getKind() == BuiltinType::Double || 5186 BT->getKind() == BuiltinType::LongDouble) 5187 return true; 5188 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 5189 unsigned VecSize = getContext().getTypeSize(VT); 5190 if (VecSize == 64 || VecSize == 128) 5191 return true; 5192 } 5193 return false; 5194 } 5195 5196 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 5197 uint64_t Members) const { 5198 return Members <= 4; 5199 } 5200 5201 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5202 QualType Ty) const { 5203 CharUnits SlotSize = CharUnits::fromQuantity(4); 5204 5205 // Empty records are ignored for parameter passing purposes. 
5206 if (isEmptyRecord(getContext(), Ty, true)) { 5207 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize); 5208 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); 5209 return Addr; 5210 } 5211 5212 auto TyInfo = getContext().getTypeInfoInChars(Ty); 5213 CharUnits TyAlignForABI = TyInfo.second; 5214 5215 // Use indirect if size of the illegal vector is bigger than 16 bytes. 5216 bool IsIndirect = false; 5217 if (TyInfo.first > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) { 5218 IsIndirect = true; 5219 5220 // Otherwise, bound the type's ABI alignment. 5221 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for 5222 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte. 5223 // Our callers should be prepared to handle an under-aligned address. 5224 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP || 5225 getABIKind() == ARMABIInfo::AAPCS) { 5226 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4)); 5227 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8)); 5228 } else { 5229 TyAlignForABI = CharUnits::fromQuantity(4); 5230 } 5231 TyInfo.second = TyAlignForABI; 5232 5233 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, 5234 SlotSize, /*AllowHigherAlign*/ true); 5235 } 5236 5237 //===----------------------------------------------------------------------===// 5238 // NVPTX ABI Implementation 5239 //===----------------------------------------------------------------------===// 5240 5241 namespace { 5242 5243 class NVPTXABIInfo : public ABIInfo { 5244 public: 5245 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 5246 5247 ABIArgInfo classifyReturnType(QualType RetTy) const; 5248 ABIArgInfo classifyArgumentType(QualType Ty) const; 5249 5250 void computeInfo(CGFunctionInfo &FI) const override; 5251 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5252 QualType Ty) const override; 5253 }; 5254 5255 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { 5256 public: 5257 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT) 5258 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {} 5259 5260 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5261 CodeGen::CodeGenModule &M) const override; 5262 private: 5263 // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the 5264 // resulting MDNode to the nvvm.annotations MDNode. 5265 static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand); 5266 }; 5267 5268 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const { 5269 if (RetTy->isVoidType()) 5270 return ABIArgInfo::getIgnore(); 5271 5272 // note: this is different from default ABI 5273 if (!RetTy->isScalarType()) 5274 return ABIArgInfo::getDirect(); 5275 5276 // Treat an enum type as its underlying type. 5277 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 5278 RetTy = EnumTy->getDecl()->getIntegerType(); 5279 5280 return (RetTy->isPromotableIntegerType() ? 5281 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 5282 } 5283 5284 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const { 5285 // Treat an enum type as its underlying type. 5286 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 5287 Ty = EnumTy->getDecl()->getIntegerType(); 5288 5289 // Return aggregates type as indirect by value 5290 if (isAggregateTypeForABI(Ty)) 5291 return getNaturalAlignIndirect(Ty, /* byval */ true); 5292 5293 return (Ty->isPromotableIntegerType() ? 
5294 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 5295 } 5296 5297 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const { 5298 if (!getCXXABI().classifyReturnType(FI)) 5299 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 5300 for (auto &I : FI.arguments()) 5301 I.info = classifyArgumentType(I.type); 5302 5303 // Always honor user-specified calling convention. 5304 if (FI.getCallingConvention() != llvm::CallingConv::C) 5305 return; 5306 5307 FI.setEffectiveCallingConvention(getRuntimeCC()); 5308 } 5309 5310 Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5311 QualType Ty) const { 5312 llvm_unreachable("NVPTX does not support varargs"); 5313 } 5314 5315 void NVPTXTargetCodeGenInfo:: 5316 setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5317 CodeGen::CodeGenModule &M) const{ 5318 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 5319 if (!FD) return; 5320 5321 llvm::Function *F = cast<llvm::Function>(GV); 5322 5323 // Perform special handling in OpenCL mode 5324 if (M.getLangOpts().OpenCL) { 5325 // Use OpenCL function attributes to check for kernel functions 5326 // By default, all functions are device functions 5327 if (FD->hasAttr<OpenCLKernelAttr>()) { 5328 // OpenCL __kernel functions get kernel metadata 5329 // Create !{<func-ref>, metadata !"kernel", i32 1} node 5330 addNVVMMetadata(F, "kernel", 1); 5331 // And kernel functions are not subject to inlining 5332 F->addFnAttr(llvm::Attribute::NoInline); 5333 } 5334 } 5335 5336 // Perform special handling in CUDA mode. 5337 if (M.getLangOpts().CUDA) { 5338 // CUDA __global__ functions get a kernel metadata entry. Since 5339 // __global__ functions cannot be called from the device, we do not 5340 // need to set the noinline attribute. 5341 if (FD->hasAttr<CUDAGlobalAttr>()) { 5342 // Create !{<func-ref>, metadata !"kernel", i32 1} node 5343 addNVVMMetadata(F, "kernel", 1); 5344 } 5345 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) { 5346 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node 5347 llvm::APSInt MaxThreads(32); 5348 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext()); 5349 if (MaxThreads > 0) 5350 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue()); 5351 5352 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was 5353 // not specified in __launch_bounds__ or if the user specified a 0 value, 5354 // we don't have to add a PTX directive. 
5355 if (Attr->getMinBlocks()) { 5356 llvm::APSInt MinBlocks(32); 5357 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext()); 5358 if (MinBlocks > 0) 5359 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node 5360 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue()); 5361 } 5362 } 5363 } 5364 } 5365 5366 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name, 5367 int Operand) { 5368 llvm::Module *M = F->getParent(); 5369 llvm::LLVMContext &Ctx = M->getContext(); 5370 5371 // Get "nvvm.annotations" metadata node 5372 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations"); 5373 5374 llvm::Metadata *MDVals[] = { 5375 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name), 5376 llvm::ConstantAsMetadata::get( 5377 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))}; 5378 // Append metadata to nvvm.annotations 5379 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 5380 } 5381 } 5382 5383 //===----------------------------------------------------------------------===// 5384 // SystemZ ABI Implementation 5385 //===----------------------------------------------------------------------===// 5386 5387 namespace { 5388 5389 class SystemZABIInfo : public ABIInfo { 5390 bool HasVector; 5391 5392 public: 5393 SystemZABIInfo(CodeGenTypes &CGT, bool HV) 5394 : ABIInfo(CGT), HasVector(HV) {} 5395 5396 bool isPromotableIntegerType(QualType Ty) const; 5397 bool isCompoundType(QualType Ty) const; 5398 bool isVectorArgumentType(QualType Ty) const; 5399 bool isFPArgumentType(QualType Ty) const; 5400 QualType GetSingleElementType(QualType Ty) const; 5401 5402 ABIArgInfo classifyReturnType(QualType RetTy) const; 5403 ABIArgInfo classifyArgumentType(QualType ArgTy) const; 5404 5405 void computeInfo(CGFunctionInfo &FI) const override { 5406 if (!getCXXABI().classifyReturnType(FI)) 5407 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 5408 for (auto &I : FI.arguments()) 5409 I.info = classifyArgumentType(I.type); 5410 } 5411 5412 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5413 QualType Ty) const override; 5414 }; 5415 5416 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo { 5417 public: 5418 SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector) 5419 : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {} 5420 }; 5421 5422 } 5423 5424 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const { 5425 // Treat an enum type as its underlying type. 5426 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 5427 Ty = EnumTy->getDecl()->getIntegerType(); 5428 5429 // Promotable integer types are required to be promoted by the ABI. 5430 if (Ty->isPromotableIntegerType()) 5431 return true; 5432 5433 // 32-bit values must also be promoted. 
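  // (Plain 'int' and 'unsigned int' are widened to the full 64-bit register,
  // even though the C integer promotions would leave them at 32 bits.)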
5434 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 5435 switch (BT->getKind()) { 5436 case BuiltinType::Int: 5437 case BuiltinType::UInt: 5438 return true; 5439 default: 5440 return false; 5441 } 5442 return false; 5443 } 5444 5445 bool SystemZABIInfo::isCompoundType(QualType Ty) const { 5446 return (Ty->isAnyComplexType() || 5447 Ty->isVectorType() || 5448 isAggregateTypeForABI(Ty)); 5449 } 5450 5451 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const { 5452 return (HasVector && 5453 Ty->isVectorType() && 5454 getContext().getTypeSize(Ty) <= 128); 5455 } 5456 5457 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const { 5458 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 5459 switch (BT->getKind()) { 5460 case BuiltinType::Float: 5461 case BuiltinType::Double: 5462 return true; 5463 default: 5464 return false; 5465 } 5466 5467 return false; 5468 } 5469 5470 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const { 5471 if (const RecordType *RT = Ty->getAsStructureType()) { 5472 const RecordDecl *RD = RT->getDecl(); 5473 QualType Found; 5474 5475 // If this is a C++ record, check the bases first. 5476 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 5477 for (const auto &I : CXXRD->bases()) { 5478 QualType Base = I.getType(); 5479 5480 // Empty bases don't affect things either way. 5481 if (isEmptyRecord(getContext(), Base, true)) 5482 continue; 5483 5484 if (!Found.isNull()) 5485 return Ty; 5486 Found = GetSingleElementType(Base); 5487 } 5488 5489 // Check the fields. 5490 for (const auto *FD : RD->fields()) { 5491 // For compatibility with GCC, ignore empty bitfields in C++ mode. 5492 // Unlike isSingleElementStruct(), empty structure and array fields 5493 // do count. So do anonymous bitfields that aren't zero-sized. 5494 if (getContext().getLangOpts().CPlusPlus && 5495 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0) 5496 continue; 5497 5498 // Unlike isSingleElementStruct(), arrays do not count. 5499 // Nested structures still do though. 5500 if (!Found.isNull()) 5501 return Ty; 5502 Found = GetSingleElementType(FD->getType()); 5503 } 5504 5505 // Unlike isSingleElementStruct(), trailing padding is allowed. 5506 // An 8-byte aligned struct s { float f; } is passed as a double. 5507 if (!Found.isNull()) 5508 return Found; 5509 } 5510 5511 return Ty; 5512 } 5513 5514 Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5515 QualType Ty) const { 5516 // Assume that va_list type is correct; should be pointer to LLVM type: 5517 // struct { 5518 // i64 __gpr; 5519 // i64 __fpr; 5520 // i8 *__overflow_arg_area; 5521 // i8 *__reg_save_area; 5522 // }; 5523 5524 // Every non-vector argument occupies 8 bytes and is passed by preference 5525 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are 5526 // always passed on the stack. 
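  // The lowering below distinguishes three cases: vector arguments, which are
  // always taken from the overflow (stack) area; non-vector arguments that
  // still fit in the GPR/FPR register save area; and non-vector arguments for
  // which the registers are exhausted, which fall back to the overflow area.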
5527 Ty = getContext().getCanonicalType(Ty); 5528 auto TyInfo = getContext().getTypeInfoInChars(Ty); 5529 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty); 5530 llvm::Type *DirectTy = ArgTy; 5531 ABIArgInfo AI = classifyArgumentType(Ty); 5532 bool IsIndirect = AI.isIndirect(); 5533 bool InFPRs = false; 5534 bool IsVector = false; 5535 CharUnits UnpaddedSize; 5536 CharUnits DirectAlign; 5537 if (IsIndirect) { 5538 DirectTy = llvm::PointerType::getUnqual(DirectTy); 5539 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8); 5540 } else { 5541 if (AI.getCoerceToType()) 5542 ArgTy = AI.getCoerceToType(); 5543 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy(); 5544 IsVector = ArgTy->isVectorTy(); 5545 UnpaddedSize = TyInfo.first; 5546 DirectAlign = TyInfo.second; 5547 } 5548 CharUnits PaddedSize = CharUnits::fromQuantity(8); 5549 if (IsVector && UnpaddedSize > PaddedSize) 5550 PaddedSize = CharUnits::fromQuantity(16); 5551 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size."); 5552 5553 CharUnits Padding = (PaddedSize - UnpaddedSize); 5554 5555 llvm::Type *IndexTy = CGF.Int64Ty; 5556 llvm::Value *PaddedSizeV = 5557 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity()); 5558 5559 if (IsVector) { 5560 // Work out the address of a vector argument on the stack. 5561 // Vector arguments are always passed in the high bits of a 5562 // single (8 byte) or double (16 byte) stack slot. 5563 Address OverflowArgAreaPtr = 5564 CGF.Builder.CreateStructGEP(VAListAddr, 2, CharUnits::fromQuantity(16), 5565 "overflow_arg_area_ptr"); 5566 Address OverflowArgArea = 5567 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), 5568 TyInfo.second); 5569 Address MemAddr = 5570 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr"); 5571 5572 // Update overflow_arg_area_ptr pointer 5573 llvm::Value *NewOverflowArgArea = 5574 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV, 5575 "overflow_arg_area"); 5576 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 5577 5578 return MemAddr; 5579 } 5580 5581 assert(PaddedSize.getQuantity() == 8); 5582 5583 unsigned MaxRegs, RegCountField, RegSaveIndex; 5584 CharUnits RegPadding; 5585 if (InFPRs) { 5586 MaxRegs = 4; // Maximum of 4 FPR arguments 5587 RegCountField = 1; // __fpr 5588 RegSaveIndex = 16; // save offset for f0 5589 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR 5590 } else { 5591 MaxRegs = 5; // Maximum of 5 GPR arguments 5592 RegCountField = 0; // __gpr 5593 RegSaveIndex = 2; // save offset for r2 5594 RegPadding = Padding; // values are passed in the low bits of a GPR 5595 } 5596 5597 Address RegCountPtr = CGF.Builder.CreateStructGEP( 5598 VAListAddr, RegCountField, RegCountField * CharUnits::fromQuantity(8), 5599 "reg_count_ptr"); 5600 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count"); 5601 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs); 5602 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV, 5603 "fits_in_regs"); 5604 5605 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 5606 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 5607 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 5608 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 5609 5610 // Emit code to load the value if it was passed in registers. 5611 CGF.EmitBlock(InRegBlock); 5612 5613 // Work out the address of an argument register. 
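  // Its offset from __reg_save_area is
  //   RegSaveIndex * PaddedSize + RegCount * PaddedSize + RegPadding,
  // i.e. the fixed save offset of the first argument register plus one
  // 8-byte slot per argument register already consumed.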
5614 llvm::Value *ScaledRegCount = 5615 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count"); 5616 llvm::Value *RegBase = 5617 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity() 5618 + RegPadding.getQuantity()); 5619 llvm::Value *RegOffset = 5620 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset"); 5621 Address RegSaveAreaPtr = 5622 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24), 5623 "reg_save_area_ptr"); 5624 llvm::Value *RegSaveArea = 5625 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area"); 5626 Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset, 5627 "raw_reg_addr"), 5628 PaddedSize); 5629 Address RegAddr = 5630 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr"); 5631 5632 // Update the register count 5633 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1); 5634 llvm::Value *NewRegCount = 5635 CGF.Builder.CreateAdd(RegCount, One, "reg_count"); 5636 CGF.Builder.CreateStore(NewRegCount, RegCountPtr); 5637 CGF.EmitBranch(ContBlock); 5638 5639 // Emit code to load the value if it was passed in memory. 5640 CGF.EmitBlock(InMemBlock); 5641 5642 // Work out the address of a stack argument. 5643 Address OverflowArgAreaPtr = CGF.Builder.CreateStructGEP( 5644 VAListAddr, 2, CharUnits::fromQuantity(16), "overflow_arg_area_ptr"); 5645 Address OverflowArgArea = 5646 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), 5647 PaddedSize); 5648 Address RawMemAddr = 5649 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr"); 5650 Address MemAddr = 5651 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr"); 5652 5653 // Update overflow_arg_area_ptr pointer 5654 llvm::Value *NewOverflowArgArea = 5655 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV, 5656 "overflow_arg_area"); 5657 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 5658 CGF.EmitBranch(ContBlock); 5659 5660 // Return the appropriate result. 5661 CGF.EmitBlock(ContBlock); 5662 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, 5663 MemAddr, InMemBlock, "va_arg.addr"); 5664 5665 if (IsIndirect) 5666 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"), 5667 TyInfo.second); 5668 5669 return ResAddr; 5670 } 5671 5672 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const { 5673 if (RetTy->isVoidType()) 5674 return ABIArgInfo::getIgnore(); 5675 if (isVectorArgumentType(RetTy)) 5676 return ABIArgInfo::getDirect(); 5677 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64) 5678 return getNaturalAlignIndirect(RetTy); 5679 return (isPromotableIntegerType(RetTy) ? 5680 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 5681 } 5682 5683 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const { 5684 // Handle the generic C++ ABI. 5685 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 5686 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 5687 5688 // Integers and enums are extended to full register width. 5689 if (isPromotableIntegerType(Ty)) 5690 return ABIArgInfo::getExtend(); 5691 5692 // Handle vector types and vector-like structure types. Note that 5693 // as opposed to float-like structure types, we do not allow any 5694 // padding for vector-like structures, so verify the sizes match. 
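  // For example, a struct whose only member is a 16-byte vector has the same
  // size as the vector itself and is passed directly as that vector type; any
  // additional field or trailing padding makes the sizes differ, and the
  // struct falls through to the size-based rules below.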
5695 uint64_t Size = getContext().getTypeSize(Ty); 5696 QualType SingleElementTy = GetSingleElementType(Ty); 5697 if (isVectorArgumentType(SingleElementTy) && 5698 getContext().getTypeSize(SingleElementTy) == Size) 5699 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy)); 5700 5701 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly. 5702 if (Size != 8 && Size != 16 && Size != 32 && Size != 64) 5703 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 5704 5705 // Handle small structures. 5706 if (const RecordType *RT = Ty->getAs<RecordType>()) { 5707 // Structures with flexible arrays have variable length, so really 5708 // fail the size test above. 5709 const RecordDecl *RD = RT->getDecl(); 5710 if (RD->hasFlexibleArrayMember()) 5711 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 5712 5713 // The structure is passed as an unextended integer, a float, or a double. 5714 llvm::Type *PassTy; 5715 if (isFPArgumentType(SingleElementTy)) { 5716 assert(Size == 32 || Size == 64); 5717 if (Size == 32) 5718 PassTy = llvm::Type::getFloatTy(getVMContext()); 5719 else 5720 PassTy = llvm::Type::getDoubleTy(getVMContext()); 5721 } else 5722 PassTy = llvm::IntegerType::get(getVMContext(), Size); 5723 return ABIArgInfo::getDirect(PassTy); 5724 } 5725 5726 // Non-structure compounds are passed indirectly. 5727 if (isCompoundType(Ty)) 5728 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 5729 5730 return ABIArgInfo::getDirect(nullptr); 5731 } 5732 5733 //===----------------------------------------------------------------------===// 5734 // MSP430 ABI Implementation 5735 //===----------------------------------------------------------------------===// 5736 5737 namespace { 5738 5739 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 5740 public: 5741 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 5742 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 5743 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5744 CodeGen::CodeGenModule &M) const override; 5745 }; 5746 5747 } 5748 5749 void MSP430TargetCodeGenInfo::setTargetAttributes(const Decl *D, 5750 llvm::GlobalValue *GV, 5751 CodeGen::CodeGenModule &M) const { 5752 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 5753 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 5754 // Handle 'interrupt' attribute: 5755 llvm::Function *F = cast<llvm::Function>(GV); 5756 5757 // Step 1: Set ISR calling convention. 5758 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 5759 5760 // Step 2: Add attributes goodness. 5761 F->addFnAttr(llvm::Attribute::NoInline); 5762 5763 // Step 3: Emit ISR vector alias. 5764 unsigned Num = attr->getNumber() / 2; 5765 llvm::GlobalAlias::create(llvm::Function::ExternalLinkage, 5766 "__isr_" + Twine(Num), F); 5767 } 5768 } 5769 } 5770 5771 //===----------------------------------------------------------------------===// 5772 // MIPS ABI Implementation. This works for both little-endian and 5773 // big-endian variants. 
5774 //===----------------------------------------------------------------------===// 5775 5776 namespace { 5777 class MipsABIInfo : public ABIInfo { 5778 bool IsO32; 5779 unsigned MinABIStackAlignInBytes, StackAlignInBytes; 5780 void CoerceToIntArgs(uint64_t TySize, 5781 SmallVectorImpl<llvm::Type *> &ArgList) const; 5782 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; 5783 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 5784 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 5785 public: 5786 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 5787 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), 5788 StackAlignInBytes(IsO32 ? 8 : 16) {} 5789 5790 ABIArgInfo classifyReturnType(QualType RetTy) const; 5791 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 5792 void computeInfo(CGFunctionInfo &FI) const override; 5793 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5794 QualType Ty) const override; 5795 bool shouldSignExtUnsignedType(QualType Ty) const override; 5796 }; 5797 5798 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 5799 unsigned SizeOfUnwindException; 5800 public: 5801 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 5802 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 5803 SizeOfUnwindException(IsO32 ? 24 : 32) {} 5804 5805 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 5806 return 29; 5807 } 5808 5809 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5810 CodeGen::CodeGenModule &CGM) const override { 5811 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 5812 if (!FD) return; 5813 llvm::Function *Fn = cast<llvm::Function>(GV); 5814 if (FD->hasAttr<Mips16Attr>()) { 5815 Fn->addFnAttr("mips16"); 5816 } 5817 else if (FD->hasAttr<NoMips16Attr>()) { 5818 Fn->addFnAttr("nomips16"); 5819 } 5820 } 5821 5822 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 5823 llvm::Value *Address) const override; 5824 5825 unsigned getSizeOfUnwindException() const override { 5826 return SizeOfUnwindException; 5827 } 5828 }; 5829 } 5830 5831 void MipsABIInfo::CoerceToIntArgs( 5832 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const { 5833 llvm::IntegerType *IntTy = 5834 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 5835 5836 // Add (TySize / MinABIStackAlignInBytes) args of IntTy. 5837 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) 5838 ArgList.push_back(IntTy); 5839 5840 // If necessary, add one more integer type to ArgList. 5841 unsigned R = TySize % (MinABIStackAlignInBytes * 8); 5842 5843 if (R) 5844 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 5845 } 5846 5847 // In N32/64, an aligned double precision floating point field is passed in 5848 // a register. 5849 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { 5850 SmallVector<llvm::Type*, 8> ArgList, IntArgList; 5851 5852 if (IsO32) { 5853 CoerceToIntArgs(TySize, ArgList); 5854 return llvm::StructType::get(getVMContext(), ArgList); 5855 } 5856 5857 if (Ty->isComplexType()) 5858 return CGT.ConvertType(Ty); 5859 5860 const RecordType *RT = Ty->getAs<RecordType>(); 5861 5862 // Unions/vectors are passed in integer registers. 
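  // (For instance, on N32/N64 a 16-byte union is coerced to { i64, i64 }.)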
5863 if (!RT || !RT->isStructureOrClassType()) { 5864 CoerceToIntArgs(TySize, ArgList); 5865 return llvm::StructType::get(getVMContext(), ArgList); 5866 } 5867 5868 const RecordDecl *RD = RT->getDecl(); 5869 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 5870 assert(!(TySize % 8) && "Size of structure must be multiple of 8."); 5871 5872 uint64_t LastOffset = 0; 5873 unsigned idx = 0; 5874 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 5875 5876 // Iterate over fields in the struct/class and check if there are any aligned 5877 // double fields. 5878 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 5879 i != e; ++i, ++idx) { 5880 const QualType Ty = i->getType(); 5881 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 5882 5883 if (!BT || BT->getKind() != BuiltinType::Double) 5884 continue; 5885 5886 uint64_t Offset = Layout.getFieldOffset(idx); 5887 if (Offset % 64) // Ignore doubles that are not aligned. 5888 continue; 5889 5890 // Add ((Offset - LastOffset) / 64) args of type i64. 5891 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 5892 ArgList.push_back(I64); 5893 5894 // Add double type. 5895 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 5896 LastOffset = Offset + 64; 5897 } 5898 5899 CoerceToIntArgs(TySize - LastOffset, IntArgList); 5900 ArgList.append(IntArgList.begin(), IntArgList.end()); 5901 5902 return llvm::StructType::get(getVMContext(), ArgList); 5903 } 5904 5905 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset, 5906 uint64_t Offset) const { 5907 if (OrigOffset + MinABIStackAlignInBytes > Offset) 5908 return nullptr; 5909 5910 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8); 5911 } 5912 5913 ABIArgInfo 5914 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 5915 Ty = useFirstFieldIfTransparentUnion(Ty); 5916 5917 uint64_t OrigOffset = Offset; 5918 uint64_t TySize = getContext().getTypeSize(Ty); 5919 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 5920 5921 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), 5922 (uint64_t)StackAlignInBytes); 5923 unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align); 5924 Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8; 5925 5926 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { 5927 // Ignore empty aggregates. 5928 if (TySize == 0) 5929 return ABIArgInfo::getIgnore(); 5930 5931 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 5932 Offset = OrigOffset + MinABIStackAlignInBytes; 5933 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 5934 } 5935 5936 // If we have reached here, aggregates are passed directly by coercing to 5937 // another structure type. Padding is inserted if the offset of the 5938 // aggregate is unaligned. 5939 ABIArgInfo ArgInfo = 5940 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, 5941 getPaddingType(OrigOffset, CurrOffset)); 5942 ArgInfo.setInReg(true); 5943 return ArgInfo; 5944 } 5945 5946 // Treat an enum type as its underlying type. 5947 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 5948 Ty = EnumTy->getDecl()->getIntegerType(); 5949 5950 // All integral types are promoted to the GPR width. 5951 if (Ty->isIntegralOrEnumerationType()) 5952 return ABIArgInfo::getExtend(); 5953 5954 return ABIArgInfo::getDirect( 5955 nullptr, 0, IsO32 ? 
nullptr : getPaddingType(OrigOffset, CurrOffset)); 5956 } 5957 5958 llvm::Type* 5959 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 5960 const RecordType *RT = RetTy->getAs<RecordType>(); 5961 SmallVector<llvm::Type*, 8> RTList; 5962 5963 if (RT && RT->isStructureOrClassType()) { 5964 const RecordDecl *RD = RT->getDecl(); 5965 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 5966 unsigned FieldCnt = Layout.getFieldCount(); 5967 5968 // N32/64 returns struct/classes in floating point registers if the 5969 // following conditions are met: 5970 // 1. The size of the struct/class is no larger than 128-bit. 5971 // 2. The struct/class has one or two fields all of which are floating 5972 // point types. 5973 // 3. The offset of the first field is zero (this follows what gcc does). 5974 // 5975 // Any other composite results are returned in integer registers. 5976 // 5977 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 5978 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 5979 for (; b != e; ++b) { 5980 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 5981 5982 if (!BT || !BT->isFloatingPoint()) 5983 break; 5984 5985 RTList.push_back(CGT.ConvertType(b->getType())); 5986 } 5987 5988 if (b == e) 5989 return llvm::StructType::get(getVMContext(), RTList, 5990 RD->hasAttr<PackedAttr>()); 5991 5992 RTList.clear(); 5993 } 5994 } 5995 5996 CoerceToIntArgs(Size, RTList); 5997 return llvm::StructType::get(getVMContext(), RTList); 5998 } 5999 6000 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 6001 uint64_t Size = getContext().getTypeSize(RetTy); 6002 6003 if (RetTy->isVoidType()) 6004 return ABIArgInfo::getIgnore(); 6005 6006 // O32 doesn't treat zero-sized structs differently from other structs. 6007 // However, N32/N64 ignores zero sized return values. 6008 if (!IsO32 && Size == 0) 6009 return ABIArgInfo::getIgnore(); 6010 6011 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 6012 if (Size <= 128) { 6013 if (RetTy->isAnyComplexType()) 6014 return ABIArgInfo::getDirect(); 6015 6016 // O32 returns integer vectors in registers and N32/N64 returns all small 6017 // aggregates in registers. 6018 if (!IsO32 || 6019 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) { 6020 ABIArgInfo ArgInfo = 6021 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 6022 ArgInfo.setInReg(true); 6023 return ArgInfo; 6024 } 6025 } 6026 6027 return getNaturalAlignIndirect(RetTy); 6028 } 6029 6030 // Treat an enum type as its underlying type. 6031 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 6032 RetTy = EnumTy->getDecl()->getIntegerType(); 6033 6034 return (RetTy->isPromotableIntegerType() ? 6035 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 6036 } 6037 6038 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 6039 ABIArgInfo &RetInfo = FI.getReturnInfo(); 6040 if (!getCXXABI().classifyReturnType(FI)) 6041 RetInfo = classifyReturnType(FI.getReturnType()); 6042 6043 // Check if a pointer to an aggregate is passed as a hidden argument. 6044 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0; 6045 6046 for (auto &I : FI.arguments()) 6047 I.info = classifyArgumentType(I.type, Offset); 6048 } 6049 6050 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6051 QualType OrigTy) const { 6052 QualType Ty = OrigTy; 6053 6054 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64. 
6055 // Pointers are also promoted in the same way but this only matters for N32. 6056 unsigned SlotSizeInBits = IsO32 ? 32 : 64; 6057 unsigned PtrWidth = getTarget().getPointerWidth(0); 6058 bool DidPromote = false; 6059 if ((Ty->isIntegerType() && 6060 getContext().getIntWidth(Ty) < SlotSizeInBits) || 6061 (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) { 6062 DidPromote = true; 6063 Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits, 6064 Ty->isSignedIntegerType()); 6065 } 6066 6067 auto TyInfo = getContext().getTypeInfoInChars(Ty); 6068 6069 // The alignment of things in the argument area is never larger than 6070 // StackAlignInBytes. 6071 TyInfo.second = 6072 std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes)); 6073 6074 // MinABIStackAlignInBytes is the size of argument slots on the stack. 6075 CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes); 6076 6077 Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 6078 TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true); 6079 6080 6081 // If there was a promotion, "unpromote" into a temporary. 6082 // TODO: can we just use a pointer into a subset of the original slot? 6083 if (DidPromote) { 6084 Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp"); 6085 llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr); 6086 6087 // Truncate down to the right width. 6088 llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType() 6089 : CGF.IntPtrTy); 6090 llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy); 6091 if (OrigTy->isPointerType()) 6092 V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType()); 6093 6094 CGF.Builder.CreateStore(V, Temp); 6095 Addr = Temp; 6096 } 6097 6098 return Addr; 6099 } 6100 6101 bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const { 6102 int TySize = getContext().getTypeSize(Ty); 6103 6104 // MIPS64 ABI requires unsigned 32 bit integers to be sign extended. 6105 if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32) 6106 return true; 6107 6108 return false; 6109 } 6110 6111 bool 6112 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 6113 llvm::Value *Address) const { 6114 // This information comes from gcc's implementation, which seems to 6115 // as canonical as it gets. 6116 6117 // Everything on MIPS is 4 bytes. Double-precision FP registers 6118 // are aliased to pairs of single-precision FP registers. 6119 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 6120 6121 // 0-31 are the general purpose registers, $0 - $31. 6122 // 32-63 are the floating-point registers, $f0 - $f31. 6123 // 64 and 65 are the multiply/divide registers, $hi and $lo. 6124 // 66 is the (notional, I think) register for signal-handler return. 6125 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65); 6126 6127 // 67-74 are the floating-point status registers, $fcc0 - $fcc7. 6128 // They are one bit wide and ignored here. 6129 6130 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31. 6131 // (coprocessor 1 is the FP unit) 6132 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31. 6133 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31. 6134 // 176-181 are the DSP accumulator registers. 6135 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181); 6136 return false; 6137 } 6138 6139 //===----------------------------------------------------------------------===// 6140 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults. 
6141 // Currently subclassed only to implement custom OpenCL C function attribute 6142 // handling. 6143 //===----------------------------------------------------------------------===// 6144 6145 namespace { 6146 6147 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { 6148 public: 6149 TCETargetCodeGenInfo(CodeGenTypes &CGT) 6150 : DefaultTargetCodeGenInfo(CGT) {} 6151 6152 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6153 CodeGen::CodeGenModule &M) const override; 6154 }; 6155 6156 void TCETargetCodeGenInfo::setTargetAttributes( 6157 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { 6158 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 6159 if (!FD) return; 6160 6161 llvm::Function *F = cast<llvm::Function>(GV); 6162 6163 if (M.getLangOpts().OpenCL) { 6164 if (FD->hasAttr<OpenCLKernelAttr>()) { 6165 // OpenCL C Kernel functions are not subject to inlining 6166 F->addFnAttr(llvm::Attribute::NoInline); 6167 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>(); 6168 if (Attr) { 6169 // Convert the reqd_work_group_size() attributes to metadata. 6170 llvm::LLVMContext &Context = F->getContext(); 6171 llvm::NamedMDNode *OpenCLMetadata = 6172 M.getModule().getOrInsertNamedMetadata( 6173 "opencl.kernel_wg_size_info"); 6174 6175 SmallVector<llvm::Metadata *, 5> Operands; 6176 Operands.push_back(llvm::ConstantAsMetadata::get(F)); 6177 6178 Operands.push_back( 6179 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 6180 M.Int32Ty, llvm::APInt(32, Attr->getXDim())))); 6181 Operands.push_back( 6182 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 6183 M.Int32Ty, llvm::APInt(32, Attr->getYDim())))); 6184 Operands.push_back( 6185 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 6186 M.Int32Ty, llvm::APInt(32, Attr->getZDim())))); 6187 6188 // Add a boolean constant operand for "required" (true) or "hint" 6189 // (false) for implementing the work_group_size_hint attr later. 6190 // Currently always true as the hint is not yet implemented. 
6191 Operands.push_back( 6192 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context))); 6193 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); 6194 } 6195 } 6196 } 6197 } 6198 6199 } 6200 6201 //===----------------------------------------------------------------------===// 6202 // Hexagon ABI Implementation 6203 //===----------------------------------------------------------------------===// 6204 6205 namespace { 6206 6207 class HexagonABIInfo : public ABIInfo { 6208 6209 6210 public: 6211 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 6212 6213 private: 6214 6215 ABIArgInfo classifyReturnType(QualType RetTy) const; 6216 ABIArgInfo classifyArgumentType(QualType RetTy) const; 6217 6218 void computeInfo(CGFunctionInfo &FI) const override; 6219 6220 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6221 QualType Ty) const override; 6222 }; 6223 6224 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { 6225 public: 6226 HexagonTargetCodeGenInfo(CodeGenTypes &CGT) 6227 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {} 6228 6229 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 6230 return 29; 6231 } 6232 }; 6233 6234 } 6235 6236 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { 6237 if (!getCXXABI().classifyReturnType(FI)) 6238 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 6239 for (auto &I : FI.arguments()) 6240 I.info = classifyArgumentType(I.type); 6241 } 6242 6243 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { 6244 if (!isAggregateTypeForABI(Ty)) { 6245 // Treat an enum type as its underlying type. 6246 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 6247 Ty = EnumTy->getDecl()->getIntegerType(); 6248 6249 return (Ty->isPromotableIntegerType() ? 6250 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 6251 } 6252 6253 // Ignore empty records. 6254 if (isEmptyRecord(getContext(), Ty, true)) 6255 return ABIArgInfo::getIgnore(); 6256 6257 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 6258 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 6259 6260 uint64_t Size = getContext().getTypeSize(Ty); 6261 if (Size > 64) 6262 return getNaturalAlignIndirect(Ty, /*ByVal=*/true); 6263 // Pass in the smallest viable integer type. 6264 else if (Size > 32) 6265 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 6266 else if (Size > 16) 6267 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 6268 else if (Size > 8) 6269 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 6270 else 6271 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 6272 } 6273 6274 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { 6275 if (RetTy->isVoidType()) 6276 return ABIArgInfo::getIgnore(); 6277 6278 // Large vector types should be returned via memory. 6279 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64) 6280 return getNaturalAlignIndirect(RetTy); 6281 6282 if (!isAggregateTypeForABI(RetTy)) { 6283 // Treat an enum type as its underlying type. 6284 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 6285 RetTy = EnumTy->getDecl()->getIntegerType(); 6286 6287 return (RetTy->isPromotableIntegerType() ? 6288 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 6289 } 6290 6291 if (isEmptyRecord(getContext(), RetTy, true)) 6292 return ABIArgInfo::getIgnore(); 6293 6294 // Aggregates <= 8 bytes are returned in r0; other aggregates 6295 // are returned indirectly. 
6296   uint64_t Size = getContext().getTypeSize(RetTy);
6297   if (Size <= 64) {
6298     // Return in the smallest viable integer type.
6299     if (Size <= 8)
6300       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6301     if (Size <= 16)
6302       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6303     if (Size <= 32)
6304       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6305     return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
6306   }
6307
6308   return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
6309 }
6310
6311 Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6312                                   QualType Ty) const {
6313   // FIXME: Someone needs to audit that this handles alignment correctly.
6314   return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
6315                           getContext().getTypeInfoInChars(Ty),
6316                           CharUnits::fromQuantity(4),
6317                           /*AllowHigherAlign*/ true);
6318 }
6319
6320 //===----------------------------------------------------------------------===//
6321 // AMDGPU ABI Implementation
6322 //===----------------------------------------------------------------------===//
6323
6324 namespace {
6325
6326 class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
6327 public:
6328   AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
6329     : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
6330   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6331                            CodeGen::CodeGenModule &M) const override;
6332 };
6333
6334 }
6335
6336 void AMDGPUTargetCodeGenInfo::setTargetAttributes(
6337     const Decl *D,
6338     llvm::GlobalValue *GV,
6339     CodeGen::CodeGenModule &M) const {
6340   const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6341   if (!FD)
6342     return;
6343
6344   if (const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
6345     llvm::Function *F = cast<llvm::Function>(GV);
6346     uint32_t NumVGPR = Attr->getNumVGPR();
6347     if (NumVGPR != 0)
6348       F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR));
6349   }
6350
6351   if (const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
6352     llvm::Function *F = cast<llvm::Function>(GV);
6353     unsigned NumSGPR = Attr->getNumSGPR();
6354     if (NumSGPR != 0)
6355       F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR));
6356   }
6357 }
6358
6359
6360 //===----------------------------------------------------------------------===//
6361 // SPARC v9 ABI Implementation.
6362 // Based on the SPARC Compliance Definition version 2.4.1.
6363 //
6364 // Function arguments are mapped to a nominal "parameter array" and promoted to
6365 // registers depending on their type. Each argument occupies 8 or 16 bytes in
6366 // the array; structs larger than 16 bytes are passed indirectly.
6367 //
6368 // One case requires special care:
6369 //
6370 //   struct mixed {
6371 //     int i;
6372 //     float f;
6373 //   };
6374 //
6375 // When a struct mixed is passed by value, it only occupies 8 bytes in the
6376 // parameter array, but the int is passed in an integer register, and the float
6377 // is passed in a floating point register. This is represented as two arguments
6378 // with the LLVM IR inreg attribute:
6379 //
6380 //   declare void @f(i32 inreg %i, float inreg %f)
6381 //
6382 // The code generator will only allocate 4 bytes from the parameter array for
6383 // the inreg arguments. All other arguments are allocated a multiple of 8
6384 // bytes.
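//
// As a further illustrative sketch (an assumption, not taken from the SCD
// text): a struct holding two doubles, e.g.
//
//   struct dpair {
//     double a;
//     double b;
//   };
//
// occupies 16 bytes of the parameter array and is passed directly with the
// coercion type { double, double }, so both elements land in floating point
// registers. No inreg marking is needed because the struct contains no
// floating point elements smaller than 64 bits.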
6385 // 6386 namespace { 6387 class SparcV9ABIInfo : public ABIInfo { 6388 public: 6389 SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 6390 6391 private: 6392 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const; 6393 void computeInfo(CGFunctionInfo &FI) const override; 6394 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6395 QualType Ty) const override; 6396 6397 // Coercion type builder for structs passed in registers. The coercion type 6398 // serves two purposes: 6399 // 6400 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned' 6401 // in registers. 6402 // 2. Expose aligned floating point elements as first-level elements, so the 6403 // code generator knows to pass them in floating point registers. 6404 // 6405 // We also compute the InReg flag which indicates that the struct contains 6406 // aligned 32-bit floats. 6407 // 6408 struct CoerceBuilder { 6409 llvm::LLVMContext &Context; 6410 const llvm::DataLayout &DL; 6411 SmallVector<llvm::Type*, 8> Elems; 6412 uint64_t Size; 6413 bool InReg; 6414 6415 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl) 6416 : Context(c), DL(dl), Size(0), InReg(false) {} 6417 6418 // Pad Elems with integers until Size is ToSize. 6419 void pad(uint64_t ToSize) { 6420 assert(ToSize >= Size && "Cannot remove elements"); 6421 if (ToSize == Size) 6422 return; 6423 6424 // Finish the current 64-bit word. 6425 uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64); 6426 if (Aligned > Size && Aligned <= ToSize) { 6427 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size)); 6428 Size = Aligned; 6429 } 6430 6431 // Add whole 64-bit words. 6432 while (Size + 64 <= ToSize) { 6433 Elems.push_back(llvm::Type::getInt64Ty(Context)); 6434 Size += 64; 6435 } 6436 6437 // Final in-word padding. 6438 if (Size < ToSize) { 6439 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size)); 6440 Size = ToSize; 6441 } 6442 } 6443 6444 // Add a floating point element at Offset. 6445 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) { 6446 // Unaligned floats are treated as integers. 6447 if (Offset % Bits) 6448 return; 6449 // The InReg flag is only required if there are any floats < 64 bits. 6450 if (Bits < 64) 6451 InReg = true; 6452 pad(Offset); 6453 Elems.push_back(Ty); 6454 Size = Offset + Bits; 6455 } 6456 6457 // Add a struct type to the coercion type, starting at Offset (in bits). 6458 void addStruct(uint64_t Offset, llvm::StructType *StrTy) { 6459 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy); 6460 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) { 6461 llvm::Type *ElemTy = StrTy->getElementType(i); 6462 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i); 6463 switch (ElemTy->getTypeID()) { 6464 case llvm::Type::StructTyID: 6465 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy)); 6466 break; 6467 case llvm::Type::FloatTyID: 6468 addFloat(ElemOffset, ElemTy, 32); 6469 break; 6470 case llvm::Type::DoubleTyID: 6471 addFloat(ElemOffset, ElemTy, 64); 6472 break; 6473 case llvm::Type::FP128TyID: 6474 addFloat(ElemOffset, ElemTy, 128); 6475 break; 6476 case llvm::Type::PointerTyID: 6477 if (ElemOffset % 64 == 0) { 6478 pad(ElemOffset); 6479 Elems.push_back(ElemTy); 6480 Size += 64; 6481 } 6482 break; 6483 default: 6484 break; 6485 } 6486 } 6487 } 6488 6489 // Check if Ty is a usable substitute for the coercion type. 
6490 bool isUsableType(llvm::StructType *Ty) const { 6491 return llvm::makeArrayRef(Elems) == Ty->elements(); 6492 } 6493 6494 // Get the coercion type as a literal struct type. 6495 llvm::Type *getType() const { 6496 if (Elems.size() == 1) 6497 return Elems.front(); 6498 else 6499 return llvm::StructType::get(Context, Elems); 6500 } 6501 }; 6502 }; 6503 } // end anonymous namespace 6504 6505 ABIArgInfo 6506 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const { 6507 if (Ty->isVoidType()) 6508 return ABIArgInfo::getIgnore(); 6509 6510 uint64_t Size = getContext().getTypeSize(Ty); 6511 6512 // Anything too big to fit in registers is passed with an explicit indirect 6513 // pointer / sret pointer. 6514 if (Size > SizeLimit) 6515 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 6516 6517 // Treat an enum type as its underlying type. 6518 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 6519 Ty = EnumTy->getDecl()->getIntegerType(); 6520 6521 // Integer types smaller than a register are extended. 6522 if (Size < 64 && Ty->isIntegerType()) 6523 return ABIArgInfo::getExtend(); 6524 6525 // Other non-aggregates go in registers. 6526 if (!isAggregateTypeForABI(Ty)) 6527 return ABIArgInfo::getDirect(); 6528 6529 // If a C++ object has either a non-trivial copy constructor or a non-trivial 6530 // destructor, it is passed with an explicit indirect pointer / sret pointer. 6531 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 6532 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 6533 6534 // This is a small aggregate type that should be passed in registers. 6535 // Build a coercion type from the LLVM struct type. 6536 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty)); 6537 if (!StrTy) 6538 return ABIArgInfo::getDirect(); 6539 6540 CoerceBuilder CB(getVMContext(), getDataLayout()); 6541 CB.addStruct(0, StrTy); 6542 CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64)); 6543 6544 // Try to use the original type for coercion. 6545 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? 
StrTy : CB.getType(); 6546 6547 if (CB.InReg) 6548 return ABIArgInfo::getDirectInReg(CoerceTy); 6549 else 6550 return ABIArgInfo::getDirect(CoerceTy); 6551 } 6552 6553 Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6554 QualType Ty) const { 6555 ABIArgInfo AI = classifyType(Ty, 16 * 8); 6556 llvm::Type *ArgTy = CGT.ConvertType(Ty); 6557 if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) 6558 AI.setCoerceToType(ArgTy); 6559 6560 CharUnits SlotSize = CharUnits::fromQuantity(8); 6561 6562 CGBuilderTy &Builder = CGF.Builder; 6563 Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize); 6564 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); 6565 6566 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 6567 6568 Address ArgAddr = Address::invalid(); 6569 CharUnits Stride; 6570 switch (AI.getKind()) { 6571 case ABIArgInfo::Expand: 6572 case ABIArgInfo::InAlloca: 6573 llvm_unreachable("Unsupported ABI kind for va_arg"); 6574 6575 case ABIArgInfo::Extend: { 6576 Stride = SlotSize; 6577 CharUnits Offset = SlotSize - TypeInfo.first; 6578 ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend"); 6579 break; 6580 } 6581 6582 case ABIArgInfo::Direct: { 6583 auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType()); 6584 Stride = CharUnits::fromQuantity(AllocSize).RoundUpToAlignment(SlotSize); 6585 ArgAddr = Addr; 6586 break; 6587 } 6588 6589 case ABIArgInfo::Indirect: 6590 Stride = SlotSize; 6591 ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect"); 6592 ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"), 6593 TypeInfo.second); 6594 break; 6595 6596 case ABIArgInfo::Ignore: 6597 return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second); 6598 } 6599 6600 // Update VAList. 6601 llvm::Value *NextPtr = 6602 Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), Stride, "ap.next"); 6603 Builder.CreateStore(NextPtr, VAListAddr); 6604 6605 return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr"); 6606 } 6607 6608 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const { 6609 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8); 6610 for (auto &I : FI.arguments()) 6611 I.info = classifyType(I.type, 16 * 8); 6612 } 6613 6614 namespace { 6615 class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo { 6616 public: 6617 SparcV9TargetCodeGenInfo(CodeGenTypes &CGT) 6618 : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {} 6619 6620 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 6621 return 14; 6622 } 6623 6624 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 6625 llvm::Value *Address) const override; 6626 }; 6627 } // end anonymous namespace 6628 6629 bool 6630 SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 6631 llvm::Value *Address) const { 6632 // This is calculated from the LLVM and GCC tables and verified 6633 // against gcc output. AFAIK all ABIs use the same encoding. 
6634
6635   CodeGen::CGBuilderTy &Builder = CGF.Builder;
6636
6637   llvm::IntegerType *i8 = CGF.Int8Ty;
6638   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
6639   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
6640
6641   // 0-31: the 8-byte general-purpose registers
6642   AssignToArrayRange(Builder, Address, Eight8, 0, 31);
6643
6644   // 32-63: f0-31, the 4-byte floating-point registers
6645   AssignToArrayRange(Builder, Address, Four8, 32, 63);
6646
6647   //   Y   = 64
6648   //   PSR = 65
6649   //   WIM = 66
6650   //   TBR = 67
6651   //   PC  = 68
6652   //   NPC = 69
6653   //   FSR = 70
6654   //   CSR = 71
6655   AssignToArrayRange(Builder, Address, Eight8, 64, 71);
6656
6657   // 72-87: d0-15, the 8-byte floating-point registers
6658   AssignToArrayRange(Builder, Address, Eight8, 72, 87);
6659
6660   return false;
6661 }
6662
6663
6664 //===----------------------------------------------------------------------===//
6665 // XCore ABI Implementation
6666 //===----------------------------------------------------------------------===//
6667
6668 namespace {
6669
6670 /// A SmallStringEnc instance is used to build up the TypeString by passing
6671 /// it by reference between functions that append to it.
6672 typedef llvm::SmallString<128> SmallStringEnc;
6673
6674 /// TypeStringCache caches the meta encodings of Types.
6675 ///
6676 /// The reason for caching TypeStrings is twofold:
6677 ///   1. To cache a type's encoding for later uses;
6678 ///   2. As a means to break recursive member type inclusion.
6679 ///
6680 /// A cache Entry can have a Status of:
6681 ///   NonRecursive:   The type encoding is not recursive;
6682 ///   Recursive:      The type encoding is recursive;
6683 ///   Incomplete:     An incomplete TypeString;
6684 ///   IncompleteUsed: An incomplete TypeString that has been used in a
6685 ///                   Recursive type encoding.
6686 ///
6687 /// A NonRecursive entry will have all of its sub-members expanded as fully
6688 /// as possible. Whilst it may contain types which are recursive, the type
6689 /// itself is not recursive and thus its encoding may be safely used whenever
6690 /// the type is encountered.
6691 ///
6692 /// A Recursive entry will have all of its sub-members expanded as fully as
6693 /// possible. The type itself is recursive and it may contain other types which
6694 /// are recursive. The Recursive encoding must not be used during the expansion
6695 /// of a recursive type's recursive branch. For simplicity the code uses
6696 /// IncompleteCount to reject all usage of Recursive encodings for member types.
6697 ///
6698 /// An Incomplete entry is always a RecordType and only encodes its
6699 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
6700 /// are placed into the cache during type expansion as a means to identify and
6701 /// handle recursive inclusion of types as sub-members. If there is recursion
6702 /// the entry becomes IncompleteUsed.
6703 ///
6704 /// During the expansion of a RecordType's members:
6705 ///
6706 ///   If the cache contains a NonRecursive encoding for the member type, the
6707 ///   cached encoding is used;
6708 ///
6709 ///   If the cache contains a Recursive encoding for the member type, the
6710 ///   cached encoding is 'Swapped' out, as it may be incorrect, and...
6711 ///
6712 ///   If the member is a RecordType, an Incomplete encoding is placed into the
6713 ///   cache to break potential recursive inclusion of itself as a sub-member;
6714 ///
6715 ///   Once a member RecordType has been expanded, its temporary incomplete
6716 ///   entry is removed from the cache. If a Recursive encoding was swapped out
6717 ///   it is swapped back in;
6718 ///
6719 ///   If an incomplete entry is used to expand a sub-member, the incomplete
6720 ///   entry is marked as IncompleteUsed. The cache keeps count of how many
6721 ///   IncompleteUsed entries it currently contains in IncompleteUsedCount;
6722 ///
6723 ///   If a member's encoding is found to be NonRecursive or Recursive (viz:
6724 ///   IncompleteUsedCount==0), the member's encoding is added to the cache.
6725 ///   Else the member is part of a recursive type and thus the recursion has
6726 ///   been exited too soon for the encoding to be correct for the member.
6727 ///
6728 class TypeStringCache {
6729   enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
6730   struct Entry {
6731     std::string Str;     // The encoded TypeString for the type.
6732     enum Status State;   // Information about the encoding in 'Str'.
6733     std::string Swapped; // A temporary placeholder for a Recursive encoding
6734                          // during the expansion of RecordType's members.
6735   };
6736   std::map<const IdentifierInfo *, struct Entry> Map;
6737   unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
6738   unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
6739 public:
6740   TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
6741   void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
6742   bool removeIncomplete(const IdentifierInfo *ID);
6743   void addIfComplete(const IdentifierInfo *ID, StringRef Str,
6744                      bool IsRecursive);
6745   StringRef lookupStr(const IdentifierInfo *ID);
6746 };
6747
6748 /// TypeString encodings for enum & union fields must be ordered.
6749 /// FieldEncoding is a helper for this ordering process.
6750 class FieldEncoding {
6751   bool HasName;
6752   std::string Enc;
6753 public:
6754   FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
6755   StringRef str() {return Enc.c_str();}
6756   bool operator<(const FieldEncoding &rhs) const {
6757     if (HasName != rhs.HasName) return HasName;
6758     return Enc < rhs.Enc;
6759   }
6760 };
6761
6762 class XCoreABIInfo : public DefaultABIInfo {
6763 public:
6764   XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
6765   Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6766                     QualType Ty) const override;
6767 };
6768
6769 class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
6770   mutable TypeStringCache TSC;
6771 public:
6772   XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
6773     :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
6774   void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
6775                     CodeGen::CodeGenModule &M) const override;
6776 };
6777
6778 } // End anonymous namespace.
6779
6780 Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6781                                 QualType Ty) const {
6782   CGBuilderTy &Builder = CGF.Builder;
6783
6784   // Get the VAList.
6785   CharUnits SlotSize = CharUnits::fromQuantity(4);
6786   Address AP(Builder.CreateLoad(VAListAddr), SlotSize);
6787
6788   // Handle the argument.
6789   ABIArgInfo AI = classifyArgumentType(Ty);
6790   CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
6791   llvm::Type *ArgTy = CGT.ConvertType(Ty);
6792   if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
6793     AI.setCoerceToType(ArgTy);
6794   llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
6795
6796   Address Val = Address::invalid();
6797   CharUnits ArgSize = CharUnits::Zero();
6798   switch (AI.getKind()) {
6799   case ABIArgInfo::Expand:
6800   case ABIArgInfo::InAlloca:
6801     llvm_unreachable("Unsupported ABI kind for va_arg");
6802   case ABIArgInfo::Ignore:
6803     Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
6804     ArgSize = CharUnits::Zero();
6805     break;
6806   case ABIArgInfo::Extend:
6807   case ABIArgInfo::Direct:
6808     Val = Builder.CreateBitCast(AP, ArgPtrTy);
6809     ArgSize = CharUnits::fromQuantity(
6810                   getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
6811     ArgSize = ArgSize.RoundUpToAlignment(SlotSize);
6812     break;
6813   case ABIArgInfo::Indirect:
6814     Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
6815     Val = Address(Builder.CreateLoad(Val), TypeAlign);
6816     ArgSize = SlotSize;
6817     break;
6818   }
6819
6820   // Increment the VAList.
6821   if (!ArgSize.isZero()) {
6822     llvm::Value *APN =
6823       Builder.CreateConstInBoundsByteGEP(AP.getPointer(), ArgSize);
6824     Builder.CreateStore(APN, VAListAddr);
6825   }
6826
6827   return Val;
6828 }
6829
6830 /// During the expansion of a RecordType, an incomplete TypeString is placed
6831 /// into the cache as a means to identify and break recursion.
6832 /// If there is a Recursive encoding in the cache, it is swapped out and will
6833 /// be reinserted by removeIncomplete().
6834 /// All other types of encoding should have been used rather than arriving here.
6835 void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
6836                                     std::string StubEnc) {
6837   if (!ID)
6838     return;
6839   Entry &E = Map[ID];
6840   assert( (E.Str.empty() || E.State == Recursive) &&
6841          "Incorrect use of addIncomplete");
6842   assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
6843   E.Swapped.swap(E.Str); // swap out the Recursive
6844   E.Str.swap(StubEnc);
6845   E.State = Incomplete;
6846   ++IncompleteCount;
6847 }
6848
6849 /// Once the RecordType has been expanded, the temporary incomplete TypeString
6850 /// must be removed from the cache.
6851 /// If a Recursive was swapped out by addIncomplete(), it is swapped back in.
6852 /// Returns true if the RecordType was defined recursively.
6853 bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
6854   if (!ID)
6855     return false;
6856   auto I = Map.find(ID);
6857   assert(I != Map.end() && "Entry not present");
6858   Entry &E = I->second;
6859   assert( (E.State == Incomplete ||
6860            E.State == IncompleteUsed) &&
6861          "Entry must be an incomplete type");
6862   bool IsRecursive = false;
6863   if (E.State == IncompleteUsed) {
6864     // We made use of our Incomplete encoding, thus we are recursive.
6865     IsRecursive = true;
6866     --IncompleteUsedCount;
6867   }
6868   if (E.Swapped.empty())
6869     Map.erase(I);
6870   else {
6871     // Swap the Recursive back.
6872     E.Swapped.swap(E.Str);
6873     E.Swapped.clear();
6874     E.State = Recursive;
6875   }
6876   --IncompleteCount;
6877   return IsRecursive;
6878 }
6879
6880 /// Add the encoded TypeString to the cache only if it is NonRecursive or
6881 /// Recursive (viz: all sub-members were expanded as fully as possible).
6882 void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
6883                                     bool IsRecursive) {
6884   if (!ID || IncompleteUsedCount)
6885     return; // No key or it is an incomplete sub-type, so don't add.
6886   Entry &E = Map[ID];
6887   if (IsRecursive && !E.Str.empty()) {
6888     assert(E.State==Recursive && E.Str.size() == Str.size() &&
6889            "This is not the same Recursive entry");
6890     // The parent container was not recursive after all, so we could have used
6891     // this Recursive sub-member entry, but we assumed the worst when we
6892     // started (viz: IncompleteCount!=0).
6893     return;
6894   }
6895   assert(E.Str.empty() && "Entry already present");
6896   E.Str = Str.str();
6897   E.State = IsRecursive ? Recursive : NonRecursive;
6898 }
6899
6900 /// Return a cached TypeString encoding for the ID. If there isn't one, or we
6901 /// are recursively expanding a type (IncompleteCount != 0) and the cached
6902 /// encoding is Recursive, return an empty StringRef.
6903 StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
6904   if (!ID)
6905     return StringRef();   // We have no key.
6906   auto I = Map.find(ID);
6907   if (I == Map.end())
6908     return StringRef();   // We have no encoding.
6909   Entry &E = I->second;
6910   if (E.State == Recursive && IncompleteCount)
6911     return StringRef();   // We don't use Recursive encodings for member types.
6912
6913   if (E.State == Incomplete) {
6914     // The incomplete type is being used to break out of recursion.
6915     E.State = IncompleteUsed;
6916     ++IncompleteUsedCount;
6917   }
6918   return E.Str.c_str();
6919 }
6920
6921 /// The XCore ABI includes a type information section that communicates symbol
6922 /// type information to the linker. The linker uses this information to verify
6923 /// safety/correctness of things such as array bounds and pointers.
6924 /// The ABI only requires C (and XC) language modules to emit TypeStrings.
6925 /// This type information (TypeString) is emitted into metadata for all global
6926 /// symbols: definitions, declarations, functions & variables.
6927 ///
6928 /// The TypeString carries type, qualifier, name, size & value details.
6929 /// Please see 'Tools Development Guide' section 2.16.2 for format details:
6930 /// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
6931 /// The output is tested by test/CodeGen/xcore-stringtype.c.
6932 ///
6933 static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
6934                           CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
6935
6936 /// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
6937 void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
6938                                           CodeGen::CodeGenModule &CGM) const {
6939   SmallStringEnc Enc;
6940   if (getTypeString(Enc, D, CGM, TSC)) {
6941     llvm::LLVMContext &Ctx = CGM.getModule().getContext();
6942     llvm::SmallVector<llvm::Metadata *, 2> MDVals;
6943     MDVals.push_back(llvm::ConstantAsMetadata::get(GV));
6944     MDVals.push_back(llvm::MDString::get(Ctx, Enc.str()));
6945     llvm::NamedMDNode *MD =
6946       CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
6947     MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
6948   }
6949 }
6950
6951 static bool appendType(SmallStringEnc &Enc, QualType QType,
6952                        const CodeGen::CodeGenModule &CGM,
6953                        TypeStringCache &TSC);
6954
6955 /// Helper function for appendRecordType().
6956 /// Builds a SmallVector containing the encoded field types in declaration
6957 /// order.
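///
/// For illustration (an assumption based on the code below, not original
/// documentation): a plain field 'int x;' produces the entry "m(x){si}",
/// and a bit-field 'unsigned y : 3;' produces "m(y){b(3:ui)}".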
6958 static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE, 6959 const RecordDecl *RD, 6960 const CodeGen::CodeGenModule &CGM, 6961 TypeStringCache &TSC) { 6962 for (const auto *Field : RD->fields()) { 6963 SmallStringEnc Enc; 6964 Enc += "m("; 6965 Enc += Field->getName(); 6966 Enc += "){"; 6967 if (Field->isBitField()) { 6968 Enc += "b("; 6969 llvm::raw_svector_ostream OS(Enc); 6970 OS << Field->getBitWidthValue(CGM.getContext()); 6971 Enc += ':'; 6972 } 6973 if (!appendType(Enc, Field->getType(), CGM, TSC)) 6974 return false; 6975 if (Field->isBitField()) 6976 Enc += ')'; 6977 Enc += '}'; 6978 FE.emplace_back(!Field->getName().empty(), Enc); 6979 } 6980 return true; 6981 } 6982 6983 /// Appends structure and union types to Enc and adds encoding to cache. 6984 /// Recursively calls appendType (via extractFieldType) for each field. 6985 /// Union types have their fields ordered according to the ABI. 6986 static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, 6987 const CodeGen::CodeGenModule &CGM, 6988 TypeStringCache &TSC, const IdentifierInfo *ID) { 6989 // Append the cached TypeString if we have one. 6990 StringRef TypeString = TSC.lookupStr(ID); 6991 if (!TypeString.empty()) { 6992 Enc += TypeString; 6993 return true; 6994 } 6995 6996 // Start to emit an incomplete TypeString. 6997 size_t Start = Enc.size(); 6998 Enc += (RT->isUnionType()? 'u' : 's'); 6999 Enc += '('; 7000 if (ID) 7001 Enc += ID->getName(); 7002 Enc += "){"; 7003 7004 // We collect all encoded fields and order as necessary. 7005 bool IsRecursive = false; 7006 const RecordDecl *RD = RT->getDecl()->getDefinition(); 7007 if (RD && !RD->field_empty()) { 7008 // An incomplete TypeString stub is placed in the cache for this RecordType 7009 // so that recursive calls to this RecordType will use it whilst building a 7010 // complete TypeString for this RecordType. 7011 SmallVector<FieldEncoding, 16> FE; 7012 std::string StubEnc(Enc.substr(Start).str()); 7013 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString. 7014 TSC.addIncomplete(ID, std::move(StubEnc)); 7015 if (!extractFieldType(FE, RD, CGM, TSC)) { 7016 (void) TSC.removeIncomplete(ID); 7017 return false; 7018 } 7019 IsRecursive = TSC.removeIncomplete(ID); 7020 // The ABI requires unions to be sorted but not structures. 7021 // See FieldEncoding::operator< for sort algorithm. 7022 if (RT->isUnionType()) 7023 std::sort(FE.begin(), FE.end()); 7024 // We can now complete the TypeString. 7025 unsigned E = FE.size(); 7026 for (unsigned I = 0; I != E; ++I) { 7027 if (I) 7028 Enc += ','; 7029 Enc += FE[I].str(); 7030 } 7031 } 7032 Enc += '}'; 7033 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive); 7034 return true; 7035 } 7036 7037 /// Appends enum types to Enc and adds the encoding to the cache. 7038 static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, 7039 TypeStringCache &TSC, 7040 const IdentifierInfo *ID) { 7041 // Append the cached TypeString if we have one. 7042 StringRef TypeString = TSC.lookupStr(ID); 7043 if (!TypeString.empty()) { 7044 Enc += TypeString; 7045 return true; 7046 } 7047 7048 size_t Start = Enc.size(); 7049 Enc += "e("; 7050 if (ID) 7051 Enc += ID->getName(); 7052 Enc += "){"; 7053 7054 // We collect all encoded enumerations and order them alphanumerically. 
7055 if (const EnumDecl *ED = ET->getDecl()->getDefinition()) { 7056 SmallVector<FieldEncoding, 16> FE; 7057 for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E; 7058 ++I) { 7059 SmallStringEnc EnumEnc; 7060 EnumEnc += "m("; 7061 EnumEnc += I->getName(); 7062 EnumEnc += "){"; 7063 I->getInitVal().toString(EnumEnc); 7064 EnumEnc += '}'; 7065 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc)); 7066 } 7067 std::sort(FE.begin(), FE.end()); 7068 unsigned E = FE.size(); 7069 for (unsigned I = 0; I != E; ++I) { 7070 if (I) 7071 Enc += ','; 7072 Enc += FE[I].str(); 7073 } 7074 } 7075 Enc += '}'; 7076 TSC.addIfComplete(ID, Enc.substr(Start), false); 7077 return true; 7078 } 7079 7080 /// Appends type's qualifier to Enc. 7081 /// This is done prior to appending the type's encoding. 7082 static void appendQualifier(SmallStringEnc &Enc, QualType QT) { 7083 // Qualifiers are emitted in alphabetical order. 7084 static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"}; 7085 int Lookup = 0; 7086 if (QT.isConstQualified()) 7087 Lookup += 1<<0; 7088 if (QT.isRestrictQualified()) 7089 Lookup += 1<<1; 7090 if (QT.isVolatileQualified()) 7091 Lookup += 1<<2; 7092 Enc += Table[Lookup]; 7093 } 7094 7095 /// Appends built-in types to Enc. 7096 static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) { 7097 const char *EncType; 7098 switch (BT->getKind()) { 7099 case BuiltinType::Void: 7100 EncType = "0"; 7101 break; 7102 case BuiltinType::Bool: 7103 EncType = "b"; 7104 break; 7105 case BuiltinType::Char_U: 7106 EncType = "uc"; 7107 break; 7108 case BuiltinType::UChar: 7109 EncType = "uc"; 7110 break; 7111 case BuiltinType::SChar: 7112 EncType = "sc"; 7113 break; 7114 case BuiltinType::UShort: 7115 EncType = "us"; 7116 break; 7117 case BuiltinType::Short: 7118 EncType = "ss"; 7119 break; 7120 case BuiltinType::UInt: 7121 EncType = "ui"; 7122 break; 7123 case BuiltinType::Int: 7124 EncType = "si"; 7125 break; 7126 case BuiltinType::ULong: 7127 EncType = "ul"; 7128 break; 7129 case BuiltinType::Long: 7130 EncType = "sl"; 7131 break; 7132 case BuiltinType::ULongLong: 7133 EncType = "ull"; 7134 break; 7135 case BuiltinType::LongLong: 7136 EncType = "sll"; 7137 break; 7138 case BuiltinType::Float: 7139 EncType = "ft"; 7140 break; 7141 case BuiltinType::Double: 7142 EncType = "d"; 7143 break; 7144 case BuiltinType::LongDouble: 7145 EncType = "ld"; 7146 break; 7147 default: 7148 return false; 7149 } 7150 Enc += EncType; 7151 return true; 7152 } 7153 7154 /// Appends a pointer encoding to Enc before calling appendType for the pointee. 7155 static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, 7156 const CodeGen::CodeGenModule &CGM, 7157 TypeStringCache &TSC) { 7158 Enc += "p("; 7159 if (!appendType(Enc, PT->getPointeeType(), CGM, TSC)) 7160 return false; 7161 Enc += ')'; 7162 return true; 7163 } 7164 7165 /// Appends array encoding to Enc before calling appendType for the element. 7166 static bool appendArrayType(SmallStringEnc &Enc, QualType QT, 7167 const ArrayType *AT, 7168 const CodeGen::CodeGenModule &CGM, 7169 TypeStringCache &TSC, StringRef NoSizeEnc) { 7170 if (AT->getSizeModifier() != ArrayType::Normal) 7171 return false; 7172 Enc += "a("; 7173 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) 7174 CAT->getSize().toStringUnsigned(Enc); 7175 else 7176 Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "". 7177 Enc += ':'; 7178 // The Qualifiers should be attached to the type rather than the array. 
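  // Illustrative sketch (an assumption, not from the original comments): a
  // constant array 'int a[5]' encodes as "a(5:si)", while a global array of
  // unknown size, for which NoSizeEnc is "*", encodes as "a(*:si)".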
7179 appendQualifier(Enc, QT); 7180 if (!appendType(Enc, AT->getElementType(), CGM, TSC)) 7181 return false; 7182 Enc += ')'; 7183 return true; 7184 } 7185 7186 /// Appends a function encoding to Enc, calling appendType for the return type 7187 /// and the arguments. 7188 static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, 7189 const CodeGen::CodeGenModule &CGM, 7190 TypeStringCache &TSC) { 7191 Enc += "f{"; 7192 if (!appendType(Enc, FT->getReturnType(), CGM, TSC)) 7193 return false; 7194 Enc += "}("; 7195 if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) { 7196 // N.B. we are only interested in the adjusted param types. 7197 auto I = FPT->param_type_begin(); 7198 auto E = FPT->param_type_end(); 7199 if (I != E) { 7200 do { 7201 if (!appendType(Enc, *I, CGM, TSC)) 7202 return false; 7203 ++I; 7204 if (I != E) 7205 Enc += ','; 7206 } while (I != E); 7207 if (FPT->isVariadic()) 7208 Enc += ",va"; 7209 } else { 7210 if (FPT->isVariadic()) 7211 Enc += "va"; 7212 else 7213 Enc += '0'; 7214 } 7215 } 7216 Enc += ')'; 7217 return true; 7218 } 7219 7220 /// Handles the type's qualifier before dispatching a call to handle specific 7221 /// type encodings. 7222 static bool appendType(SmallStringEnc &Enc, QualType QType, 7223 const CodeGen::CodeGenModule &CGM, 7224 TypeStringCache &TSC) { 7225 7226 QualType QT = QType.getCanonicalType(); 7227 7228 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) 7229 // The Qualifiers should be attached to the type rather than the array. 7230 // Thus we don't call appendQualifier() here. 7231 return appendArrayType(Enc, QT, AT, CGM, TSC, ""); 7232 7233 appendQualifier(Enc, QT); 7234 7235 if (const BuiltinType *BT = QT->getAs<BuiltinType>()) 7236 return appendBuiltinType(Enc, BT); 7237 7238 if (const PointerType *PT = QT->getAs<PointerType>()) 7239 return appendPointerType(Enc, PT, CGM, TSC); 7240 7241 if (const EnumType *ET = QT->getAs<EnumType>()) 7242 return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier()); 7243 7244 if (const RecordType *RT = QT->getAsStructureType()) 7245 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); 7246 7247 if (const RecordType *RT = QT->getAsUnionType()) 7248 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); 7249 7250 if (const FunctionType *FT = QT->getAs<FunctionType>()) 7251 return appendFunctionType(Enc, FT, CGM, TSC); 7252 7253 return false; 7254 } 7255 7256 static bool getTypeString(SmallStringEnc &Enc, const Decl *D, 7257 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) { 7258 if (!D) 7259 return false; 7260 7261 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 7262 if (FD->getLanguageLinkage() != CLanguageLinkage) 7263 return false; 7264 return appendType(Enc, FD->getType(), CGM, TSC); 7265 } 7266 7267 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { 7268 if (VD->getLanguageLinkage() != CLanguageLinkage) 7269 return false; 7270 QualType QT = VD->getType().getCanonicalType(); 7271 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) { 7272 // Global ArrayTypes are given a size of '*' if the size is unknown. 7273 // The Qualifiers should be attached to the type rather than the array. 7274 // Thus we don't call appendQualifier() here. 
7275 return appendArrayType(Enc, QT, AT, CGM, TSC, "*"); 7276 } 7277 return appendType(Enc, QT, CGM, TSC); 7278 } 7279 return false; 7280 } 7281 7282 7283 //===----------------------------------------------------------------------===// 7284 // Driver code 7285 //===----------------------------------------------------------------------===// 7286 7287 const llvm::Triple &CodeGenModule::getTriple() const { 7288 return getTarget().getTriple(); 7289 } 7290 7291 bool CodeGenModule::supportsCOMDAT() const { 7292 return !getTriple().isOSBinFormatMachO(); 7293 } 7294 7295 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { 7296 if (TheTargetCodeGenInfo) 7297 return *TheTargetCodeGenInfo; 7298 7299 const llvm::Triple &Triple = getTarget().getTriple(); 7300 switch (Triple.getArch()) { 7301 default: 7302 return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types)); 7303 7304 case llvm::Triple::le32: 7305 return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types)); 7306 case llvm::Triple::mips: 7307 case llvm::Triple::mipsel: 7308 if (Triple.getOS() == llvm::Triple::NaCl) 7309 return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types)); 7310 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true)); 7311 7312 case llvm::Triple::mips64: 7313 case llvm::Triple::mips64el: 7314 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false)); 7315 7316 case llvm::Triple::aarch64: 7317 case llvm::Triple::aarch64_be: { 7318 AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS; 7319 if (getTarget().getABI() == "darwinpcs") 7320 Kind = AArch64ABIInfo::DarwinPCS; 7321 7322 return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind)); 7323 } 7324 7325 case llvm::Triple::wasm32: 7326 case llvm::Triple::wasm64: 7327 return *(TheTargetCodeGenInfo = new WebAssemblyTargetCodeGenInfo(Types)); 7328 7329 case llvm::Triple::arm: 7330 case llvm::Triple::armeb: 7331 case llvm::Triple::thumb: 7332 case llvm::Triple::thumbeb: 7333 { 7334 if (Triple.getOS() == llvm::Triple::Win32) { 7335 TheTargetCodeGenInfo = 7336 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP); 7337 return *TheTargetCodeGenInfo; 7338 } 7339 7340 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; 7341 if (getTarget().getABI() == "apcs-gnu") 7342 Kind = ARMABIInfo::APCS; 7343 else if (CodeGenOpts.FloatABI == "hard" || 7344 (CodeGenOpts.FloatABI != "soft" && 7345 Triple.getEnvironment() == llvm::Triple::GNUEABIHF)) 7346 Kind = ARMABIInfo::AAPCS_VFP; 7347 7348 return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind)); 7349 } 7350 7351 case llvm::Triple::ppc: 7352 return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types)); 7353 case llvm::Triple::ppc64: 7354 if (Triple.isOSBinFormatELF()) { 7355 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1; 7356 if (getTarget().getABI() == "elfv2") 7357 Kind = PPC64_SVR4_ABIInfo::ELFv2; 7358 bool HasQPX = getTarget().getABI() == "elfv1-qpx"; 7359 7360 return *(TheTargetCodeGenInfo = 7361 new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX)); 7362 } else 7363 return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types)); 7364 case llvm::Triple::ppc64le: { 7365 assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!"); 7366 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2; 7367 if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx") 7368 Kind = PPC64_SVR4_ABIInfo::ELFv1; 7369 bool HasQPX = getTarget().getABI() == "elfv1-qpx"; 7370 7371 return *(TheTargetCodeGenInfo 
= 7372 new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX)); 7373 } 7374 7375 case llvm::Triple::nvptx: 7376 case llvm::Triple::nvptx64: 7377 return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types)); 7378 7379 case llvm::Triple::msp430: 7380 return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types)); 7381 7382 case llvm::Triple::systemz: { 7383 bool HasVector = getTarget().getABI() == "vector"; 7384 return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types, 7385 HasVector)); 7386 } 7387 7388 case llvm::Triple::tce: 7389 return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types)); 7390 7391 case llvm::Triple::x86: { 7392 bool IsDarwinVectorABI = Triple.isOSDarwin(); 7393 bool RetSmallStructInRegABI = 7394 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts); 7395 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing(); 7396 7397 if (Triple.getOS() == llvm::Triple::Win32) { 7398 return *(TheTargetCodeGenInfo = new WinX86_32TargetCodeGenInfo( 7399 Types, IsDarwinVectorABI, RetSmallStructInRegABI, 7400 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters)); 7401 } else { 7402 return *(TheTargetCodeGenInfo = new X86_32TargetCodeGenInfo( 7403 Types, IsDarwinVectorABI, RetSmallStructInRegABI, 7404 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters, 7405 CodeGenOpts.FloatABI == "soft")); 7406 } 7407 } 7408 7409 case llvm::Triple::x86_64: { 7410 StringRef ABI = getTarget().getABI(); 7411 X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512 : 7412 ABI == "avx" ? X86AVXABILevel::AVX : 7413 X86AVXABILevel::None); 7414 7415 switch (Triple.getOS()) { 7416 case llvm::Triple::Win32: 7417 return *(TheTargetCodeGenInfo = 7418 new WinX86_64TargetCodeGenInfo(Types, AVXLevel)); 7419 case llvm::Triple::PS4: 7420 return *(TheTargetCodeGenInfo = 7421 new PS4TargetCodeGenInfo(Types, AVXLevel)); 7422 default: 7423 return *(TheTargetCodeGenInfo = 7424 new X86_64TargetCodeGenInfo(Types, AVXLevel)); 7425 } 7426 } 7427 case llvm::Triple::hexagon: 7428 return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types)); 7429 case llvm::Triple::r600: 7430 return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types)); 7431 case llvm::Triple::amdgcn: 7432 return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types)); 7433 case llvm::Triple::sparcv9: 7434 return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types)); 7435 case llvm::Triple::xcore: 7436 return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types)); 7437 } 7438 } 7439