1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // These classes wrap the information about a call or function 10 // definition used to handle ABI compliancy. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "TargetInfo.h" 15 #include "ABIInfo.h" 16 #include "CGBlocks.h" 17 #include "CGCXXABI.h" 18 #include "CGValue.h" 19 #include "CodeGenFunction.h" 20 #include "clang/AST/Attr.h" 21 #include "clang/AST/RecordLayout.h" 22 #include "clang/Basic/CodeGenOptions.h" 23 #include "clang/Basic/DiagnosticFrontend.h" 24 #include "clang/CodeGen/CGFunctionInfo.h" 25 #include "clang/CodeGen/SwiftCallingConv.h" 26 #include "llvm/ADT/SmallBitVector.h" 27 #include "llvm/ADT/StringExtras.h" 28 #include "llvm/ADT/StringSwitch.h" 29 #include "llvm/ADT/Triple.h" 30 #include "llvm/ADT/Twine.h" 31 #include "llvm/IR/DataLayout.h" 32 #include "llvm/IR/IntrinsicsNVPTX.h" 33 #include "llvm/IR/Type.h" 34 #include "llvm/Support/raw_ostream.h" 35 #include <algorithm> // std::sort 36 37 using namespace clang; 38 using namespace CodeGen; 39 40 // Helper for coercing an aggregate argument or return value into an integer 41 // array of the same size (including padding) and alignment. This alternate 42 // coercion happens only for the RenderScript ABI and can be removed after 43 // runtimes that rely on it are no longer supported. 44 // 45 // RenderScript assumes that the size of the argument / return value in the IR 46 // is the same as the size of the corresponding qualified type. This helper 47 // coerces the aggregate type into an array of the same size (including 48 // padding). This coercion is used in lieu of expansion of struct members or 49 // other canonical coercions that return a coerced-type of larger size. 50 // 51 // Ty - The argument / return value type 52 // Context - The associated ASTContext 53 // LLVMContext - The associated LLVMContext 54 static ABIArgInfo coerceToIntArray(QualType Ty, 55 ASTContext &Context, 56 llvm::LLVMContext &LLVMContext) { 57 // Alignment and Size are measured in bits. 58 const uint64_t Size = Context.getTypeSize(Ty); 59 const uint64_t Alignment = Context.getTypeAlign(Ty); 60 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment); 61 const uint64_t NumElements = (Size + Alignment - 1) / Alignment; 62 return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements)); 63 } 64 65 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, 66 llvm::Value *Array, 67 llvm::Value *Value, 68 unsigned FirstIndex, 69 unsigned LastIndex) { 70 // Alternatively, we could emit this as a loop in the source. 71 for (unsigned I = FirstIndex; I <= LastIndex; ++I) { 72 llvm::Value *Cell = 73 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I); 74 Builder.CreateAlignedStore(Value, Cell, CharUnits::One()); 75 } 76 } 77 78 static bool isAggregateTypeForABI(QualType T) { 79 return !CodeGenFunction::hasScalarEvaluationKind(T) || 80 T->isMemberFunctionPointerType(); 81 } 82 83 ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal, 84 bool Realign, 85 llvm::Type *Padding) const { 86 return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal, 87 Realign, Padding); 88 } 89 90 ABIArgInfo 91 ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const { 92 return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty), 93 /*ByVal*/ false, Realign); 94 } 95 96 Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 97 QualType Ty) const { 98 return Address::invalid(); 99 } 100 101 bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const { 102 if (Ty->isPromotableIntegerType()) 103 return true; 104 105 if (const auto *EIT = Ty->getAs<ExtIntType>()) 106 if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy)) 107 return true; 108 109 return false; 110 } 111 112 ABIInfo::~ABIInfo() {} 113 114 /// Does the given lowering require more than the given number of 115 /// registers when expanded? 116 /// 117 /// This is intended to be the basis of a reasonable basic implementation 118 /// of should{Pass,Return}IndirectlyForSwift. 119 /// 120 /// For most targets, a limit of four total registers is reasonable; this 121 /// limits the amount of code required in order to move around the value 122 /// in case it wasn't produced immediately prior to the call by the caller 123 /// (or wasn't produced in exactly the right registers) or isn't used 124 /// immediately within the callee. But some targets may need to further 125 /// limit the register count due to an inability to support that many 126 /// return registers. 127 static bool occupiesMoreThan(CodeGenTypes &cgt, 128 ArrayRef<llvm::Type*> scalarTypes, 129 unsigned maxAllRegisters) { 130 unsigned intCount = 0, fpCount = 0; 131 for (llvm::Type *type : scalarTypes) { 132 if (type->isPointerTy()) { 133 intCount++; 134 } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) { 135 auto ptrWidth = cgt.getTarget().getPointerWidth(0); 136 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth; 137 } else { 138 assert(type->isVectorTy() || type->isFloatingPointTy()); 139 fpCount++; 140 } 141 } 142 143 return (intCount + fpCount > maxAllRegisters); 144 } 145 146 bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize, 147 llvm::Type *eltTy, 148 unsigned numElts) const { 149 // The default implementation of this assumes that the target guarantees 150 // 128-bit SIMD support but nothing more. 151 return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16); 152 } 153 154 static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, 155 CGCXXABI &CXXABI) { 156 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 157 if (!RD) { 158 if (!RT->getDecl()->canPassInRegisters()) 159 return CGCXXABI::RAA_Indirect; 160 return CGCXXABI::RAA_Default; 161 } 162 return CXXABI.getRecordArgABI(RD); 163 } 164 165 static CGCXXABI::RecordArgABI getRecordArgABI(QualType T, 166 CGCXXABI &CXXABI) { 167 const RecordType *RT = T->getAs<RecordType>(); 168 if (!RT) 169 return CGCXXABI::RAA_Default; 170 return getRecordArgABI(RT, CXXABI); 171 } 172 173 static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, 174 const ABIInfo &Info) { 175 QualType Ty = FI.getReturnType(); 176 177 if (const auto *RT = Ty->getAs<RecordType>()) 178 if (!isa<CXXRecordDecl>(RT->getDecl()) && 179 !RT->getDecl()->canPassInRegisters()) { 180 FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty); 181 return true; 182 } 183 184 return CXXABI.classifyReturnType(FI); 185 } 186 187 /// Pass transparent unions as if they were the type of the first element. Sema 188 /// should ensure that all elements of the union have the same "machine type". 189 static QualType useFirstFieldIfTransparentUnion(QualType Ty) { 190 if (const RecordType *UT = Ty->getAsUnionType()) { 191 const RecordDecl *UD = UT->getDecl(); 192 if (UD->hasAttr<TransparentUnionAttr>()) { 193 assert(!UD->field_empty() && "sema created an empty transparent union"); 194 return UD->field_begin()->getType(); 195 } 196 } 197 return Ty; 198 } 199 200 CGCXXABI &ABIInfo::getCXXABI() const { 201 return CGT.getCXXABI(); 202 } 203 204 ASTContext &ABIInfo::getContext() const { 205 return CGT.getContext(); 206 } 207 208 llvm::LLVMContext &ABIInfo::getVMContext() const { 209 return CGT.getLLVMContext(); 210 } 211 212 const llvm::DataLayout &ABIInfo::getDataLayout() const { 213 return CGT.getDataLayout(); 214 } 215 216 const TargetInfo &ABIInfo::getTarget() const { 217 return CGT.getTarget(); 218 } 219 220 const CodeGenOptions &ABIInfo::getCodeGenOpts() const { 221 return CGT.getCodeGenOpts(); 222 } 223 224 bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); } 225 226 bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 227 return false; 228 } 229 230 bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 231 uint64_t Members) const { 232 return false; 233 } 234 235 LLVM_DUMP_METHOD void ABIArgInfo::dump() const { 236 raw_ostream &OS = llvm::errs(); 237 OS << "(ABIArgInfo Kind="; 238 switch (TheKind) { 239 case Direct: 240 OS << "Direct Type="; 241 if (llvm::Type *Ty = getCoerceToType()) 242 Ty->print(OS); 243 else 244 OS << "null"; 245 break; 246 case Extend: 247 OS << "Extend"; 248 break; 249 case Ignore: 250 OS << "Ignore"; 251 break; 252 case InAlloca: 253 OS << "InAlloca Offset=" << getInAllocaFieldIndex(); 254 break; 255 case Indirect: 256 OS << "Indirect Align=" << getIndirectAlign().getQuantity() 257 << " ByVal=" << getIndirectByVal() 258 << " Realign=" << getIndirectRealign(); 259 break; 260 case IndirectAliased: 261 OS << "Indirect Align=" << getIndirectAlign().getQuantity() 262 << " AadrSpace=" << getIndirectAddrSpace() 263 << " Realign=" << getIndirectRealign(); 264 break; 265 case Expand: 266 OS << "Expand"; 267 break; 268 case CoerceAndExpand: 269 OS << "CoerceAndExpand Type="; 270 getCoerceAndExpandType()->print(OS); 271 break; 272 } 273 OS << ")\n"; 274 } 275 276 // Dynamically round a pointer up to a multiple of the given alignment. 277 static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF, 278 llvm::Value *Ptr, 279 CharUnits Align) { 280 llvm::Value *PtrAsInt = Ptr; 281 // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align; 282 PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy); 283 PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt, 284 llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1)); 285 PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt, 286 llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity())); 287 PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt, 288 Ptr->getType(), 289 Ptr->getName() + ".aligned"); 290 return PtrAsInt; 291 } 292 293 /// Emit va_arg for a platform using the common void* representation, 294 /// where arguments are simply emitted in an array of slots on the stack. 295 /// 296 /// This version implements the core direct-value passing rules. 297 /// 298 /// \param SlotSize - The size and alignment of a stack slot. 299 /// Each argument will be allocated to a multiple of this number of 300 /// slots, and all the slots will be aligned to this value. 301 /// \param AllowHigherAlign - The slot alignment is not a cap; 302 /// an argument type with an alignment greater than the slot size 303 /// will be emitted on a higher-alignment address, potentially 304 /// leaving one or more empty slots behind as padding. If this 305 /// is false, the returned address might be less-aligned than 306 /// DirectAlign. 307 static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, 308 Address VAListAddr, 309 llvm::Type *DirectTy, 310 CharUnits DirectSize, 311 CharUnits DirectAlign, 312 CharUnits SlotSize, 313 bool AllowHigherAlign) { 314 // Cast the element type to i8* if necessary. Some platforms define 315 // va_list as a struct containing an i8* instead of just an i8*. 316 if (VAListAddr.getElementType() != CGF.Int8PtrTy) 317 VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy); 318 319 llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur"); 320 321 // If the CC aligns values higher than the slot size, do so if needed. 322 Address Addr = Address::invalid(); 323 if (AllowHigherAlign && DirectAlign > SlotSize) { 324 Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign), 325 DirectAlign); 326 } else { 327 Addr = Address(Ptr, SlotSize); 328 } 329 330 // Advance the pointer past the argument, then store that back. 331 CharUnits FullDirectSize = DirectSize.alignTo(SlotSize); 332 Address NextPtr = 333 CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next"); 334 CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr); 335 336 // If the argument is smaller than a slot, and this is a big-endian 337 // target, the argument will be right-adjusted in its slot. 338 if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() && 339 !DirectTy->isStructTy()) { 340 Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize); 341 } 342 343 Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy); 344 return Addr; 345 } 346 347 /// Emit va_arg for a platform using the common void* representation, 348 /// where arguments are simply emitted in an array of slots on the stack. 349 /// 350 /// \param IsIndirect - Values of this type are passed indirectly. 351 /// \param ValueInfo - The size and alignment of this type, generally 352 /// computed with getContext().getTypeInfoInChars(ValueTy). 353 /// \param SlotSizeAndAlign - The size and alignment of a stack slot. 354 /// Each argument will be allocated to a multiple of this number of 355 /// slots, and all the slots will be aligned to this value. 356 /// \param AllowHigherAlign - The slot alignment is not a cap; 357 /// an argument type with an alignment greater than the slot size 358 /// will be emitted on a higher-alignment address, potentially 359 /// leaving one or more empty slots behind as padding. 360 static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, 361 QualType ValueTy, bool IsIndirect, 362 std::pair<CharUnits, CharUnits> ValueInfo, 363 CharUnits SlotSizeAndAlign, 364 bool AllowHigherAlign) { 365 // The size and alignment of the value that was passed directly. 366 CharUnits DirectSize, DirectAlign; 367 if (IsIndirect) { 368 DirectSize = CGF.getPointerSize(); 369 DirectAlign = CGF.getPointerAlign(); 370 } else { 371 DirectSize = ValueInfo.first; 372 DirectAlign = ValueInfo.second; 373 } 374 375 // Cast the address we've calculated to the right type. 376 llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy); 377 if (IsIndirect) 378 DirectTy = DirectTy->getPointerTo(0); 379 380 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy, 381 DirectSize, DirectAlign, 382 SlotSizeAndAlign, 383 AllowHigherAlign); 384 385 if (IsIndirect) { 386 Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second); 387 } 388 389 return Addr; 390 391 } 392 393 static Address emitMergePHI(CodeGenFunction &CGF, 394 Address Addr1, llvm::BasicBlock *Block1, 395 Address Addr2, llvm::BasicBlock *Block2, 396 const llvm::Twine &Name = "") { 397 assert(Addr1.getType() == Addr2.getType()); 398 llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name); 399 PHI->addIncoming(Addr1.getPointer(), Block1); 400 PHI->addIncoming(Addr2.getPointer(), Block2); 401 CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment()); 402 return Address(PHI, Align); 403 } 404 405 TargetCodeGenInfo::~TargetCodeGenInfo() = default; 406 407 // If someone can figure out a general rule for this, that would be great. 408 // It's probably just doomed to be platform-dependent, though. 409 unsigned TargetCodeGenInfo::getSizeOfUnwindException() const { 410 // Verified for: 411 // x86-64 FreeBSD, Linux, Darwin 412 // x86-32 FreeBSD, Linux, Darwin 413 // PowerPC Linux, Darwin 414 // ARM Darwin (*not* EABI) 415 // AArch64 Linux 416 return 32; 417 } 418 419 bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args, 420 const FunctionNoProtoType *fnType) const { 421 // The following conventions are known to require this to be false: 422 // x86_stdcall 423 // MIPS 424 // For everything else, we just prefer false unless we opt out. 425 return false; 426 } 427 428 void 429 TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib, 430 llvm::SmallString<24> &Opt) const { 431 // This assumes the user is passing a library name like "rt" instead of a 432 // filename like "librt.a/so", and that they don't care whether it's static or 433 // dynamic. 434 Opt = "-l"; 435 Opt += Lib; 436 } 437 438 unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const { 439 // OpenCL kernels are called via an explicit runtime API with arguments 440 // set with clSetKernelArg(), not as normal sub-functions. 441 // Return SPIR_KERNEL by default as the kernel calling convention to 442 // ensure the fingerprint is fixed such way that each OpenCL argument 443 // gets one matching argument in the produced kernel function argument 444 // list to enable feasible implementation of clSetKernelArg() with 445 // aggregates etc. In case we would use the default C calling conv here, 446 // clSetKernelArg() might break depending on the target-specific 447 // conventions; different targets might split structs passed as values 448 // to multiple function arguments etc. 449 return llvm::CallingConv::SPIR_KERNEL; 450 } 451 452 llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM, 453 llvm::PointerType *T, QualType QT) const { 454 return llvm::ConstantPointerNull::get(T); 455 } 456 457 LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM, 458 const VarDecl *D) const { 459 assert(!CGM.getLangOpts().OpenCL && 460 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) && 461 "Address space agnostic languages only"); 462 return D ? D->getType().getAddressSpace() : LangAS::Default; 463 } 464 465 llvm::Value *TargetCodeGenInfo::performAddrSpaceCast( 466 CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr, 467 LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const { 468 // Since target may map different address spaces in AST to the same address 469 // space, an address space conversion may end up as a bitcast. 470 if (auto *C = dyn_cast<llvm::Constant>(Src)) 471 return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy); 472 // Try to preserve the source's name to make IR more readable. 473 return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 474 Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : ""); 475 } 476 477 llvm::Constant * 478 TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src, 479 LangAS SrcAddr, LangAS DestAddr, 480 llvm::Type *DestTy) const { 481 // Since target may map different address spaces in AST to the same address 482 // space, an address space conversion may end up as a bitcast. 483 return llvm::ConstantExpr::getPointerCast(Src, DestTy); 484 } 485 486 llvm::SyncScope::ID 487 TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts, 488 SyncScope Scope, 489 llvm::AtomicOrdering Ordering, 490 llvm::LLVMContext &Ctx) const { 491 return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */ 492 } 493 494 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays); 495 496 /// isEmptyField - Return true iff a the field is "empty", that is it 497 /// is an unnamed bit-field or an (array of) empty record(s). 498 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, 499 bool AllowArrays) { 500 if (FD->isUnnamedBitfield()) 501 return true; 502 503 QualType FT = FD->getType(); 504 505 // Constant arrays of empty records count as empty, strip them off. 506 // Constant arrays of zero length always count as empty. 507 bool WasArray = false; 508 if (AllowArrays) 509 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { 510 if (AT->getSize() == 0) 511 return true; 512 FT = AT->getElementType(); 513 // The [[no_unique_address]] special case below does not apply to 514 // arrays of C++ empty records, so we need to remember this fact. 515 WasArray = true; 516 } 517 518 const RecordType *RT = FT->getAs<RecordType>(); 519 if (!RT) 520 return false; 521 522 // C++ record fields are never empty, at least in the Itanium ABI. 523 // 524 // FIXME: We should use a predicate for whether this behavior is true in the 525 // current ABI. 526 // 527 // The exception to the above rule are fields marked with the 528 // [[no_unique_address]] attribute (since C++20). Those do count as empty 529 // according to the Itanium ABI. The exception applies only to records, 530 // not arrays of records, so we must also check whether we stripped off an 531 // array type above. 532 if (isa<CXXRecordDecl>(RT->getDecl()) && 533 (WasArray || !FD->hasAttr<NoUniqueAddressAttr>())) 534 return false; 535 536 return isEmptyRecord(Context, FT, AllowArrays); 537 } 538 539 /// isEmptyRecord - Return true iff a structure contains only empty 540 /// fields. Note that a structure with a flexible array member is not 541 /// considered empty. 542 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) { 543 const RecordType *RT = T->getAs<RecordType>(); 544 if (!RT) 545 return false; 546 const RecordDecl *RD = RT->getDecl(); 547 if (RD->hasFlexibleArrayMember()) 548 return false; 549 550 // If this is a C++ record, check the bases first. 551 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 552 for (const auto &I : CXXRD->bases()) 553 if (!isEmptyRecord(Context, I.getType(), true)) 554 return false; 555 556 for (const auto *I : RD->fields()) 557 if (!isEmptyField(Context, I, AllowArrays)) 558 return false; 559 return true; 560 } 561 562 /// isSingleElementStruct - Determine if a structure is a "single 563 /// element struct", i.e. it has exactly one non-empty field or 564 /// exactly one field which is itself a single element 565 /// struct. Structures with flexible array members are never 566 /// considered single element structs. 567 /// 568 /// \return The field declaration for the single non-empty field, if 569 /// it exists. 570 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) { 571 const RecordType *RT = T->getAs<RecordType>(); 572 if (!RT) 573 return nullptr; 574 575 const RecordDecl *RD = RT->getDecl(); 576 if (RD->hasFlexibleArrayMember()) 577 return nullptr; 578 579 const Type *Found = nullptr; 580 581 // If this is a C++ record, check the bases first. 582 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 583 for (const auto &I : CXXRD->bases()) { 584 // Ignore empty records. 585 if (isEmptyRecord(Context, I.getType(), true)) 586 continue; 587 588 // If we already found an element then this isn't a single-element struct. 589 if (Found) 590 return nullptr; 591 592 // If this is non-empty and not a single element struct, the composite 593 // cannot be a single element struct. 594 Found = isSingleElementStruct(I.getType(), Context); 595 if (!Found) 596 return nullptr; 597 } 598 } 599 600 // Check for single element. 601 for (const auto *FD : RD->fields()) { 602 QualType FT = FD->getType(); 603 604 // Ignore empty fields. 605 if (isEmptyField(Context, FD, true)) 606 continue; 607 608 // If we already found an element then this isn't a single-element 609 // struct. 610 if (Found) 611 return nullptr; 612 613 // Treat single element arrays as the element. 614 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { 615 if (AT->getSize().getZExtValue() != 1) 616 break; 617 FT = AT->getElementType(); 618 } 619 620 if (!isAggregateTypeForABI(FT)) { 621 Found = FT.getTypePtr(); 622 } else { 623 Found = isSingleElementStruct(FT, Context); 624 if (!Found) 625 return nullptr; 626 } 627 } 628 629 // We don't consider a struct a single-element struct if it has 630 // padding beyond the element type. 631 if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T)) 632 return nullptr; 633 634 return Found; 635 } 636 637 namespace { 638 Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty, 639 const ABIArgInfo &AI) { 640 // This default implementation defers to the llvm backend's va_arg 641 // instruction. It can handle only passing arguments directly 642 // (typically only handled in the backend for primitive types), or 643 // aggregates passed indirectly by pointer (NOTE: if the "byval" 644 // flag has ABI impact in the callee, this implementation cannot 645 // work.) 646 647 // Only a few cases are covered here at the moment -- those needed 648 // by the default abi. 649 llvm::Value *Val; 650 651 if (AI.isIndirect()) { 652 assert(!AI.getPaddingType() && 653 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!"); 654 assert( 655 !AI.getIndirectRealign() && 656 "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!"); 657 658 auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty); 659 CharUnits TyAlignForABI = TyInfo.second; 660 661 llvm::Type *BaseTy = 662 llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty)); 663 llvm::Value *Addr = 664 CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy); 665 return Address(Addr, TyAlignForABI); 666 } else { 667 assert((AI.isDirect() || AI.isExtend()) && 668 "Unexpected ArgInfo Kind in generic VAArg emitter!"); 669 670 assert(!AI.getInReg() && 671 "Unexpected InReg seen in arginfo in generic VAArg emitter!"); 672 assert(!AI.getPaddingType() && 673 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!"); 674 assert(!AI.getDirectOffset() && 675 "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!"); 676 assert(!AI.getCoerceToType() && 677 "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!"); 678 679 Address Temp = CGF.CreateMemTemp(Ty, "varet"); 680 Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty)); 681 CGF.Builder.CreateStore(Val, Temp); 682 return Temp; 683 } 684 } 685 686 /// DefaultABIInfo - The default implementation for ABI specific 687 /// details. This implementation provides information which results in 688 /// self-consistent and sensible LLVM IR generation, but does not 689 /// conform to any particular ABI. 690 class DefaultABIInfo : public ABIInfo { 691 public: 692 DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 693 694 ABIArgInfo classifyReturnType(QualType RetTy) const; 695 ABIArgInfo classifyArgumentType(QualType RetTy) const; 696 697 void computeInfo(CGFunctionInfo &FI) const override { 698 if (!getCXXABI().classifyReturnType(FI)) 699 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 700 for (auto &I : FI.arguments()) 701 I.info = classifyArgumentType(I.type); 702 } 703 704 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 705 QualType Ty) const override { 706 return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty)); 707 } 708 }; 709 710 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo { 711 public: 712 DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 713 : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {} 714 }; 715 716 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const { 717 Ty = useFirstFieldIfTransparentUnion(Ty); 718 719 if (isAggregateTypeForABI(Ty)) { 720 // Records with non-trivial destructors/copy-constructors should not be 721 // passed by value. 722 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 723 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 724 725 return getNaturalAlignIndirect(Ty); 726 } 727 728 // Treat an enum type as its underlying type. 729 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 730 Ty = EnumTy->getDecl()->getIntegerType(); 731 732 ASTContext &Context = getContext(); 733 if (const auto *EIT = Ty->getAs<ExtIntType>()) 734 if (EIT->getNumBits() > 735 Context.getTypeSize(Context.getTargetInfo().hasInt128Type() 736 ? Context.Int128Ty 737 : Context.LongLongTy)) 738 return getNaturalAlignIndirect(Ty); 739 740 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) 741 : ABIArgInfo::getDirect()); 742 } 743 744 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const { 745 if (RetTy->isVoidType()) 746 return ABIArgInfo::getIgnore(); 747 748 if (isAggregateTypeForABI(RetTy)) 749 return getNaturalAlignIndirect(RetTy); 750 751 // Treat an enum type as its underlying type. 752 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 753 RetTy = EnumTy->getDecl()->getIntegerType(); 754 755 if (const auto *EIT = RetTy->getAs<ExtIntType>()) 756 if (EIT->getNumBits() > 757 getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type() 758 ? getContext().Int128Ty 759 : getContext().LongLongTy)) 760 return getNaturalAlignIndirect(RetTy); 761 762 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 763 : ABIArgInfo::getDirect()); 764 } 765 766 //===----------------------------------------------------------------------===// 767 // WebAssembly ABI Implementation 768 // 769 // This is a very simple ABI that relies a lot on DefaultABIInfo. 770 //===----------------------------------------------------------------------===// 771 772 class WebAssemblyABIInfo final : public SwiftABIInfo { 773 public: 774 enum ABIKind { 775 MVP = 0, 776 ExperimentalMV = 1, 777 }; 778 779 private: 780 DefaultABIInfo defaultInfo; 781 ABIKind Kind; 782 783 public: 784 explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind) 785 : SwiftABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {} 786 787 private: 788 ABIArgInfo classifyReturnType(QualType RetTy) const; 789 ABIArgInfo classifyArgumentType(QualType Ty) const; 790 791 // DefaultABIInfo's classifyReturnType and classifyArgumentType are 792 // non-virtual, but computeInfo and EmitVAArg are virtual, so we 793 // overload them. 794 void computeInfo(CGFunctionInfo &FI) const override { 795 if (!getCXXABI().classifyReturnType(FI)) 796 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 797 for (auto &Arg : FI.arguments()) 798 Arg.info = classifyArgumentType(Arg.type); 799 } 800 801 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 802 QualType Ty) const override; 803 804 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, 805 bool asReturnValue) const override { 806 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 807 } 808 809 bool isSwiftErrorInRegister() const override { 810 return false; 811 } 812 }; 813 814 class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo { 815 public: 816 explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 817 WebAssemblyABIInfo::ABIKind K) 818 : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {} 819 820 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 821 CodeGen::CodeGenModule &CGM) const override { 822 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 823 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) { 824 if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) { 825 llvm::Function *Fn = cast<llvm::Function>(GV); 826 llvm::AttrBuilder B; 827 B.addAttribute("wasm-import-module", Attr->getImportModule()); 828 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); 829 } 830 if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) { 831 llvm::Function *Fn = cast<llvm::Function>(GV); 832 llvm::AttrBuilder B; 833 B.addAttribute("wasm-import-name", Attr->getImportName()); 834 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); 835 } 836 if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) { 837 llvm::Function *Fn = cast<llvm::Function>(GV); 838 llvm::AttrBuilder B; 839 B.addAttribute("wasm-export-name", Attr->getExportName()); 840 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); 841 } 842 } 843 844 if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) { 845 llvm::Function *Fn = cast<llvm::Function>(GV); 846 if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype()) 847 Fn->addFnAttr("no-prototype"); 848 } 849 } 850 }; 851 852 /// Classify argument of given type \p Ty. 853 ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const { 854 Ty = useFirstFieldIfTransparentUnion(Ty); 855 856 if (isAggregateTypeForABI(Ty)) { 857 // Records with non-trivial destructors/copy-constructors should not be 858 // passed by value. 859 if (auto RAA = getRecordArgABI(Ty, getCXXABI())) 860 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 861 // Ignore empty structs/unions. 862 if (isEmptyRecord(getContext(), Ty, true)) 863 return ABIArgInfo::getIgnore(); 864 // Lower single-element structs to just pass a regular value. TODO: We 865 // could do reasonable-size multiple-element structs too, using getExpand(), 866 // though watch out for things like bitfields. 867 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) 868 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 869 // For the experimental multivalue ABI, fully expand all other aggregates 870 if (Kind == ABIKind::ExperimentalMV) { 871 const RecordType *RT = Ty->getAs<RecordType>(); 872 assert(RT); 873 bool HasBitField = false; 874 for (auto *Field : RT->getDecl()->fields()) { 875 if (Field->isBitField()) { 876 HasBitField = true; 877 break; 878 } 879 } 880 if (!HasBitField) 881 return ABIArgInfo::getExpand(); 882 } 883 } 884 885 // Otherwise just do the default thing. 886 return defaultInfo.classifyArgumentType(Ty); 887 } 888 889 ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const { 890 if (isAggregateTypeForABI(RetTy)) { 891 // Records with non-trivial destructors/copy-constructors should not be 892 // returned by value. 893 if (!getRecordArgABI(RetTy, getCXXABI())) { 894 // Ignore empty structs/unions. 895 if (isEmptyRecord(getContext(), RetTy, true)) 896 return ABIArgInfo::getIgnore(); 897 // Lower single-element structs to just return a regular value. TODO: We 898 // could do reasonable-size multiple-element structs too, using 899 // ABIArgInfo::getDirect(). 900 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) 901 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 902 // For the experimental multivalue ABI, return all other aggregates 903 if (Kind == ABIKind::ExperimentalMV) 904 return ABIArgInfo::getDirect(); 905 } 906 } 907 908 // Otherwise just do the default thing. 909 return defaultInfo.classifyReturnType(RetTy); 910 } 911 912 Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 913 QualType Ty) const { 914 bool IsIndirect = isAggregateTypeForABI(Ty) && 915 !isEmptyRecord(getContext(), Ty, true) && 916 !isSingleElementStruct(Ty, getContext()); 917 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, 918 getContext().getTypeInfoInChars(Ty), 919 CharUnits::fromQuantity(4), 920 /*AllowHigherAlign=*/true); 921 } 922 923 //===----------------------------------------------------------------------===// 924 // le32/PNaCl bitcode ABI Implementation 925 // 926 // This is a simplified version of the x86_32 ABI. Arguments and return values 927 // are always passed on the stack. 928 //===----------------------------------------------------------------------===// 929 930 class PNaClABIInfo : public ABIInfo { 931 public: 932 PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 933 934 ABIArgInfo classifyReturnType(QualType RetTy) const; 935 ABIArgInfo classifyArgumentType(QualType RetTy) const; 936 937 void computeInfo(CGFunctionInfo &FI) const override; 938 Address EmitVAArg(CodeGenFunction &CGF, 939 Address VAListAddr, QualType Ty) const override; 940 }; 941 942 class PNaClTargetCodeGenInfo : public TargetCodeGenInfo { 943 public: 944 PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 945 : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {} 946 }; 947 948 void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const { 949 if (!getCXXABI().classifyReturnType(FI)) 950 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 951 952 for (auto &I : FI.arguments()) 953 I.info = classifyArgumentType(I.type); 954 } 955 956 Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 957 QualType Ty) const { 958 // The PNaCL ABI is a bit odd, in that varargs don't use normal 959 // function classification. Structs get passed directly for varargs 960 // functions, through a rewriting transform in 961 // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows 962 // this target to actually support a va_arg instructions with an 963 // aggregate type, unlike other targets. 964 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect()); 965 } 966 967 /// Classify argument of given type \p Ty. 968 ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const { 969 if (isAggregateTypeForABI(Ty)) { 970 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 971 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 972 return getNaturalAlignIndirect(Ty); 973 } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { 974 // Treat an enum type as its underlying type. 975 Ty = EnumTy->getDecl()->getIntegerType(); 976 } else if (Ty->isFloatingType()) { 977 // Floating-point types don't go inreg. 978 return ABIArgInfo::getDirect(); 979 } else if (const auto *EIT = Ty->getAs<ExtIntType>()) { 980 // Treat extended integers as integers if <=64, otherwise pass indirectly. 981 if (EIT->getNumBits() > 64) 982 return getNaturalAlignIndirect(Ty); 983 return ABIArgInfo::getDirect(); 984 } 985 986 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) 987 : ABIArgInfo::getDirect()); 988 } 989 990 ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const { 991 if (RetTy->isVoidType()) 992 return ABIArgInfo::getIgnore(); 993 994 // In the PNaCl ABI we always return records/structures on the stack. 995 if (isAggregateTypeForABI(RetTy)) 996 return getNaturalAlignIndirect(RetTy); 997 998 // Treat extended integers as integers if <=64, otherwise pass indirectly. 999 if (const auto *EIT = RetTy->getAs<ExtIntType>()) { 1000 if (EIT->getNumBits() > 64) 1001 return getNaturalAlignIndirect(RetTy); 1002 return ABIArgInfo::getDirect(); 1003 } 1004 1005 // Treat an enum type as its underlying type. 1006 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 1007 RetTy = EnumTy->getDecl()->getIntegerType(); 1008 1009 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 1010 : ABIArgInfo::getDirect()); 1011 } 1012 1013 /// IsX86_MMXType - Return true if this is an MMX type. 1014 bool IsX86_MMXType(llvm::Type *IRType) { 1015 // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>. 1016 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 && 1017 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() && 1018 IRType->getScalarSizeInBits() != 64; 1019 } 1020 1021 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 1022 StringRef Constraint, 1023 llvm::Type* Ty) { 1024 bool IsMMXCons = llvm::StringSwitch<bool>(Constraint) 1025 .Cases("y", "&y", "^Ym", true) 1026 .Default(false); 1027 if (IsMMXCons && Ty->isVectorTy()) { 1028 if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() != 1029 64) { 1030 // Invalid MMX constraint 1031 return nullptr; 1032 } 1033 1034 return llvm::Type::getX86_MMXTy(CGF.getLLVMContext()); 1035 } 1036 1037 // No operation needed 1038 return Ty; 1039 } 1040 1041 /// Returns true if this type can be passed in SSE registers with the 1042 /// X86_VectorCall calling convention. Shared between x86_32 and x86_64. 1043 static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) { 1044 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 1045 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) { 1046 if (BT->getKind() == BuiltinType::LongDouble) { 1047 if (&Context.getTargetInfo().getLongDoubleFormat() == 1048 &llvm::APFloat::x87DoubleExtended()) 1049 return false; 1050 } 1051 return true; 1052 } 1053 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 1054 // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX 1055 // registers specially. 1056 unsigned VecSize = Context.getTypeSize(VT); 1057 if (VecSize == 128 || VecSize == 256 || VecSize == 512) 1058 return true; 1059 } 1060 return false; 1061 } 1062 1063 /// Returns true if this aggregate is small enough to be passed in SSE registers 1064 /// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64. 1065 static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) { 1066 return NumMembers <= 4; 1067 } 1068 1069 /// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86. 1070 static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) { 1071 auto AI = ABIArgInfo::getDirect(T); 1072 AI.setInReg(true); 1073 AI.setCanBeFlattened(false); 1074 return AI; 1075 } 1076 1077 //===----------------------------------------------------------------------===// 1078 // X86-32 ABI Implementation 1079 //===----------------------------------------------------------------------===// 1080 1081 /// Similar to llvm::CCState, but for Clang. 1082 struct CCState { 1083 CCState(CGFunctionInfo &FI) 1084 : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {} 1085 1086 llvm::SmallBitVector IsPreassigned; 1087 unsigned CC = CallingConv::CC_C; 1088 unsigned FreeRegs = 0; 1089 unsigned FreeSSERegs = 0; 1090 }; 1091 1092 enum { 1093 // Vectorcall only allows the first 6 parameters to be passed in registers. 1094 VectorcallMaxParamNumAsReg = 6 1095 }; 1096 1097 /// X86_32ABIInfo - The X86-32 ABI information. 1098 class X86_32ABIInfo : public SwiftABIInfo { 1099 enum Class { 1100 Integer, 1101 Float 1102 }; 1103 1104 static const unsigned MinABIStackAlignInBytes = 4; 1105 1106 bool IsDarwinVectorABI; 1107 bool IsRetSmallStructInRegABI; 1108 bool IsWin32StructABI; 1109 bool IsSoftFloatABI; 1110 bool IsMCUABI; 1111 unsigned DefaultNumRegisterParameters; 1112 1113 static bool isRegisterSize(unsigned Size) { 1114 return (Size == 8 || Size == 16 || Size == 32 || Size == 64); 1115 } 1116 1117 bool isHomogeneousAggregateBaseType(QualType Ty) const override { 1118 // FIXME: Assumes vectorcall is in use. 1119 return isX86VectorTypeForVectorCall(getContext(), Ty); 1120 } 1121 1122 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 1123 uint64_t NumMembers) const override { 1124 // FIXME: Assumes vectorcall is in use. 1125 return isX86VectorCallAggregateSmallEnough(NumMembers); 1126 } 1127 1128 bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const; 1129 1130 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 1131 /// such that the argument will be passed in memory. 1132 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const; 1133 1134 ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const; 1135 1136 /// Return the alignment to use for the given type on the stack. 1137 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const; 1138 1139 Class classify(QualType Ty) const; 1140 ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const; 1141 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const; 1142 1143 /// Updates the number of available free registers, returns 1144 /// true if any registers were allocated. 1145 bool updateFreeRegs(QualType Ty, CCState &State) const; 1146 1147 bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg, 1148 bool &NeedsPadding) const; 1149 bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const; 1150 1151 bool canExpandIndirectArgument(QualType Ty) const; 1152 1153 /// Rewrite the function info so that all memory arguments use 1154 /// inalloca. 1155 void rewriteWithInAlloca(CGFunctionInfo &FI) const; 1156 1157 void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, 1158 CharUnits &StackOffset, ABIArgInfo &Info, 1159 QualType Type) const; 1160 void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const; 1161 1162 public: 1163 1164 void computeInfo(CGFunctionInfo &FI) const override; 1165 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 1166 QualType Ty) const override; 1167 1168 X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI, 1169 bool RetSmallStructInRegABI, bool Win32StructABI, 1170 unsigned NumRegisterParameters, bool SoftFloatABI) 1171 : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI), 1172 IsRetSmallStructInRegABI(RetSmallStructInRegABI), 1173 IsWin32StructABI(Win32StructABI), 1174 IsSoftFloatABI(SoftFloatABI), 1175 IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()), 1176 DefaultNumRegisterParameters(NumRegisterParameters) {} 1177 1178 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, 1179 bool asReturnValue) const override { 1180 // LLVM's x86-32 lowering currently only assigns up to three 1181 // integer registers and three fp registers. Oddly, it'll use up to 1182 // four vector registers for vectors, but those can overlap with the 1183 // scalar registers. 1184 return occupiesMoreThan(CGT, scalars, /*total*/ 3); 1185 } 1186 1187 bool isSwiftErrorInRegister() const override { 1188 // x86-32 lowering does not support passing swifterror in a register. 1189 return false; 1190 } 1191 }; 1192 1193 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo { 1194 public: 1195 X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI, 1196 bool RetSmallStructInRegABI, bool Win32StructABI, 1197 unsigned NumRegisterParameters, bool SoftFloatABI) 1198 : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>( 1199 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI, 1200 NumRegisterParameters, SoftFloatABI)) {} 1201 1202 static bool isStructReturnInRegABI( 1203 const llvm::Triple &Triple, const CodeGenOptions &Opts); 1204 1205 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 1206 CodeGen::CodeGenModule &CGM) const override; 1207 1208 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 1209 // Darwin uses different dwarf register numbers for EH. 1210 if (CGM.getTarget().getTriple().isOSDarwin()) return 5; 1211 return 4; 1212 } 1213 1214 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 1215 llvm::Value *Address) const override; 1216 1217 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 1218 StringRef Constraint, 1219 llvm::Type* Ty) const override { 1220 return X86AdjustInlineAsmType(CGF, Constraint, Ty); 1221 } 1222 1223 void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue, 1224 std::string &Constraints, 1225 std::vector<llvm::Type *> &ResultRegTypes, 1226 std::vector<llvm::Type *> &ResultTruncRegTypes, 1227 std::vector<LValue> &ResultRegDests, 1228 std::string &AsmString, 1229 unsigned NumOutputs) const override; 1230 1231 llvm::Constant * 1232 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { 1233 unsigned Sig = (0xeb << 0) | // jmp rel8 1234 (0x06 << 8) | // .+0x08 1235 ('v' << 16) | 1236 ('2' << 24); 1237 return llvm::ConstantInt::get(CGM.Int32Ty, Sig); 1238 } 1239 1240 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 1241 return "movl\t%ebp, %ebp" 1242 "\t\t// marker for objc_retainAutoreleaseReturnValue"; 1243 } 1244 }; 1245 1246 } 1247 1248 /// Rewrite input constraint references after adding some output constraints. 1249 /// In the case where there is one output and one input and we add one output, 1250 /// we need to replace all operand references greater than or equal to 1: 1251 /// mov $0, $1 1252 /// mov eax, $1 1253 /// The result will be: 1254 /// mov $0, $2 1255 /// mov eax, $2 1256 static void rewriteInputConstraintReferences(unsigned FirstIn, 1257 unsigned NumNewOuts, 1258 std::string &AsmString) { 1259 std::string Buf; 1260 llvm::raw_string_ostream OS(Buf); 1261 size_t Pos = 0; 1262 while (Pos < AsmString.size()) { 1263 size_t DollarStart = AsmString.find('$', Pos); 1264 if (DollarStart == std::string::npos) 1265 DollarStart = AsmString.size(); 1266 size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart); 1267 if (DollarEnd == std::string::npos) 1268 DollarEnd = AsmString.size(); 1269 OS << StringRef(&AsmString[Pos], DollarEnd - Pos); 1270 Pos = DollarEnd; 1271 size_t NumDollars = DollarEnd - DollarStart; 1272 if (NumDollars % 2 != 0 && Pos < AsmString.size()) { 1273 // We have an operand reference. 1274 size_t DigitStart = Pos; 1275 if (AsmString[DigitStart] == '{') { 1276 OS << '{'; 1277 ++DigitStart; 1278 } 1279 size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart); 1280 if (DigitEnd == std::string::npos) 1281 DigitEnd = AsmString.size(); 1282 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart); 1283 unsigned OperandIndex; 1284 if (!OperandStr.getAsInteger(10, OperandIndex)) { 1285 if (OperandIndex >= FirstIn) 1286 OperandIndex += NumNewOuts; 1287 OS << OperandIndex; 1288 } else { 1289 OS << OperandStr; 1290 } 1291 Pos = DigitEnd; 1292 } 1293 } 1294 AsmString = std::move(OS.str()); 1295 } 1296 1297 /// Add output constraints for EAX:EDX because they are return registers. 1298 void X86_32TargetCodeGenInfo::addReturnRegisterOutputs( 1299 CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints, 1300 std::vector<llvm::Type *> &ResultRegTypes, 1301 std::vector<llvm::Type *> &ResultTruncRegTypes, 1302 std::vector<LValue> &ResultRegDests, std::string &AsmString, 1303 unsigned NumOutputs) const { 1304 uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType()); 1305 1306 // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is 1307 // larger. 1308 if (!Constraints.empty()) 1309 Constraints += ','; 1310 if (RetWidth <= 32) { 1311 Constraints += "={eax}"; 1312 ResultRegTypes.push_back(CGF.Int32Ty); 1313 } else { 1314 // Use the 'A' constraint for EAX:EDX. 1315 Constraints += "=A"; 1316 ResultRegTypes.push_back(CGF.Int64Ty); 1317 } 1318 1319 // Truncate EAX or EAX:EDX to an integer of the appropriate size. 1320 llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth); 1321 ResultTruncRegTypes.push_back(CoerceTy); 1322 1323 // Coerce the integer by bitcasting the return slot pointer. 1324 ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(CGF), 1325 CoerceTy->getPointerTo())); 1326 ResultRegDests.push_back(ReturnSlot); 1327 1328 rewriteInputConstraintReferences(NumOutputs, 1, AsmString); 1329 } 1330 1331 /// shouldReturnTypeInRegister - Determine if the given type should be 1332 /// returned in a register (for the Darwin and MCU ABI). 1333 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, 1334 ASTContext &Context) const { 1335 uint64_t Size = Context.getTypeSize(Ty); 1336 1337 // For i386, type must be register sized. 1338 // For the MCU ABI, it only needs to be <= 8-byte 1339 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size))) 1340 return false; 1341 1342 if (Ty->isVectorType()) { 1343 // 64- and 128- bit vectors inside structures are not returned in 1344 // registers. 1345 if (Size == 64 || Size == 128) 1346 return false; 1347 1348 return true; 1349 } 1350 1351 // If this is a builtin, pointer, enum, complex type, member pointer, or 1352 // member function pointer it is ok. 1353 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() || 1354 Ty->isAnyComplexType() || Ty->isEnumeralType() || 1355 Ty->isBlockPointerType() || Ty->isMemberPointerType()) 1356 return true; 1357 1358 // Arrays are treated like records. 1359 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) 1360 return shouldReturnTypeInRegister(AT->getElementType(), Context); 1361 1362 // Otherwise, it must be a record type. 1363 const RecordType *RT = Ty->getAs<RecordType>(); 1364 if (!RT) return false; 1365 1366 // FIXME: Traverse bases here too. 1367 1368 // Structure types are passed in register if all fields would be 1369 // passed in a register. 1370 for (const auto *FD : RT->getDecl()->fields()) { 1371 // Empty fields are ignored. 1372 if (isEmptyField(Context, FD, true)) 1373 continue; 1374 1375 // Check fields recursively. 1376 if (!shouldReturnTypeInRegister(FD->getType(), Context)) 1377 return false; 1378 } 1379 return true; 1380 } 1381 1382 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) { 1383 // Treat complex types as the element type. 1384 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 1385 Ty = CTy->getElementType(); 1386 1387 // Check for a type which we know has a simple scalar argument-passing 1388 // convention without any padding. (We're specifically looking for 32 1389 // and 64-bit integer and integer-equivalents, float, and double.) 1390 if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() && 1391 !Ty->isEnumeralType() && !Ty->isBlockPointerType()) 1392 return false; 1393 1394 uint64_t Size = Context.getTypeSize(Ty); 1395 return Size == 32 || Size == 64; 1396 } 1397 1398 static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, 1399 uint64_t &Size) { 1400 for (const auto *FD : RD->fields()) { 1401 // Scalar arguments on the stack get 4 byte alignment on x86. If the 1402 // argument is smaller than 32-bits, expanding the struct will create 1403 // alignment padding. 1404 if (!is32Or64BitBasicType(FD->getType(), Context)) 1405 return false; 1406 1407 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know 1408 // how to expand them yet, and the predicate for telling if a bitfield still 1409 // counts as "basic" is more complicated than what we were doing previously. 1410 if (FD->isBitField()) 1411 return false; 1412 1413 Size += Context.getTypeSize(FD->getType()); 1414 } 1415 return true; 1416 } 1417 1418 static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, 1419 uint64_t &Size) { 1420 // Don't do this if there are any non-empty bases. 1421 for (const CXXBaseSpecifier &Base : RD->bases()) { 1422 if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(), 1423 Size)) 1424 return false; 1425 } 1426 if (!addFieldSizes(Context, RD, Size)) 1427 return false; 1428 return true; 1429 } 1430 1431 /// Test whether an argument type which is to be passed indirectly (on the 1432 /// stack) would have the equivalent layout if it was expanded into separate 1433 /// arguments. If so, we prefer to do the latter to avoid inhibiting 1434 /// optimizations. 1435 bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const { 1436 // We can only expand structure types. 1437 const RecordType *RT = Ty->getAs<RecordType>(); 1438 if (!RT) 1439 return false; 1440 const RecordDecl *RD = RT->getDecl(); 1441 uint64_t Size = 0; 1442 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1443 if (!IsWin32StructABI) { 1444 // On non-Windows, we have to conservatively match our old bitcode 1445 // prototypes in order to be ABI-compatible at the bitcode level. 1446 if (!CXXRD->isCLike()) 1447 return false; 1448 } else { 1449 // Don't do this for dynamic classes. 1450 if (CXXRD->isDynamicClass()) 1451 return false; 1452 } 1453 if (!addBaseAndFieldSizes(getContext(), CXXRD, Size)) 1454 return false; 1455 } else { 1456 if (!addFieldSizes(getContext(), RD, Size)) 1457 return false; 1458 } 1459 1460 // We can do this if there was no alignment padding. 1461 return Size == getContext().getTypeSize(Ty); 1462 } 1463 1464 ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const { 1465 // If the return value is indirect, then the hidden argument is consuming one 1466 // integer register. 1467 if (State.FreeRegs) { 1468 --State.FreeRegs; 1469 if (!IsMCUABI) 1470 return getNaturalAlignIndirectInReg(RetTy); 1471 } 1472 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false); 1473 } 1474 1475 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, 1476 CCState &State) const { 1477 if (RetTy->isVoidType()) 1478 return ABIArgInfo::getIgnore(); 1479 1480 const Type *Base = nullptr; 1481 uint64_t NumElts = 0; 1482 if ((State.CC == llvm::CallingConv::X86_VectorCall || 1483 State.CC == llvm::CallingConv::X86_RegCall) && 1484 isHomogeneousAggregate(RetTy, Base, NumElts)) { 1485 // The LLVM struct type for such an aggregate should lower properly. 1486 return ABIArgInfo::getDirect(); 1487 } 1488 1489 if (const VectorType *VT = RetTy->getAs<VectorType>()) { 1490 // On Darwin, some vectors are returned in registers. 1491 if (IsDarwinVectorABI) { 1492 uint64_t Size = getContext().getTypeSize(RetTy); 1493 1494 // 128-bit vectors are a special case; they are returned in 1495 // registers and we need to make sure to pick a type the LLVM 1496 // backend will like. 1497 if (Size == 128) 1498 return ABIArgInfo::getDirect(llvm::FixedVectorType::get( 1499 llvm::Type::getInt64Ty(getVMContext()), 2)); 1500 1501 // Always return in register if it fits in a general purpose 1502 // register, or if it is 64 bits and has a single element. 1503 if ((Size == 8 || Size == 16 || Size == 32) || 1504 (Size == 64 && VT->getNumElements() == 1)) 1505 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 1506 Size)); 1507 1508 return getIndirectReturnResult(RetTy, State); 1509 } 1510 1511 return ABIArgInfo::getDirect(); 1512 } 1513 1514 if (isAggregateTypeForABI(RetTy)) { 1515 if (const RecordType *RT = RetTy->getAs<RecordType>()) { 1516 // Structures with flexible arrays are always indirect. 1517 if (RT->getDecl()->hasFlexibleArrayMember()) 1518 return getIndirectReturnResult(RetTy, State); 1519 } 1520 1521 // If specified, structs and unions are always indirect. 1522 if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType()) 1523 return getIndirectReturnResult(RetTy, State); 1524 1525 // Ignore empty structs/unions. 1526 if (isEmptyRecord(getContext(), RetTy, true)) 1527 return ABIArgInfo::getIgnore(); 1528 1529 // Small structures which are register sized are generally returned 1530 // in a register. 1531 if (shouldReturnTypeInRegister(RetTy, getContext())) { 1532 uint64_t Size = getContext().getTypeSize(RetTy); 1533 1534 // As a special-case, if the struct is a "single-element" struct, and 1535 // the field is of type "float" or "double", return it in a 1536 // floating-point register. (MSVC does not apply this special case.) 1537 // We apply a similar transformation for pointer types to improve the 1538 // quality of the generated IR. 1539 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) 1540 if ((!IsWin32StructABI && SeltTy->isRealFloatingType()) 1541 || SeltTy->hasPointerRepresentation()) 1542 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 1543 1544 // FIXME: We should be able to narrow this integer in cases with dead 1545 // padding. 1546 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size)); 1547 } 1548 1549 return getIndirectReturnResult(RetTy, State); 1550 } 1551 1552 // Treat an enum type as its underlying type. 1553 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 1554 RetTy = EnumTy->getDecl()->getIntegerType(); 1555 1556 if (const auto *EIT = RetTy->getAs<ExtIntType>()) 1557 if (EIT->getNumBits() > 64) 1558 return getIndirectReturnResult(RetTy, State); 1559 1560 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 1561 : ABIArgInfo::getDirect()); 1562 } 1563 1564 static bool isSIMDVectorType(ASTContext &Context, QualType Ty) { 1565 return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128; 1566 } 1567 1568 static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) { 1569 const RecordType *RT = Ty->getAs<RecordType>(); 1570 if (!RT) 1571 return 0; 1572 const RecordDecl *RD = RT->getDecl(); 1573 1574 // If this is a C++ record, check the bases first. 1575 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 1576 for (const auto &I : CXXRD->bases()) 1577 if (!isRecordWithSIMDVectorType(Context, I.getType())) 1578 return false; 1579 1580 for (const auto *i : RD->fields()) { 1581 QualType FT = i->getType(); 1582 1583 if (isSIMDVectorType(Context, FT)) 1584 return true; 1585 1586 if (isRecordWithSIMDVectorType(Context, FT)) 1587 return true; 1588 } 1589 1590 return false; 1591 } 1592 1593 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty, 1594 unsigned Align) const { 1595 // Otherwise, if the alignment is less than or equal to the minimum ABI 1596 // alignment, just use the default; the backend will handle this. 1597 if (Align <= MinABIStackAlignInBytes) 1598 return 0; // Use default alignment. 1599 1600 // On non-Darwin, the stack type alignment is always 4. 1601 if (!IsDarwinVectorABI) { 1602 // Set explicit alignment, since we may need to realign the top. 1603 return MinABIStackAlignInBytes; 1604 } 1605 1606 // Otherwise, if the type contains an SSE vector type, the alignment is 16. 1607 if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) || 1608 isRecordWithSIMDVectorType(getContext(), Ty))) 1609 return 16; 1610 1611 return MinABIStackAlignInBytes; 1612 } 1613 1614 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal, 1615 CCState &State) const { 1616 if (!ByVal) { 1617 if (State.FreeRegs) { 1618 --State.FreeRegs; // Non-byval indirects just use one pointer. 1619 if (!IsMCUABI) 1620 return getNaturalAlignIndirectInReg(Ty); 1621 } 1622 return getNaturalAlignIndirect(Ty, false); 1623 } 1624 1625 // Compute the byval alignment. 1626 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 1627 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign); 1628 if (StackAlign == 0) 1629 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true); 1630 1631 // If the stack alignment is less than the type alignment, realign the 1632 // argument. 1633 bool Realign = TypeAlign > StackAlign; 1634 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign), 1635 /*ByVal=*/true, Realign); 1636 } 1637 1638 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const { 1639 const Type *T = isSingleElementStruct(Ty, getContext()); 1640 if (!T) 1641 T = Ty.getTypePtr(); 1642 1643 if (const BuiltinType *BT = T->getAs<BuiltinType>()) { 1644 BuiltinType::Kind K = BT->getKind(); 1645 if (K == BuiltinType::Float || K == BuiltinType::Double) 1646 return Float; 1647 } 1648 return Integer; 1649 } 1650 1651 bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const { 1652 if (!IsSoftFloatABI) { 1653 Class C = classify(Ty); 1654 if (C == Float) 1655 return false; 1656 } 1657 1658 unsigned Size = getContext().getTypeSize(Ty); 1659 unsigned SizeInRegs = (Size + 31) / 32; 1660 1661 if (SizeInRegs == 0) 1662 return false; 1663 1664 if (!IsMCUABI) { 1665 if (SizeInRegs > State.FreeRegs) { 1666 State.FreeRegs = 0; 1667 return false; 1668 } 1669 } else { 1670 // The MCU psABI allows passing parameters in-reg even if there are 1671 // earlier parameters that are passed on the stack. Also, 1672 // it does not allow passing >8-byte structs in-register, 1673 // even if there are 3 free registers available. 1674 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2) 1675 return false; 1676 } 1677 1678 State.FreeRegs -= SizeInRegs; 1679 return true; 1680 } 1681 1682 bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State, 1683 bool &InReg, 1684 bool &NeedsPadding) const { 1685 // On Windows, aggregates other than HFAs are never passed in registers, and 1686 // they do not consume register slots. Homogenous floating-point aggregates 1687 // (HFAs) have already been dealt with at this point. 1688 if (IsWin32StructABI && isAggregateTypeForABI(Ty)) 1689 return false; 1690 1691 NeedsPadding = false; 1692 InReg = !IsMCUABI; 1693 1694 if (!updateFreeRegs(Ty, State)) 1695 return false; 1696 1697 if (IsMCUABI) 1698 return true; 1699 1700 if (State.CC == llvm::CallingConv::X86_FastCall || 1701 State.CC == llvm::CallingConv::X86_VectorCall || 1702 State.CC == llvm::CallingConv::X86_RegCall) { 1703 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs) 1704 NeedsPadding = true; 1705 1706 return false; 1707 } 1708 1709 return true; 1710 } 1711 1712 bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const { 1713 if (!updateFreeRegs(Ty, State)) 1714 return false; 1715 1716 if (IsMCUABI) 1717 return false; 1718 1719 if (State.CC == llvm::CallingConv::X86_FastCall || 1720 State.CC == llvm::CallingConv::X86_VectorCall || 1721 State.CC == llvm::CallingConv::X86_RegCall) { 1722 if (getContext().getTypeSize(Ty) > 32) 1723 return false; 1724 1725 return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() || 1726 Ty->isReferenceType()); 1727 } 1728 1729 return true; 1730 } 1731 1732 void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const { 1733 // Vectorcall x86 works subtly different than in x64, so the format is 1734 // a bit different than the x64 version. First, all vector types (not HVAs) 1735 // are assigned, with the first 6 ending up in the [XYZ]MM0-5 registers. 1736 // This differs from the x64 implementation, where the first 6 by INDEX get 1737 // registers. 1738 // In the second pass over the arguments, HVAs are passed in the remaining 1739 // vector registers if possible, or indirectly by address. The address will be 1740 // passed in ECX/EDX if available. Any other arguments are passed according to 1741 // the usual fastcall rules. 1742 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments(); 1743 for (int I = 0, E = Args.size(); I < E; ++I) { 1744 const Type *Base = nullptr; 1745 uint64_t NumElts = 0; 1746 const QualType &Ty = Args[I].type; 1747 if ((Ty->isVectorType() || Ty->isBuiltinType()) && 1748 isHomogeneousAggregate(Ty, Base, NumElts)) { 1749 if (State.FreeSSERegs >= NumElts) { 1750 State.FreeSSERegs -= NumElts; 1751 Args[I].info = ABIArgInfo::getDirectInReg(); 1752 State.IsPreassigned.set(I); 1753 } 1754 } 1755 } 1756 } 1757 1758 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, 1759 CCState &State) const { 1760 // FIXME: Set alignment on indirect arguments. 1761 bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall; 1762 bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall; 1763 bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall; 1764 1765 Ty = useFirstFieldIfTransparentUnion(Ty); 1766 TypeInfo TI = getContext().getTypeInfo(Ty); 1767 1768 // Check with the C++ ABI first. 1769 const RecordType *RT = Ty->getAs<RecordType>(); 1770 if (RT) { 1771 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); 1772 if (RAA == CGCXXABI::RAA_Indirect) { 1773 return getIndirectResult(Ty, false, State); 1774 } else if (RAA == CGCXXABI::RAA_DirectInMemory) { 1775 // The field index doesn't matter, we'll fix it up later. 1776 return ABIArgInfo::getInAlloca(/*FieldIndex=*/0); 1777 } 1778 } 1779 1780 // Regcall uses the concept of a homogenous vector aggregate, similar 1781 // to other targets. 1782 const Type *Base = nullptr; 1783 uint64_t NumElts = 0; 1784 if ((IsRegCall || IsVectorCall) && 1785 isHomogeneousAggregate(Ty, Base, NumElts)) { 1786 if (State.FreeSSERegs >= NumElts) { 1787 State.FreeSSERegs -= NumElts; 1788 1789 // Vectorcall passes HVAs directly and does not flatten them, but regcall 1790 // does. 1791 if (IsVectorCall) 1792 return getDirectX86Hva(); 1793 1794 if (Ty->isBuiltinType() || Ty->isVectorType()) 1795 return ABIArgInfo::getDirect(); 1796 return ABIArgInfo::getExpand(); 1797 } 1798 return getIndirectResult(Ty, /*ByVal=*/false, State); 1799 } 1800 1801 if (isAggregateTypeForABI(Ty)) { 1802 // Structures with flexible arrays are always indirect. 1803 // FIXME: This should not be byval! 1804 if (RT && RT->getDecl()->hasFlexibleArrayMember()) 1805 return getIndirectResult(Ty, true, State); 1806 1807 // Ignore empty structs/unions on non-Windows. 1808 if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true)) 1809 return ABIArgInfo::getIgnore(); 1810 1811 llvm::LLVMContext &LLVMContext = getVMContext(); 1812 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 1813 bool NeedsPadding = false; 1814 bool InReg; 1815 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) { 1816 unsigned SizeInRegs = (TI.Width + 31) / 32; 1817 SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32); 1818 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 1819 if (InReg) 1820 return ABIArgInfo::getDirectInReg(Result); 1821 else 1822 return ABIArgInfo::getDirect(Result); 1823 } 1824 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr; 1825 1826 // Pass over-aligned aggregates on Windows indirectly. This behavior was 1827 // added in MSVC 2015. 1828 if (IsWin32StructABI && TI.AlignIsRequired && TI.Align > 32) 1829 return getIndirectResult(Ty, /*ByVal=*/false, State); 1830 1831 // Expand small (<= 128-bit) record types when we know that the stack layout 1832 // of those arguments will match the struct. This is important because the 1833 // LLVM backend isn't smart enough to remove byval, which inhibits many 1834 // optimizations. 1835 // Don't do this for the MCU if there are still free integer registers 1836 // (see X86_64 ABI for full explanation). 1837 if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) && 1838 canExpandIndirectArgument(Ty)) 1839 return ABIArgInfo::getExpandWithPadding( 1840 IsFastCall || IsVectorCall || IsRegCall, PaddingType); 1841 1842 return getIndirectResult(Ty, true, State); 1843 } 1844 1845 if (const VectorType *VT = Ty->getAs<VectorType>()) { 1846 // On Windows, vectors are passed directly if registers are available, or 1847 // indirectly if not. This avoids the need to align argument memory. Pass 1848 // user-defined vector types larger than 512 bits indirectly for simplicity. 1849 if (IsWin32StructABI) { 1850 if (TI.Width <= 512 && State.FreeSSERegs > 0) { 1851 --State.FreeSSERegs; 1852 return ABIArgInfo::getDirectInReg(); 1853 } 1854 return getIndirectResult(Ty, /*ByVal=*/false, State); 1855 } 1856 1857 // On Darwin, some vectors are passed in memory, we handle this by passing 1858 // it as an i8/i16/i32/i64. 1859 if (IsDarwinVectorABI) { 1860 if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) || 1861 (TI.Width == 64 && VT->getNumElements() == 1)) 1862 return ABIArgInfo::getDirect( 1863 llvm::IntegerType::get(getVMContext(), TI.Width)); 1864 } 1865 1866 if (IsX86_MMXType(CGT.ConvertType(Ty))) 1867 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64)); 1868 1869 return ABIArgInfo::getDirect(); 1870 } 1871 1872 1873 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1874 Ty = EnumTy->getDecl()->getIntegerType(); 1875 1876 bool InReg = shouldPrimitiveUseInReg(Ty, State); 1877 1878 if (isPromotableIntegerTypeForABI(Ty)) { 1879 if (InReg) 1880 return ABIArgInfo::getExtendInReg(Ty); 1881 return ABIArgInfo::getExtend(Ty); 1882 } 1883 1884 if (const auto * EIT = Ty->getAs<ExtIntType>()) { 1885 if (EIT->getNumBits() <= 64) { 1886 if (InReg) 1887 return ABIArgInfo::getDirectInReg(); 1888 return ABIArgInfo::getDirect(); 1889 } 1890 return getIndirectResult(Ty, /*ByVal=*/false, State); 1891 } 1892 1893 if (InReg) 1894 return ABIArgInfo::getDirectInReg(); 1895 return ABIArgInfo::getDirect(); 1896 } 1897 1898 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const { 1899 CCState State(FI); 1900 if (IsMCUABI) 1901 State.FreeRegs = 3; 1902 else if (State.CC == llvm::CallingConv::X86_FastCall) { 1903 State.FreeRegs = 2; 1904 State.FreeSSERegs = 3; 1905 } else if (State.CC == llvm::CallingConv::X86_VectorCall) { 1906 State.FreeRegs = 2; 1907 State.FreeSSERegs = 6; 1908 } else if (FI.getHasRegParm()) 1909 State.FreeRegs = FI.getRegParm(); 1910 else if (State.CC == llvm::CallingConv::X86_RegCall) { 1911 State.FreeRegs = 5; 1912 State.FreeSSERegs = 8; 1913 } else if (IsWin32StructABI) { 1914 // Since MSVC 2015, the first three SSE vectors have been passed in 1915 // registers. The rest are passed indirectly. 1916 State.FreeRegs = DefaultNumRegisterParameters; 1917 State.FreeSSERegs = 3; 1918 } else 1919 State.FreeRegs = DefaultNumRegisterParameters; 1920 1921 if (!::classifyReturnType(getCXXABI(), FI, *this)) { 1922 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State); 1923 } else if (FI.getReturnInfo().isIndirect()) { 1924 // The C++ ABI is not aware of register usage, so we have to check if the 1925 // return value was sret and put it in a register ourselves if appropriate. 1926 if (State.FreeRegs) { 1927 --State.FreeRegs; // The sret parameter consumes a register. 1928 if (!IsMCUABI) 1929 FI.getReturnInfo().setInReg(true); 1930 } 1931 } 1932 1933 // The chain argument effectively gives us another free register. 1934 if (FI.isChainCall()) 1935 ++State.FreeRegs; 1936 1937 // For vectorcall, do a first pass over the arguments, assigning FP and vector 1938 // arguments to XMM registers as available. 1939 if (State.CC == llvm::CallingConv::X86_VectorCall) 1940 runVectorCallFirstPass(FI, State); 1941 1942 bool UsedInAlloca = false; 1943 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments(); 1944 for (int I = 0, E = Args.size(); I < E; ++I) { 1945 // Skip arguments that have already been assigned. 1946 if (State.IsPreassigned.test(I)) 1947 continue; 1948 1949 Args[I].info = classifyArgumentType(Args[I].type, State); 1950 UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca); 1951 } 1952 1953 // If we needed to use inalloca for any argument, do a second pass and rewrite 1954 // all the memory arguments to use inalloca. 1955 if (UsedInAlloca) 1956 rewriteWithInAlloca(FI); 1957 } 1958 1959 void 1960 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, 1961 CharUnits &StackOffset, ABIArgInfo &Info, 1962 QualType Type) const { 1963 // Arguments are always 4-byte-aligned. 1964 CharUnits WordSize = CharUnits::fromQuantity(4); 1965 assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct"); 1966 1967 // sret pointers and indirect things will require an extra pointer 1968 // indirection, unless they are byval. Most things are byval, and will not 1969 // require this indirection. 1970 bool IsIndirect = false; 1971 if (Info.isIndirect() && !Info.getIndirectByVal()) 1972 IsIndirect = true; 1973 Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect); 1974 llvm::Type *LLTy = CGT.ConvertTypeForMem(Type); 1975 if (IsIndirect) 1976 LLTy = LLTy->getPointerTo(0); 1977 FrameFields.push_back(LLTy); 1978 StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type); 1979 1980 // Insert padding bytes to respect alignment. 1981 CharUnits FieldEnd = StackOffset; 1982 StackOffset = FieldEnd.alignTo(WordSize); 1983 if (StackOffset != FieldEnd) { 1984 CharUnits NumBytes = StackOffset - FieldEnd; 1985 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext()); 1986 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity()); 1987 FrameFields.push_back(Ty); 1988 } 1989 } 1990 1991 static bool isArgInAlloca(const ABIArgInfo &Info) { 1992 // Leave ignored and inreg arguments alone. 1993 switch (Info.getKind()) { 1994 case ABIArgInfo::InAlloca: 1995 return true; 1996 case ABIArgInfo::Ignore: 1997 case ABIArgInfo::IndirectAliased: 1998 return false; 1999 case ABIArgInfo::Indirect: 2000 case ABIArgInfo::Direct: 2001 case ABIArgInfo::Extend: 2002 return !Info.getInReg(); 2003 case ABIArgInfo::Expand: 2004 case ABIArgInfo::CoerceAndExpand: 2005 // These are aggregate types which are never passed in registers when 2006 // inalloca is involved. 2007 return true; 2008 } 2009 llvm_unreachable("invalid enum"); 2010 } 2011 2012 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const { 2013 assert(IsWin32StructABI && "inalloca only supported on win32"); 2014 2015 // Build a packed struct type for all of the arguments in memory. 2016 SmallVector<llvm::Type *, 6> FrameFields; 2017 2018 // The stack alignment is always 4. 2019 CharUnits StackAlign = CharUnits::fromQuantity(4); 2020 2021 CharUnits StackOffset; 2022 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end(); 2023 2024 // Put 'this' into the struct before 'sret', if necessary. 2025 bool IsThisCall = 2026 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall; 2027 ABIArgInfo &Ret = FI.getReturnInfo(); 2028 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall && 2029 isArgInAlloca(I->info)) { 2030 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); 2031 ++I; 2032 } 2033 2034 // Put the sret parameter into the inalloca struct if it's in memory. 2035 if (Ret.isIndirect() && !Ret.getInReg()) { 2036 addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType()); 2037 // On Windows, the hidden sret parameter is always returned in eax. 2038 Ret.setInAllocaSRet(IsWin32StructABI); 2039 } 2040 2041 // Skip the 'this' parameter in ecx. 2042 if (IsThisCall) 2043 ++I; 2044 2045 // Put arguments passed in memory into the struct. 2046 for (; I != E; ++I) { 2047 if (isArgInAlloca(I->info)) 2048 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); 2049 } 2050 2051 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields, 2052 /*isPacked=*/true), 2053 StackAlign); 2054 } 2055 2056 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, 2057 Address VAListAddr, QualType Ty) const { 2058 2059 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 2060 2061 // x86-32 changes the alignment of certain arguments on the stack. 2062 // 2063 // Just messing with TypeInfo like this works because we never pass 2064 // anything indirectly. 2065 TypeInfo.second = CharUnits::fromQuantity( 2066 getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity())); 2067 2068 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, 2069 TypeInfo, CharUnits::fromQuantity(4), 2070 /*AllowHigherAlign*/ true); 2071 } 2072 2073 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI( 2074 const llvm::Triple &Triple, const CodeGenOptions &Opts) { 2075 assert(Triple.getArch() == llvm::Triple::x86); 2076 2077 switch (Opts.getStructReturnConvention()) { 2078 case CodeGenOptions::SRCK_Default: 2079 break; 2080 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return 2081 return false; 2082 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return 2083 return true; 2084 } 2085 2086 if (Triple.isOSDarwin() || Triple.isOSIAMCU()) 2087 return true; 2088 2089 switch (Triple.getOS()) { 2090 case llvm::Triple::DragonFly: 2091 case llvm::Triple::FreeBSD: 2092 case llvm::Triple::OpenBSD: 2093 case llvm::Triple::Win32: 2094 return true; 2095 default: 2096 return false; 2097 } 2098 } 2099 2100 void X86_32TargetCodeGenInfo::setTargetAttributes( 2101 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 2102 if (GV->isDeclaration()) 2103 return; 2104 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 2105 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 2106 llvm::Function *Fn = cast<llvm::Function>(GV); 2107 Fn->addFnAttr("stackrealign"); 2108 } 2109 if (FD->hasAttr<AnyX86InterruptAttr>()) { 2110 llvm::Function *Fn = cast<llvm::Function>(GV); 2111 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 2112 } 2113 } 2114 } 2115 2116 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( 2117 CodeGen::CodeGenFunction &CGF, 2118 llvm::Value *Address) const { 2119 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2120 2121 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 2122 2123 // 0-7 are the eight integer registers; the order is different 2124 // on Darwin (for EH), but the range is the same. 2125 // 8 is %eip. 2126 AssignToArrayRange(Builder, Address, Four8, 0, 8); 2127 2128 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) { 2129 // 12-16 are st(0..4). Not sure why we stop at 4. 2130 // These have size 16, which is sizeof(long double) on 2131 // platforms with 8-byte alignment for that type. 2132 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16); 2133 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16); 2134 2135 } else { 2136 // 9 is %eflags, which doesn't get a size on Darwin for some 2137 // reason. 2138 Builder.CreateAlignedStore( 2139 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9), 2140 CharUnits::One()); 2141 2142 // 11-16 are st(0..5). Not sure why we stop at 5. 2143 // These have size 12, which is sizeof(long double) on 2144 // platforms with 4-byte alignment for that type. 2145 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12); 2146 AssignToArrayRange(Builder, Address, Twelve8, 11, 16); 2147 } 2148 2149 return false; 2150 } 2151 2152 //===----------------------------------------------------------------------===// 2153 // X86-64 ABI Implementation 2154 //===----------------------------------------------------------------------===// 2155 2156 2157 namespace { 2158 /// The AVX ABI level for X86 targets. 2159 enum class X86AVXABILevel { 2160 None, 2161 AVX, 2162 AVX512 2163 }; 2164 2165 /// \p returns the size in bits of the largest (native) vector for \p AVXLevel. 2166 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) { 2167 switch (AVXLevel) { 2168 case X86AVXABILevel::AVX512: 2169 return 512; 2170 case X86AVXABILevel::AVX: 2171 return 256; 2172 case X86AVXABILevel::None: 2173 return 128; 2174 } 2175 llvm_unreachable("Unknown AVXLevel"); 2176 } 2177 2178 /// X86_64ABIInfo - The X86_64 ABI information. 2179 class X86_64ABIInfo : public SwiftABIInfo { 2180 enum Class { 2181 Integer = 0, 2182 SSE, 2183 SSEUp, 2184 X87, 2185 X87Up, 2186 ComplexX87, 2187 NoClass, 2188 Memory 2189 }; 2190 2191 /// merge - Implement the X86_64 ABI merging algorithm. 2192 /// 2193 /// Merge an accumulating classification \arg Accum with a field 2194 /// classification \arg Field. 2195 /// 2196 /// \param Accum - The accumulating classification. This should 2197 /// always be either NoClass or the result of a previous merge 2198 /// call. In addition, this should never be Memory (the caller 2199 /// should just return Memory for the aggregate). 2200 static Class merge(Class Accum, Class Field); 2201 2202 /// postMerge - Implement the X86_64 ABI post merging algorithm. 2203 /// 2204 /// Post merger cleanup, reduces a malformed Hi and Lo pair to 2205 /// final MEMORY or SSE classes when necessary. 2206 /// 2207 /// \param AggregateSize - The size of the current aggregate in 2208 /// the classification process. 2209 /// 2210 /// \param Lo - The classification for the parts of the type 2211 /// residing in the low word of the containing object. 2212 /// 2213 /// \param Hi - The classification for the parts of the type 2214 /// residing in the higher words of the containing object. 2215 /// 2216 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const; 2217 2218 /// classify - Determine the x86_64 register classes in which the 2219 /// given type T should be passed. 2220 /// 2221 /// \param Lo - The classification for the parts of the type 2222 /// residing in the low word of the containing object. 2223 /// 2224 /// \param Hi - The classification for the parts of the type 2225 /// residing in the high word of the containing object. 2226 /// 2227 /// \param OffsetBase - The bit offset of this type in the 2228 /// containing object. Some parameters are classified different 2229 /// depending on whether they straddle an eightbyte boundary. 2230 /// 2231 /// \param isNamedArg - Whether the argument in question is a "named" 2232 /// argument, as used in AMD64-ABI 3.5.7. 2233 /// 2234 /// If a word is unused its result will be NoClass; if a type should 2235 /// be passed in Memory then at least the classification of \arg Lo 2236 /// will be Memory. 2237 /// 2238 /// The \arg Lo class will be NoClass iff the argument is ignored. 2239 /// 2240 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will 2241 /// also be ComplexX87. 2242 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi, 2243 bool isNamedArg) const; 2244 2245 llvm::Type *GetByteVectorType(QualType Ty) const; 2246 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType, 2247 unsigned IROffset, QualType SourceTy, 2248 unsigned SourceOffset) const; 2249 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType, 2250 unsigned IROffset, QualType SourceTy, 2251 unsigned SourceOffset) const; 2252 2253 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 2254 /// such that the argument will be returned in memory. 2255 ABIArgInfo getIndirectReturnResult(QualType Ty) const; 2256 2257 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 2258 /// such that the argument will be passed in memory. 2259 /// 2260 /// \param freeIntRegs - The number of free integer registers remaining 2261 /// available. 2262 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const; 2263 2264 ABIArgInfo classifyReturnType(QualType RetTy) const; 2265 2266 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs, 2267 unsigned &neededInt, unsigned &neededSSE, 2268 bool isNamedArg) const; 2269 2270 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt, 2271 unsigned &NeededSSE) const; 2272 2273 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt, 2274 unsigned &NeededSSE) const; 2275 2276 bool IsIllegalVectorType(QualType Ty) const; 2277 2278 /// The 0.98 ABI revision clarified a lot of ambiguities, 2279 /// unfortunately in ways that were not always consistent with 2280 /// certain previous compilers. In particular, platforms which 2281 /// required strict binary compatibility with older versions of GCC 2282 /// may need to exempt themselves. 2283 bool honorsRevision0_98() const { 2284 return !getTarget().getTriple().isOSDarwin(); 2285 } 2286 2287 /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to 2288 /// classify it as INTEGER (for compatibility with older clang compilers). 2289 bool classifyIntegerMMXAsSSE() const { 2290 // Clang <= 3.8 did not do this. 2291 if (getContext().getLangOpts().getClangABICompat() <= 2292 LangOptions::ClangABI::Ver3_8) 2293 return false; 2294 2295 const llvm::Triple &Triple = getTarget().getTriple(); 2296 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4) 2297 return false; 2298 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10) 2299 return false; 2300 return true; 2301 } 2302 2303 // GCC classifies vectors of __int128 as memory. 2304 bool passInt128VectorsInMem() const { 2305 // Clang <= 9.0 did not do this. 2306 if (getContext().getLangOpts().getClangABICompat() <= 2307 LangOptions::ClangABI::Ver9) 2308 return false; 2309 2310 const llvm::Triple &T = getTarget().getTriple(); 2311 return T.isOSLinux() || T.isOSNetBSD(); 2312 } 2313 2314 X86AVXABILevel AVXLevel; 2315 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on 2316 // 64-bit hardware. 2317 bool Has64BitPointers; 2318 2319 public: 2320 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) : 2321 SwiftABIInfo(CGT), AVXLevel(AVXLevel), 2322 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) { 2323 } 2324 2325 bool isPassedUsingAVXType(QualType type) const { 2326 unsigned neededInt, neededSSE; 2327 // The freeIntRegs argument doesn't matter here. 2328 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE, 2329 /*isNamedArg*/true); 2330 if (info.isDirect()) { 2331 llvm::Type *ty = info.getCoerceToType(); 2332 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty)) 2333 return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128; 2334 } 2335 return false; 2336 } 2337 2338 void computeInfo(CGFunctionInfo &FI) const override; 2339 2340 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 2341 QualType Ty) const override; 2342 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 2343 QualType Ty) const override; 2344 2345 bool has64BitPointers() const { 2346 return Has64BitPointers; 2347 } 2348 2349 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, 2350 bool asReturnValue) const override { 2351 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 2352 } 2353 bool isSwiftErrorInRegister() const override { 2354 return true; 2355 } 2356 }; 2357 2358 /// WinX86_64ABIInfo - The Windows X86_64 ABI information. 2359 class WinX86_64ABIInfo : public SwiftABIInfo { 2360 public: 2361 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) 2362 : SwiftABIInfo(CGT), AVXLevel(AVXLevel), 2363 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {} 2364 2365 void computeInfo(CGFunctionInfo &FI) const override; 2366 2367 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 2368 QualType Ty) const override; 2369 2370 bool isHomogeneousAggregateBaseType(QualType Ty) const override { 2371 // FIXME: Assumes vectorcall is in use. 2372 return isX86VectorTypeForVectorCall(getContext(), Ty); 2373 } 2374 2375 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 2376 uint64_t NumMembers) const override { 2377 // FIXME: Assumes vectorcall is in use. 2378 return isX86VectorCallAggregateSmallEnough(NumMembers); 2379 } 2380 2381 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars, 2382 bool asReturnValue) const override { 2383 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 2384 } 2385 2386 bool isSwiftErrorInRegister() const override { 2387 return true; 2388 } 2389 2390 private: 2391 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType, 2392 bool IsVectorCall, bool IsRegCall) const; 2393 ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs, 2394 const ABIArgInfo ¤t) const; 2395 void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs, 2396 bool IsVectorCall, bool IsRegCall) const; 2397 2398 X86AVXABILevel AVXLevel; 2399 2400 bool IsMingw64; 2401 }; 2402 2403 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo { 2404 public: 2405 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) 2406 : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {} 2407 2408 const X86_64ABIInfo &getABIInfo() const { 2409 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo()); 2410 } 2411 2412 /// Disable tail call on x86-64. The epilogue code before the tail jump blocks 2413 /// autoreleaseRV/retainRV and autoreleaseRV/unsafeClaimRV optimizations. 2414 bool markARCOptimizedReturnCallsAsNoTail() const override { return true; } 2415 2416 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 2417 return 7; 2418 } 2419 2420 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2421 llvm::Value *Address) const override { 2422 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 2423 2424 // 0-15 are the 16 integer registers. 2425 // 16 is %rip. 2426 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 2427 return false; 2428 } 2429 2430 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 2431 StringRef Constraint, 2432 llvm::Type* Ty) const override { 2433 return X86AdjustInlineAsmType(CGF, Constraint, Ty); 2434 } 2435 2436 bool isNoProtoCallVariadic(const CallArgList &args, 2437 const FunctionNoProtoType *fnType) const override { 2438 // The default CC on x86-64 sets %al to the number of SSA 2439 // registers used, and GCC sets this when calling an unprototyped 2440 // function, so we override the default behavior. However, don't do 2441 // that when AVX types are involved: the ABI explicitly states it is 2442 // undefined, and it doesn't work in practice because of how the ABI 2443 // defines varargs anyway. 2444 if (fnType->getCallConv() == CC_C) { 2445 bool HasAVXType = false; 2446 for (CallArgList::const_iterator 2447 it = args.begin(), ie = args.end(); it != ie; ++it) { 2448 if (getABIInfo().isPassedUsingAVXType(it->Ty)) { 2449 HasAVXType = true; 2450 break; 2451 } 2452 } 2453 2454 if (!HasAVXType) 2455 return true; 2456 } 2457 2458 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType); 2459 } 2460 2461 llvm::Constant * 2462 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { 2463 unsigned Sig = (0xeb << 0) | // jmp rel8 2464 (0x06 << 8) | // .+0x08 2465 ('v' << 16) | 2466 ('2' << 24); 2467 return llvm::ConstantInt::get(CGM.Int32Ty, Sig); 2468 } 2469 2470 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2471 CodeGen::CodeGenModule &CGM) const override { 2472 if (GV->isDeclaration()) 2473 return; 2474 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 2475 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 2476 llvm::Function *Fn = cast<llvm::Function>(GV); 2477 Fn->addFnAttr("stackrealign"); 2478 } 2479 if (FD->hasAttr<AnyX86InterruptAttr>()) { 2480 llvm::Function *Fn = cast<llvm::Function>(GV); 2481 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 2482 } 2483 } 2484 } 2485 2486 void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, 2487 const FunctionDecl *Caller, 2488 const FunctionDecl *Callee, 2489 const CallArgList &Args) const override; 2490 }; 2491 2492 static void initFeatureMaps(const ASTContext &Ctx, 2493 llvm::StringMap<bool> &CallerMap, 2494 const FunctionDecl *Caller, 2495 llvm::StringMap<bool> &CalleeMap, 2496 const FunctionDecl *Callee) { 2497 if (CalleeMap.empty() && CallerMap.empty()) { 2498 // The caller is potentially nullptr in the case where the call isn't in a 2499 // function. In this case, the getFunctionFeatureMap ensures we just get 2500 // the TU level setting (since it cannot be modified by 'target'.. 2501 Ctx.getFunctionFeatureMap(CallerMap, Caller); 2502 Ctx.getFunctionFeatureMap(CalleeMap, Callee); 2503 } 2504 } 2505 2506 static bool checkAVXParamFeature(DiagnosticsEngine &Diag, 2507 SourceLocation CallLoc, 2508 const llvm::StringMap<bool> &CallerMap, 2509 const llvm::StringMap<bool> &CalleeMap, 2510 QualType Ty, StringRef Feature, 2511 bool IsArgument) { 2512 bool CallerHasFeat = CallerMap.lookup(Feature); 2513 bool CalleeHasFeat = CalleeMap.lookup(Feature); 2514 if (!CallerHasFeat && !CalleeHasFeat) 2515 return Diag.Report(CallLoc, diag::warn_avx_calling_convention) 2516 << IsArgument << Ty << Feature; 2517 2518 // Mixing calling conventions here is very clearly an error. 2519 if (!CallerHasFeat || !CalleeHasFeat) 2520 return Diag.Report(CallLoc, diag::err_avx_calling_convention) 2521 << IsArgument << Ty << Feature; 2522 2523 // Else, both caller and callee have the required feature, so there is no need 2524 // to diagnose. 2525 return false; 2526 } 2527 2528 static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx, 2529 SourceLocation CallLoc, 2530 const llvm::StringMap<bool> &CallerMap, 2531 const llvm::StringMap<bool> &CalleeMap, QualType Ty, 2532 bool IsArgument) { 2533 uint64_t Size = Ctx.getTypeSize(Ty); 2534 if (Size > 256) 2535 return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, 2536 "avx512f", IsArgument); 2537 2538 if (Size > 128) 2539 return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx", 2540 IsArgument); 2541 2542 return false; 2543 } 2544 2545 void X86_64TargetCodeGenInfo::checkFunctionCallABI( 2546 CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, 2547 const FunctionDecl *Callee, const CallArgList &Args) const { 2548 llvm::StringMap<bool> CallerMap; 2549 llvm::StringMap<bool> CalleeMap; 2550 unsigned ArgIndex = 0; 2551 2552 // We need to loop through the actual call arguments rather than the the 2553 // function's parameters, in case this variadic. 2554 for (const CallArg &Arg : Args) { 2555 // The "avx" feature changes how vectors >128 in size are passed. "avx512f" 2556 // additionally changes how vectors >256 in size are passed. Like GCC, we 2557 // warn when a function is called with an argument where this will change. 2558 // Unlike GCC, we also error when it is an obvious ABI mismatch, that is, 2559 // the caller and callee features are mismatched. 2560 // Unfortunately, we cannot do this diagnostic in SEMA, since the callee can 2561 // change its ABI with attribute-target after this call. 2562 if (Arg.getType()->isVectorType() && 2563 CGM.getContext().getTypeSize(Arg.getType()) > 128) { 2564 initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee); 2565 QualType Ty = Arg.getType(); 2566 // The CallArg seems to have desugared the type already, so for clearer 2567 // diagnostics, replace it with the type in the FunctionDecl if possible. 2568 if (ArgIndex < Callee->getNumParams()) 2569 Ty = Callee->getParamDecl(ArgIndex)->getType(); 2570 2571 if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap, 2572 CalleeMap, Ty, /*IsArgument*/ true)) 2573 return; 2574 } 2575 ++ArgIndex; 2576 } 2577 2578 // Check return always, as we don't have a good way of knowing in codegen 2579 // whether this value is used, tail-called, etc. 2580 if (Callee->getReturnType()->isVectorType() && 2581 CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) { 2582 initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee); 2583 checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap, 2584 CalleeMap, Callee->getReturnType(), 2585 /*IsArgument*/ false); 2586 } 2587 } 2588 2589 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) { 2590 // If the argument does not end in .lib, automatically add the suffix. 2591 // If the argument contains a space, enclose it in quotes. 2592 // This matches the behavior of MSVC. 2593 bool Quote = (Lib.find(" ") != StringRef::npos); 2594 std::string ArgStr = Quote ? "\"" : ""; 2595 ArgStr += Lib; 2596 if (!Lib.endswith_lower(".lib") && !Lib.endswith_lower(".a")) 2597 ArgStr += ".lib"; 2598 ArgStr += Quote ? "\"" : ""; 2599 return ArgStr; 2600 } 2601 2602 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo { 2603 public: 2604 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 2605 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI, 2606 unsigned NumRegisterParameters) 2607 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI, 2608 Win32StructABI, NumRegisterParameters, false) {} 2609 2610 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2611 CodeGen::CodeGenModule &CGM) const override; 2612 2613 void getDependentLibraryOption(llvm::StringRef Lib, 2614 llvm::SmallString<24> &Opt) const override { 2615 Opt = "/DEFAULTLIB:"; 2616 Opt += qualifyWindowsLibrary(Lib); 2617 } 2618 2619 void getDetectMismatchOption(llvm::StringRef Name, 2620 llvm::StringRef Value, 2621 llvm::SmallString<32> &Opt) const override { 2622 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 2623 } 2624 }; 2625 2626 static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2627 CodeGen::CodeGenModule &CGM) { 2628 if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) { 2629 2630 if (CGM.getCodeGenOpts().StackProbeSize != 4096) 2631 Fn->addFnAttr("stack-probe-size", 2632 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize)); 2633 if (CGM.getCodeGenOpts().NoStackArgProbe) 2634 Fn->addFnAttr("no-stack-arg-probe"); 2635 } 2636 } 2637 2638 void WinX86_32TargetCodeGenInfo::setTargetAttributes( 2639 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 2640 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 2641 if (GV->isDeclaration()) 2642 return; 2643 addStackProbeTargetAttributes(D, GV, CGM); 2644 } 2645 2646 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 2647 public: 2648 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 2649 X86AVXABILevel AVXLevel) 2650 : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {} 2651 2652 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2653 CodeGen::CodeGenModule &CGM) const override; 2654 2655 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 2656 return 7; 2657 } 2658 2659 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2660 llvm::Value *Address) const override { 2661 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 2662 2663 // 0-15 are the 16 integer registers. 2664 // 16 is %rip. 2665 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 2666 return false; 2667 } 2668 2669 void getDependentLibraryOption(llvm::StringRef Lib, 2670 llvm::SmallString<24> &Opt) const override { 2671 Opt = "/DEFAULTLIB:"; 2672 Opt += qualifyWindowsLibrary(Lib); 2673 } 2674 2675 void getDetectMismatchOption(llvm::StringRef Name, 2676 llvm::StringRef Value, 2677 llvm::SmallString<32> &Opt) const override { 2678 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 2679 } 2680 }; 2681 2682 void WinX86_64TargetCodeGenInfo::setTargetAttributes( 2683 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 2684 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 2685 if (GV->isDeclaration()) 2686 return; 2687 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 2688 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 2689 llvm::Function *Fn = cast<llvm::Function>(GV); 2690 Fn->addFnAttr("stackrealign"); 2691 } 2692 if (FD->hasAttr<AnyX86InterruptAttr>()) { 2693 llvm::Function *Fn = cast<llvm::Function>(GV); 2694 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 2695 } 2696 } 2697 2698 addStackProbeTargetAttributes(D, GV, CGM); 2699 } 2700 } 2701 2702 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, 2703 Class &Hi) const { 2704 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: 2705 // 2706 // (a) If one of the classes is Memory, the whole argument is passed in 2707 // memory. 2708 // 2709 // (b) If X87UP is not preceded by X87, the whole argument is passed in 2710 // memory. 2711 // 2712 // (c) If the size of the aggregate exceeds two eightbytes and the first 2713 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole 2714 // argument is passed in memory. NOTE: This is necessary to keep the 2715 // ABI working for processors that don't support the __m256 type. 2716 // 2717 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. 2718 // 2719 // Some of these are enforced by the merging logic. Others can arise 2720 // only with unions; for example: 2721 // union { _Complex double; unsigned; } 2722 // 2723 // Note that clauses (b) and (c) were added in 0.98. 2724 // 2725 if (Hi == Memory) 2726 Lo = Memory; 2727 if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) 2728 Lo = Memory; 2729 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) 2730 Lo = Memory; 2731 if (Hi == SSEUp && Lo != SSE) 2732 Hi = SSE; 2733 } 2734 2735 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { 2736 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 2737 // classified recursively so that always two fields are 2738 // considered. The resulting class is calculated according to 2739 // the classes of the fields in the eightbyte: 2740 // 2741 // (a) If both classes are equal, this is the resulting class. 2742 // 2743 // (b) If one of the classes is NO_CLASS, the resulting class is 2744 // the other class. 2745 // 2746 // (c) If one of the classes is MEMORY, the result is the MEMORY 2747 // class. 2748 // 2749 // (d) If one of the classes is INTEGER, the result is the 2750 // INTEGER. 2751 // 2752 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 2753 // MEMORY is used as class. 2754 // 2755 // (f) Otherwise class SSE is used. 2756 2757 // Accum should never be memory (we should have returned) or 2758 // ComplexX87 (because this cannot be passed in a structure). 2759 assert((Accum != Memory && Accum != ComplexX87) && 2760 "Invalid accumulated classification during merge."); 2761 if (Accum == Field || Field == NoClass) 2762 return Accum; 2763 if (Field == Memory) 2764 return Memory; 2765 if (Accum == NoClass) 2766 return Field; 2767 if (Accum == Integer || Field == Integer) 2768 return Integer; 2769 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 2770 Accum == X87 || Accum == X87Up) 2771 return Memory; 2772 return SSE; 2773 } 2774 2775 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, 2776 Class &Lo, Class &Hi, bool isNamedArg) const { 2777 // FIXME: This code can be simplified by introducing a simple value class for 2778 // Class pairs with appropriate constructor methods for the various 2779 // situations. 2780 2781 // FIXME: Some of the split computations are wrong; unaligned vectors 2782 // shouldn't be passed in registers for example, so there is no chance they 2783 // can straddle an eightbyte. Verify & simplify. 2784 2785 Lo = Hi = NoClass; 2786 2787 Class &Current = OffsetBase < 64 ? Lo : Hi; 2788 Current = Memory; 2789 2790 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 2791 BuiltinType::Kind k = BT->getKind(); 2792 2793 if (k == BuiltinType::Void) { 2794 Current = NoClass; 2795 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 2796 Lo = Integer; 2797 Hi = Integer; 2798 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 2799 Current = Integer; 2800 } else if (k == BuiltinType::Float || k == BuiltinType::Double) { 2801 Current = SSE; 2802 } else if (k == BuiltinType::LongDouble) { 2803 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 2804 if (LDF == &llvm::APFloat::IEEEquad()) { 2805 Lo = SSE; 2806 Hi = SSEUp; 2807 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) { 2808 Lo = X87; 2809 Hi = X87Up; 2810 } else if (LDF == &llvm::APFloat::IEEEdouble()) { 2811 Current = SSE; 2812 } else 2813 llvm_unreachable("unexpected long double representation!"); 2814 } 2815 // FIXME: _Decimal32 and _Decimal64 are SSE. 2816 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 2817 return; 2818 } 2819 2820 if (const EnumType *ET = Ty->getAs<EnumType>()) { 2821 // Classify the underlying integer type. 2822 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg); 2823 return; 2824 } 2825 2826 if (Ty->hasPointerRepresentation()) { 2827 Current = Integer; 2828 return; 2829 } 2830 2831 if (Ty->isMemberPointerType()) { 2832 if (Ty->isMemberFunctionPointerType()) { 2833 if (Has64BitPointers) { 2834 // If Has64BitPointers, this is an {i64, i64}, so classify both 2835 // Lo and Hi now. 2836 Lo = Hi = Integer; 2837 } else { 2838 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that 2839 // straddles an eightbyte boundary, Hi should be classified as well. 2840 uint64_t EB_FuncPtr = (OffsetBase) / 64; 2841 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64; 2842 if (EB_FuncPtr != EB_ThisAdj) { 2843 Lo = Hi = Integer; 2844 } else { 2845 Current = Integer; 2846 } 2847 } 2848 } else { 2849 Current = Integer; 2850 } 2851 return; 2852 } 2853 2854 if (const VectorType *VT = Ty->getAs<VectorType>()) { 2855 uint64_t Size = getContext().getTypeSize(VT); 2856 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) { 2857 // gcc passes the following as integer: 2858 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float> 2859 // 2 bytes - <2 x char>, <1 x short> 2860 // 1 byte - <1 x char> 2861 Current = Integer; 2862 2863 // If this type crosses an eightbyte boundary, it should be 2864 // split. 2865 uint64_t EB_Lo = (OffsetBase) / 64; 2866 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64; 2867 if (EB_Lo != EB_Hi) 2868 Hi = Lo; 2869 } else if (Size == 64) { 2870 QualType ElementType = VT->getElementType(); 2871 2872 // gcc passes <1 x double> in memory. :( 2873 if (ElementType->isSpecificBuiltinType(BuiltinType::Double)) 2874 return; 2875 2876 // gcc passes <1 x long long> as SSE but clang used to unconditionally 2877 // pass them as integer. For platforms where clang is the de facto 2878 // platform compiler, we must continue to use integer. 2879 if (!classifyIntegerMMXAsSSE() && 2880 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) || 2881 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) || 2882 ElementType->isSpecificBuiltinType(BuiltinType::Long) || 2883 ElementType->isSpecificBuiltinType(BuiltinType::ULong))) 2884 Current = Integer; 2885 else 2886 Current = SSE; 2887 2888 // If this type crosses an eightbyte boundary, it should be 2889 // split. 2890 if (OffsetBase && OffsetBase != 64) 2891 Hi = Lo; 2892 } else if (Size == 128 || 2893 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) { 2894 QualType ElementType = VT->getElementType(); 2895 2896 // gcc passes 256 and 512 bit <X x __int128> vectors in memory. :( 2897 if (passInt128VectorsInMem() && Size != 128 && 2898 (ElementType->isSpecificBuiltinType(BuiltinType::Int128) || 2899 ElementType->isSpecificBuiltinType(BuiltinType::UInt128))) 2900 return; 2901 2902 // Arguments of 256-bits are split into four eightbyte chunks. The 2903 // least significant one belongs to class SSE and all the others to class 2904 // SSEUP. The original Lo and Hi design considers that types can't be 2905 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. 2906 // This design isn't correct for 256-bits, but since there're no cases 2907 // where the upper parts would need to be inspected, avoid adding 2908 // complexity and just consider Hi to match the 64-256 part. 2909 // 2910 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in 2911 // registers if they are "named", i.e. not part of the "..." of a 2912 // variadic function. 2913 // 2914 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are 2915 // split into eight eightbyte chunks, one SSE and seven SSEUP. 2916 Lo = SSE; 2917 Hi = SSEUp; 2918 } 2919 return; 2920 } 2921 2922 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 2923 QualType ET = getContext().getCanonicalType(CT->getElementType()); 2924 2925 uint64_t Size = getContext().getTypeSize(Ty); 2926 if (ET->isIntegralOrEnumerationType()) { 2927 if (Size <= 64) 2928 Current = Integer; 2929 else if (Size <= 128) 2930 Lo = Hi = Integer; 2931 } else if (ET == getContext().FloatTy) { 2932 Current = SSE; 2933 } else if (ET == getContext().DoubleTy) { 2934 Lo = Hi = SSE; 2935 } else if (ET == getContext().LongDoubleTy) { 2936 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 2937 if (LDF == &llvm::APFloat::IEEEquad()) 2938 Current = Memory; 2939 else if (LDF == &llvm::APFloat::x87DoubleExtended()) 2940 Current = ComplexX87; 2941 else if (LDF == &llvm::APFloat::IEEEdouble()) 2942 Lo = Hi = SSE; 2943 else 2944 llvm_unreachable("unexpected long double representation!"); 2945 } 2946 2947 // If this complex type crosses an eightbyte boundary then it 2948 // should be split. 2949 uint64_t EB_Real = (OffsetBase) / 64; 2950 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; 2951 if (Hi == NoClass && EB_Real != EB_Imag) 2952 Hi = Lo; 2953 2954 return; 2955 } 2956 2957 if (const auto *EITy = Ty->getAs<ExtIntType>()) { 2958 if (EITy->getNumBits() <= 64) 2959 Current = Integer; 2960 else if (EITy->getNumBits() <= 128) 2961 Lo = Hi = Integer; 2962 // Larger values need to get passed in memory. 2963 return; 2964 } 2965 2966 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 2967 // Arrays are treated like structures. 2968 2969 uint64_t Size = getContext().getTypeSize(Ty); 2970 2971 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 2972 // than eight eightbytes, ..., it has class MEMORY. 2973 if (Size > 512) 2974 return; 2975 2976 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 2977 // fields, it has class MEMORY. 2978 // 2979 // Only need to check alignment of array base. 2980 if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) 2981 return; 2982 2983 // Otherwise implement simplified merge. We could be smarter about 2984 // this, but it isn't worth it and would be harder to verify. 2985 Current = NoClass; 2986 uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); 2987 uint64_t ArraySize = AT->getSize().getZExtValue(); 2988 2989 // The only case a 256-bit wide vector could be used is when the array 2990 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 2991 // to work for sizes wider than 128, early check and fallback to memory. 2992 // 2993 if (Size > 128 && 2994 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel))) 2995 return; 2996 2997 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 2998 Class FieldLo, FieldHi; 2999 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg); 3000 Lo = merge(Lo, FieldLo); 3001 Hi = merge(Hi, FieldHi); 3002 if (Lo == Memory || Hi == Memory) 3003 break; 3004 } 3005 3006 postMerge(Size, Lo, Hi); 3007 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 3008 return; 3009 } 3010 3011 if (const RecordType *RT = Ty->getAs<RecordType>()) { 3012 uint64_t Size = getContext().getTypeSize(Ty); 3013 3014 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 3015 // than eight eightbytes, ..., it has class MEMORY. 3016 if (Size > 512) 3017 return; 3018 3019 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial 3020 // copy constructor or a non-trivial destructor, it is passed by invisible 3021 // reference. 3022 if (getRecordArgABI(RT, getCXXABI())) 3023 return; 3024 3025 const RecordDecl *RD = RT->getDecl(); 3026 3027 // Assume variable sized types are passed in memory. 3028 if (RD->hasFlexibleArrayMember()) 3029 return; 3030 3031 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3032 3033 // Reset Lo class, this will be recomputed. 3034 Current = NoClass; 3035 3036 // If this is a C++ record, classify the bases first. 3037 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 3038 for (const auto &I : CXXRD->bases()) { 3039 assert(!I.isVirtual() && !I.getType()->isDependentType() && 3040 "Unexpected base class!"); 3041 const auto *Base = 3042 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); 3043 3044 // Classify this field. 3045 // 3046 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 3047 // single eightbyte, each is classified separately. Each eightbyte gets 3048 // initialized to class NO_CLASS. 3049 Class FieldLo, FieldHi; 3050 uint64_t Offset = 3051 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); 3052 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg); 3053 Lo = merge(Lo, FieldLo); 3054 Hi = merge(Hi, FieldHi); 3055 if (Lo == Memory || Hi == Memory) { 3056 postMerge(Size, Lo, Hi); 3057 return; 3058 } 3059 } 3060 } 3061 3062 // Classify the fields one at a time, merging the results. 3063 unsigned idx = 0; 3064 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3065 i != e; ++i, ++idx) { 3066 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 3067 bool BitField = i->isBitField(); 3068 3069 // Ignore padding bit-fields. 3070 if (BitField && i->isUnnamedBitfield()) 3071 continue; 3072 3073 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than 3074 // four eightbytes, or it contains unaligned fields, it has class MEMORY. 3075 // 3076 // The only case a 256-bit wide vector could be used is when the struct 3077 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 3078 // to work for sizes wider than 128, early check and fallback to memory. 3079 // 3080 if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) || 3081 Size > getNativeVectorSizeForAVXABI(AVXLevel))) { 3082 Lo = Memory; 3083 postMerge(Size, Lo, Hi); 3084 return; 3085 } 3086 // Note, skip this test for bit-fields, see below. 3087 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { 3088 Lo = Memory; 3089 postMerge(Size, Lo, Hi); 3090 return; 3091 } 3092 3093 // Classify this field. 3094 // 3095 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate 3096 // exceeds a single eightbyte, each is classified 3097 // separately. Each eightbyte gets initialized to class 3098 // NO_CLASS. 3099 Class FieldLo, FieldHi; 3100 3101 // Bit-fields require special handling, they do not force the 3102 // structure to be passed in memory even if unaligned, and 3103 // therefore they can straddle an eightbyte. 3104 if (BitField) { 3105 assert(!i->isUnnamedBitfield()); 3106 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 3107 uint64_t Size = i->getBitWidthValue(getContext()); 3108 3109 uint64_t EB_Lo = Offset / 64; 3110 uint64_t EB_Hi = (Offset + Size - 1) / 64; 3111 3112 if (EB_Lo) { 3113 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 3114 FieldLo = NoClass; 3115 FieldHi = Integer; 3116 } else { 3117 FieldLo = Integer; 3118 FieldHi = EB_Hi ? Integer : NoClass; 3119 } 3120 } else 3121 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg); 3122 Lo = merge(Lo, FieldLo); 3123 Hi = merge(Hi, FieldHi); 3124 if (Lo == Memory || Hi == Memory) 3125 break; 3126 } 3127 3128 postMerge(Size, Lo, Hi); 3129 } 3130 } 3131 3132 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { 3133 // If this is a scalar LLVM value then assume LLVM will pass it in the right 3134 // place naturally. 3135 if (!isAggregateTypeForABI(Ty)) { 3136 // Treat an enum type as its underlying type. 3137 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3138 Ty = EnumTy->getDecl()->getIntegerType(); 3139 3140 if (Ty->isExtIntType()) 3141 return getNaturalAlignIndirect(Ty); 3142 3143 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) 3144 : ABIArgInfo::getDirect()); 3145 } 3146 3147 return getNaturalAlignIndirect(Ty); 3148 } 3149 3150 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { 3151 if (const VectorType *VecTy = Ty->getAs<VectorType>()) { 3152 uint64_t Size = getContext().getTypeSize(VecTy); 3153 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel); 3154 if (Size <= 64 || Size > LargestVector) 3155 return true; 3156 QualType EltTy = VecTy->getElementType(); 3157 if (passInt128VectorsInMem() && 3158 (EltTy->isSpecificBuiltinType(BuiltinType::Int128) || 3159 EltTy->isSpecificBuiltinType(BuiltinType::UInt128))) 3160 return true; 3161 } 3162 3163 return false; 3164 } 3165 3166 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, 3167 unsigned freeIntRegs) const { 3168 // If this is a scalar LLVM value then assume LLVM will pass it in the right 3169 // place naturally. 3170 // 3171 // This assumption is optimistic, as there could be free registers available 3172 // when we need to pass this argument in memory, and LLVM could try to pass 3173 // the argument in the free register. This does not seem to happen currently, 3174 // but this code would be much safer if we could mark the argument with 3175 // 'onstack'. See PR12193. 3176 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) && 3177 !Ty->isExtIntType()) { 3178 // Treat an enum type as its underlying type. 3179 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3180 Ty = EnumTy->getDecl()->getIntegerType(); 3181 3182 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) 3183 : ABIArgInfo::getDirect()); 3184 } 3185 3186 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 3187 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 3188 3189 // Compute the byval alignment. We specify the alignment of the byval in all 3190 // cases so that the mid-level optimizer knows the alignment of the byval. 3191 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); 3192 3193 // Attempt to avoid passing indirect results using byval when possible. This 3194 // is important for good codegen. 3195 // 3196 // We do this by coercing the value into a scalar type which the backend can 3197 // handle naturally (i.e., without using byval). 3198 // 3199 // For simplicity, we currently only do this when we have exhausted all of the 3200 // free integer registers. Doing this when there are free integer registers 3201 // would require more care, as we would have to ensure that the coerced value 3202 // did not claim the unused register. That would require either reording the 3203 // arguments to the function (so that any subsequent inreg values came first), 3204 // or only doing this optimization when there were no following arguments that 3205 // might be inreg. 3206 // 3207 // We currently expect it to be rare (particularly in well written code) for 3208 // arguments to be passed on the stack when there are still free integer 3209 // registers available (this would typically imply large structs being passed 3210 // by value), so this seems like a fair tradeoff for now. 3211 // 3212 // We can revisit this if the backend grows support for 'onstack' parameter 3213 // attributes. See PR12193. 3214 if (freeIntRegs == 0) { 3215 uint64_t Size = getContext().getTypeSize(Ty); 3216 3217 // If this type fits in an eightbyte, coerce it into the matching integral 3218 // type, which will end up on the stack (with alignment 8). 3219 if (Align == 8 && Size <= 64) 3220 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 3221 Size)); 3222 } 3223 3224 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align)); 3225 } 3226 3227 /// The ABI specifies that a value should be passed in a full vector XMM/YMM 3228 /// register. Pick an LLVM IR type that will be passed as a vector register. 3229 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const { 3230 // Wrapper structs/arrays that only contain vectors are passed just like 3231 // vectors; strip them off if present. 3232 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext())) 3233 Ty = QualType(InnerTy, 0); 3234 3235 llvm::Type *IRType = CGT.ConvertType(Ty); 3236 if (isa<llvm::VectorType>(IRType)) { 3237 // Don't pass vXi128 vectors in their native type, the backend can't 3238 // legalize them. 3239 if (passInt128VectorsInMem() && 3240 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) { 3241 // Use a vXi64 vector. 3242 uint64_t Size = getContext().getTypeSize(Ty); 3243 return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()), 3244 Size / 64); 3245 } 3246 3247 return IRType; 3248 } 3249 3250 if (IRType->getTypeID() == llvm::Type::FP128TyID) 3251 return IRType; 3252 3253 // We couldn't find the preferred IR vector type for 'Ty'. 3254 uint64_t Size = getContext().getTypeSize(Ty); 3255 assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!"); 3256 3257 3258 // Return a LLVM IR vector type based on the size of 'Ty'. 3259 return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()), 3260 Size / 64); 3261 } 3262 3263 /// BitsContainNoUserData - Return true if the specified [start,end) bit range 3264 /// is known to either be off the end of the specified type or being in 3265 /// alignment padding. The user type specified is known to be at most 128 bits 3266 /// in size, and have passed through X86_64ABIInfo::classify with a successful 3267 /// classification that put one of the two halves in the INTEGER class. 3268 /// 3269 /// It is conservatively correct to return false. 3270 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, 3271 unsigned EndBit, ASTContext &Context) { 3272 // If the bytes being queried are off the end of the type, there is no user 3273 // data hiding here. This handles analysis of builtins, vectors and other 3274 // types that don't contain interesting padding. 3275 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 3276 if (TySize <= StartBit) 3277 return true; 3278 3279 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 3280 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 3281 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 3282 3283 // Check each element to see if the element overlaps with the queried range. 3284 for (unsigned i = 0; i != NumElts; ++i) { 3285 // If the element is after the span we care about, then we're done.. 3286 unsigned EltOffset = i*EltSize; 3287 if (EltOffset >= EndBit) break; 3288 3289 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0; 3290 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 3291 EndBit-EltOffset, Context)) 3292 return false; 3293 } 3294 // If it overlaps no elements, then it is safe to process as padding. 3295 return true; 3296 } 3297 3298 if (const RecordType *RT = Ty->getAs<RecordType>()) { 3299 const RecordDecl *RD = RT->getDecl(); 3300 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 3301 3302 // If this is a C++ record, check the bases first. 3303 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 3304 for (const auto &I : CXXRD->bases()) { 3305 assert(!I.isVirtual() && !I.getType()->isDependentType() && 3306 "Unexpected base class!"); 3307 const auto *Base = 3308 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); 3309 3310 // If the base is after the span we care about, ignore it. 3311 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 3312 if (BaseOffset >= EndBit) continue; 3313 3314 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 3315 if (!BitsContainNoUserData(I.getType(), BaseStart, 3316 EndBit-BaseOffset, Context)) 3317 return false; 3318 } 3319 } 3320 3321 // Verify that no field has data that overlaps the region of interest. Yes 3322 // this could be sped up a lot by being smarter about queried fields, 3323 // however we're only looking at structs up to 16 bytes, so we don't care 3324 // much. 3325 unsigned idx = 0; 3326 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3327 i != e; ++i, ++idx) { 3328 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 3329 3330 // If we found a field after the region we care about, then we're done. 3331 if (FieldOffset >= EndBit) break; 3332 3333 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 3334 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 3335 Context)) 3336 return false; 3337 } 3338 3339 // If nothing in this record overlapped the area of interest, then we're 3340 // clean. 3341 return true; 3342 } 3343 3344 return false; 3345 } 3346 3347 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 3348 /// float member at the specified offset. For example, {int,{float}} has a 3349 /// float at offset 4. It is conservatively correct for this routine to return 3350 /// false. 3351 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 3352 const llvm::DataLayout &TD) { 3353 // Base case if we find a float. 3354 if (IROffset == 0 && IRType->isFloatTy()) 3355 return true; 3356 3357 // If this is a struct, recurse into the field at the specified offset. 3358 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 3359 const llvm::StructLayout *SL = TD.getStructLayout(STy); 3360 unsigned Elt = SL->getElementContainingOffset(IROffset); 3361 IROffset -= SL->getElementOffset(Elt); 3362 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 3363 } 3364 3365 // If this is an array, recurse into the field at the specified offset. 3366 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 3367 llvm::Type *EltTy = ATy->getElementType(); 3368 unsigned EltSize = TD.getTypeAllocSize(EltTy); 3369 IROffset -= IROffset/EltSize*EltSize; 3370 return ContainsFloatAtOffset(EltTy, IROffset, TD); 3371 } 3372 3373 return false; 3374 } 3375 3376 3377 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 3378 /// low 8 bytes of an XMM register, corresponding to the SSE class. 3379 llvm::Type *X86_64ABIInfo:: 3380 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 3381 QualType SourceTy, unsigned SourceOffset) const { 3382 // The only three choices we have are either double, <2 x float>, or float. We 3383 // pass as float if the last 4 bytes is just padding. This happens for 3384 // structs that contain 3 floats. 3385 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 3386 SourceOffset*8+64, getContext())) 3387 return llvm::Type::getFloatTy(getVMContext()); 3388 3389 // We want to pass as <2 x float> if the LLVM IR type contains a float at 3390 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 3391 // case. 3392 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && 3393 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) 3394 return llvm::FixedVectorType::get(llvm::Type::getFloatTy(getVMContext()), 3395 2); 3396 3397 return llvm::Type::getDoubleTy(getVMContext()); 3398 } 3399 3400 3401 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 3402 /// an 8-byte GPR. This means that we either have a scalar or we are talking 3403 /// about the high or low part of an up-to-16-byte struct. This routine picks 3404 /// the best LLVM IR type to represent this, which may be i64 or may be anything 3405 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 3406 /// etc). 3407 /// 3408 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 3409 /// the source type. IROffset is an offset in bytes into the LLVM IR type that 3410 /// the 8-byte value references. PrefType may be null. 3411 /// 3412 /// SourceTy is the source-level type for the entire argument. SourceOffset is 3413 /// an offset into this that we're processing (which is always either 0 or 8). 3414 /// 3415 llvm::Type *X86_64ABIInfo:: 3416 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 3417 QualType SourceTy, unsigned SourceOffset) const { 3418 // If we're dealing with an un-offset LLVM IR type, then it means that we're 3419 // returning an 8-byte unit starting with it. See if we can safely use it. 3420 if (IROffset == 0) { 3421 // Pointers and int64's always fill the 8-byte unit. 3422 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || 3423 IRType->isIntegerTy(64)) 3424 return IRType; 3425 3426 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 3427 // goodness in the source type is just tail padding. This is allowed to 3428 // kick in for struct {double,int} on the int, but not on 3429 // struct{double,int,int} because we wouldn't return the second int. We 3430 // have to do this analysis on the source type because we can't depend on 3431 // unions being lowered a specific way etc. 3432 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 3433 IRType->isIntegerTy(32) || 3434 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) { 3435 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 : 3436 cast<llvm::IntegerType>(IRType)->getBitWidth(); 3437 3438 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 3439 SourceOffset*8+64, getContext())) 3440 return IRType; 3441 } 3442 } 3443 3444 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 3445 // If this is a struct, recurse into the field at the specified offset. 3446 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy); 3447 if (IROffset < SL->getSizeInBytes()) { 3448 unsigned FieldIdx = SL->getElementContainingOffset(IROffset); 3449 IROffset -= SL->getElementOffset(FieldIdx); 3450 3451 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, 3452 SourceTy, SourceOffset); 3453 } 3454 } 3455 3456 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 3457 llvm::Type *EltTy = ATy->getElementType(); 3458 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy); 3459 unsigned EltOffset = IROffset/EltSize*EltSize; 3460 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, 3461 SourceOffset); 3462 } 3463 3464 // Okay, we don't have any better idea of what to pass, so we pass this in an 3465 // integer register that isn't too big to fit the rest of the struct. 3466 unsigned TySizeInBytes = 3467 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); 3468 3469 assert(TySizeInBytes != SourceOffset && "Empty field?"); 3470 3471 // It is always safe to classify this as an integer type up to i64 that 3472 // isn't larger than the structure. 3473 return llvm::IntegerType::get(getVMContext(), 3474 std::min(TySizeInBytes-SourceOffset, 8U)*8); 3475 } 3476 3477 3478 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally 3479 /// be used as elements of a two register pair to pass or return, return a 3480 /// first class aggregate to represent them. For example, if the low part of 3481 /// a by-value argument should be passed as i32* and the high part as float, 3482 /// return {i32*, float}. 3483 static llvm::Type * 3484 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, 3485 const llvm::DataLayout &TD) { 3486 // In order to correctly satisfy the ABI, we need to the high part to start 3487 // at offset 8. If the high and low parts we inferred are both 4-byte types 3488 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have 3489 // the second element at offset 8. Check for this: 3490 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo); 3491 unsigned HiAlign = TD.getABITypeAlignment(Hi); 3492 unsigned HiStart = llvm::alignTo(LoSize, HiAlign); 3493 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!"); 3494 3495 // To handle this, we have to increase the size of the low part so that the 3496 // second element will start at an 8 byte offset. We can't increase the size 3497 // of the second element because it might make us access off the end of the 3498 // struct. 3499 if (HiStart != 8) { 3500 // There are usually two sorts of types the ABI generation code can produce 3501 // for the low part of a pair that aren't 8 bytes in size: float or 3502 // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and 3503 // NaCl). 3504 // Promote these to a larger type. 3505 if (Lo->isFloatTy()) 3506 Lo = llvm::Type::getDoubleTy(Lo->getContext()); 3507 else { 3508 assert((Lo->isIntegerTy() || Lo->isPointerTy()) 3509 && "Invalid/unknown lo type"); 3510 Lo = llvm::Type::getInt64Ty(Lo->getContext()); 3511 } 3512 } 3513 3514 llvm::StructType *Result = llvm::StructType::get(Lo, Hi); 3515 3516 // Verify that the second element is at an 8-byte offset. 3517 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 && 3518 "Invalid x86-64 argument pair!"); 3519 return Result; 3520 } 3521 3522 ABIArgInfo X86_64ABIInfo:: 3523 classifyReturnType(QualType RetTy) const { 3524 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the 3525 // classification algorithm. 3526 X86_64ABIInfo::Class Lo, Hi; 3527 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true); 3528 3529 // Check some invariants. 3530 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 3531 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 3532 3533 llvm::Type *ResType = nullptr; 3534 switch (Lo) { 3535 case NoClass: 3536 if (Hi == NoClass) 3537 return ABIArgInfo::getIgnore(); 3538 // If the low part is just padding, it takes no register, leave ResType 3539 // null. 3540 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 3541 "Unknown missing lo part"); 3542 break; 3543 3544 case SSEUp: 3545 case X87Up: 3546 llvm_unreachable("Invalid classification for lo word."); 3547 3548 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via 3549 // hidden argument. 3550 case Memory: 3551 return getIndirectReturnResult(RetTy); 3552 3553 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next 3554 // available register of the sequence %rax, %rdx is used. 3555 case Integer: 3556 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 3557 3558 // If we have a sign or zero extended integer, make sure to return Extend 3559 // so that the parameter gets the right LLVM IR attributes. 3560 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 3561 // Treat an enum type as its underlying type. 3562 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3563 RetTy = EnumTy->getDecl()->getIntegerType(); 3564 3565 if (RetTy->isIntegralOrEnumerationType() && 3566 isPromotableIntegerTypeForABI(RetTy)) 3567 return ABIArgInfo::getExtend(RetTy); 3568 } 3569 break; 3570 3571 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 3572 // available SSE register of the sequence %xmm0, %xmm1 is used. 3573 case SSE: 3574 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 3575 break; 3576 3577 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 3578 // returned on the X87 stack in %st0 as 80-bit x87 number. 3579 case X87: 3580 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 3581 break; 3582 3583 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 3584 // part of the value is returned in %st0 and the imaginary part in 3585 // %st1. 3586 case ComplexX87: 3587 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 3588 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), 3589 llvm::Type::getX86_FP80Ty(getVMContext())); 3590 break; 3591 } 3592 3593 llvm::Type *HighPart = nullptr; 3594 switch (Hi) { 3595 // Memory was handled previously and X87 should 3596 // never occur as a hi class. 3597 case Memory: 3598 case X87: 3599 llvm_unreachable("Invalid classification for hi word."); 3600 3601 case ComplexX87: // Previously handled. 3602 case NoClass: 3603 break; 3604 3605 case Integer: 3606 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 3607 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 3608 return ABIArgInfo::getDirect(HighPart, 8); 3609 break; 3610 case SSE: 3611 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 3612 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 3613 return ABIArgInfo::getDirect(HighPart, 8); 3614 break; 3615 3616 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 3617 // is passed in the next available eightbyte chunk if the last used 3618 // vector register. 3619 // 3620 // SSEUP should always be preceded by SSE, just widen. 3621 case SSEUp: 3622 assert(Lo == SSE && "Unexpected SSEUp classification."); 3623 ResType = GetByteVectorType(RetTy); 3624 break; 3625 3626 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 3627 // returned together with the previous X87 value in %st0. 3628 case X87Up: 3629 // If X87Up is preceded by X87, we don't need to do 3630 // anything. However, in some cases with unions it may not be 3631 // preceded by X87. In such situations we follow gcc and pass the 3632 // extra bits in an SSE reg. 3633 if (Lo != X87) { 3634 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 3635 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 3636 return ABIArgInfo::getDirect(HighPart, 8); 3637 } 3638 break; 3639 } 3640 3641 // If a high part was specified, merge it together with the low part. It is 3642 // known to pass in the high eightbyte of the result. We do this by forming a 3643 // first class struct aggregate with the high and low part: {low, high} 3644 if (HighPart) 3645 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 3646 3647 return ABIArgInfo::getDirect(ResType); 3648 } 3649 3650 ABIArgInfo X86_64ABIInfo::classifyArgumentType( 3651 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE, 3652 bool isNamedArg) 3653 const 3654 { 3655 Ty = useFirstFieldIfTransparentUnion(Ty); 3656 3657 X86_64ABIInfo::Class Lo, Hi; 3658 classify(Ty, 0, Lo, Hi, isNamedArg); 3659 3660 // Check some invariants. 3661 // FIXME: Enforce these by construction. 3662 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 3663 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 3664 3665 neededInt = 0; 3666 neededSSE = 0; 3667 llvm::Type *ResType = nullptr; 3668 switch (Lo) { 3669 case NoClass: 3670 if (Hi == NoClass) 3671 return ABIArgInfo::getIgnore(); 3672 // If the low part is just padding, it takes no register, leave ResType 3673 // null. 3674 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 3675 "Unknown missing lo part"); 3676 break; 3677 3678 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 3679 // on the stack. 3680 case Memory: 3681 3682 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 3683 // COMPLEX_X87, it is passed in memory. 3684 case X87: 3685 case ComplexX87: 3686 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect) 3687 ++neededInt; 3688 return getIndirectResult(Ty, freeIntRegs); 3689 3690 case SSEUp: 3691 case X87Up: 3692 llvm_unreachable("Invalid classification for lo word."); 3693 3694 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 3695 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 3696 // and %r9 is used. 3697 case Integer: 3698 ++neededInt; 3699 3700 // Pick an 8-byte type based on the preferred type. 3701 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 3702 3703 // If we have a sign or zero extended integer, make sure to return Extend 3704 // so that the parameter gets the right LLVM IR attributes. 3705 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 3706 // Treat an enum type as its underlying type. 3707 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3708 Ty = EnumTy->getDecl()->getIntegerType(); 3709 3710 if (Ty->isIntegralOrEnumerationType() && 3711 isPromotableIntegerTypeForABI(Ty)) 3712 return ABIArgInfo::getExtend(Ty); 3713 } 3714 3715 break; 3716 3717 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 3718 // available SSE register is used, the registers are taken in the 3719 // order from %xmm0 to %xmm7. 3720 case SSE: { 3721 llvm::Type *IRType = CGT.ConvertType(Ty); 3722 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 3723 ++neededSSE; 3724 break; 3725 } 3726 } 3727 3728 llvm::Type *HighPart = nullptr; 3729 switch (Hi) { 3730 // Memory was handled previously, ComplexX87 and X87 should 3731 // never occur as hi classes, and X87Up must be preceded by X87, 3732 // which is passed in memory. 3733 case Memory: 3734 case X87: 3735 case ComplexX87: 3736 llvm_unreachable("Invalid classification for hi word."); 3737 3738 case NoClass: break; 3739 3740 case Integer: 3741 ++neededInt; 3742 // Pick an 8-byte type based on the preferred type. 3743 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 3744 3745 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 3746 return ABIArgInfo::getDirect(HighPart, 8); 3747 break; 3748 3749 // X87Up generally doesn't occur here (long double is passed in 3750 // memory), except in situations involving unions. 3751 case X87Up: 3752 case SSE: 3753 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 3754 3755 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 3756 return ABIArgInfo::getDirect(HighPart, 8); 3757 3758 ++neededSSE; 3759 break; 3760 3761 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 3762 // eightbyte is passed in the upper half of the last used SSE 3763 // register. This only happens when 128-bit vectors are passed. 3764 case SSEUp: 3765 assert(Lo == SSE && "Unexpected SSEUp classification"); 3766 ResType = GetByteVectorType(Ty); 3767 break; 3768 } 3769 3770 // If a high part was specified, merge it together with the low part. It is 3771 // known to pass in the high eightbyte of the result. We do this by forming a 3772 // first class struct aggregate with the high and low part: {low, high} 3773 if (HighPart) 3774 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 3775 3776 return ABIArgInfo::getDirect(ResType); 3777 } 3778 3779 ABIArgInfo 3780 X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt, 3781 unsigned &NeededSSE) const { 3782 auto RT = Ty->getAs<RecordType>(); 3783 assert(RT && "classifyRegCallStructType only valid with struct types"); 3784 3785 if (RT->getDecl()->hasFlexibleArrayMember()) 3786 return getIndirectReturnResult(Ty); 3787 3788 // Sum up bases 3789 if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) { 3790 if (CXXRD->isDynamicClass()) { 3791 NeededInt = NeededSSE = 0; 3792 return getIndirectReturnResult(Ty); 3793 } 3794 3795 for (const auto &I : CXXRD->bases()) 3796 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE) 3797 .isIndirect()) { 3798 NeededInt = NeededSSE = 0; 3799 return getIndirectReturnResult(Ty); 3800 } 3801 } 3802 3803 // Sum up members 3804 for (const auto *FD : RT->getDecl()->fields()) { 3805 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) { 3806 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE) 3807 .isIndirect()) { 3808 NeededInt = NeededSSE = 0; 3809 return getIndirectReturnResult(Ty); 3810 } 3811 } else { 3812 unsigned LocalNeededInt, LocalNeededSSE; 3813 if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt, 3814 LocalNeededSSE, true) 3815 .isIndirect()) { 3816 NeededInt = NeededSSE = 0; 3817 return getIndirectReturnResult(Ty); 3818 } 3819 NeededInt += LocalNeededInt; 3820 NeededSSE += LocalNeededSSE; 3821 } 3822 } 3823 3824 return ABIArgInfo::getDirect(); 3825 } 3826 3827 ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty, 3828 unsigned &NeededInt, 3829 unsigned &NeededSSE) const { 3830 3831 NeededInt = 0; 3832 NeededSSE = 0; 3833 3834 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE); 3835 } 3836 3837 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 3838 3839 const unsigned CallingConv = FI.getCallingConvention(); 3840 // It is possible to force Win64 calling convention on any x86_64 target by 3841 // using __attribute__((ms_abi)). In such case to correctly emit Win64 3842 // compatible code delegate this call to WinX86_64ABIInfo::computeInfo. 3843 if (CallingConv == llvm::CallingConv::Win64) { 3844 WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel); 3845 Win64ABIInfo.computeInfo(FI); 3846 return; 3847 } 3848 3849 bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall; 3850 3851 // Keep track of the number of assigned registers. 3852 unsigned FreeIntRegs = IsRegCall ? 11 : 6; 3853 unsigned FreeSSERegs = IsRegCall ? 16 : 8; 3854 unsigned NeededInt, NeededSSE; 3855 3856 if (!::classifyReturnType(getCXXABI(), FI, *this)) { 3857 if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() && 3858 !FI.getReturnType()->getTypePtr()->isUnionType()) { 3859 FI.getReturnInfo() = 3860 classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE); 3861 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { 3862 FreeIntRegs -= NeededInt; 3863 FreeSSERegs -= NeededSSE; 3864 } else { 3865 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType()); 3866 } 3867 } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() && 3868 getContext().getCanonicalType(FI.getReturnType() 3869 ->getAs<ComplexType>() 3870 ->getElementType()) == 3871 getContext().LongDoubleTy) 3872 // Complex Long Double Type is passed in Memory when Regcall 3873 // calling convention is used. 3874 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType()); 3875 else 3876 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3877 } 3878 3879 // If the return value is indirect, then the hidden argument is consuming one 3880 // integer register. 3881 if (FI.getReturnInfo().isIndirect()) 3882 --FreeIntRegs; 3883 3884 // The chain argument effectively gives us another free register. 3885 if (FI.isChainCall()) 3886 ++FreeIntRegs; 3887 3888 unsigned NumRequiredArgs = FI.getNumRequiredArgs(); 3889 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 3890 // get assigned (in left-to-right order) for passing as follows... 3891 unsigned ArgNo = 0; 3892 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3893 it != ie; ++it, ++ArgNo) { 3894 bool IsNamedArg = ArgNo < NumRequiredArgs; 3895 3896 if (IsRegCall && it->type->isStructureOrClassType()) 3897 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE); 3898 else 3899 it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt, 3900 NeededSSE, IsNamedArg); 3901 3902 // AMD64-ABI 3.2.3p3: If there are no registers available for any 3903 // eightbyte of an argument, the whole argument is passed on the 3904 // stack. If registers have already been assigned for some 3905 // eightbytes of such an argument, the assignments get reverted. 3906 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { 3907 FreeIntRegs -= NeededInt; 3908 FreeSSERegs -= NeededSSE; 3909 } else { 3910 it->info = getIndirectResult(it->type, FreeIntRegs); 3911 } 3912 } 3913 } 3914 3915 static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, 3916 Address VAListAddr, QualType Ty) { 3917 Address overflow_arg_area_p = 3918 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 3919 llvm::Value *overflow_arg_area = 3920 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 3921 3922 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 3923 // byte boundary if alignment needed by type exceeds 8 byte boundary. 3924 // It isn't stated explicitly in the standard, but in practice we use 3925 // alignment greater than 16 where necessary. 3926 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty); 3927 if (Align > CharUnits::fromQuantity(8)) { 3928 overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area, 3929 Align); 3930 } 3931 3932 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 3933 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 3934 llvm::Value *Res = 3935 CGF.Builder.CreateBitCast(overflow_arg_area, 3936 llvm::PointerType::getUnqual(LTy)); 3937 3938 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 3939 // l->overflow_arg_area + sizeof(type). 3940 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 3941 // an 8 byte boundary. 3942 3943 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 3944 llvm::Value *Offset = 3945 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 3946 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 3947 "overflow_arg_area.next"); 3948 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 3949 3950 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 3951 return Address(Res, Align); 3952 } 3953 3954 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 3955 QualType Ty) const { 3956 // Assume that va_list type is correct; should be pointer to LLVM type: 3957 // struct { 3958 // i32 gp_offset; 3959 // i32 fp_offset; 3960 // i8* overflow_arg_area; 3961 // i8* reg_save_area; 3962 // }; 3963 unsigned neededInt, neededSSE; 3964 3965 Ty = getContext().getCanonicalType(Ty); 3966 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE, 3967 /*isNamedArg*/false); 3968 3969 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 3970 // in the registers. If not go to step 7. 3971 if (!neededInt && !neededSSE) 3972 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty); 3973 3974 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 3975 // general purpose registers needed to pass type and num_fp to hold 3976 // the number of floating point registers needed. 3977 3978 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 3979 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 3980 // l->fp_offset > 304 - num_fp * 16 go to step 7. 3981 // 3982 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of 3983 // register save space). 3984 3985 llvm::Value *InRegs = nullptr; 3986 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid(); 3987 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr; 3988 if (neededInt) { 3989 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); 3990 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); 3991 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); 3992 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); 3993 } 3994 3995 if (neededSSE) { 3996 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); 3997 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); 3998 llvm::Value *FitsInFP = 3999 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); 4000 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); 4001 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; 4002 } 4003 4004 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 4005 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 4006 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 4007 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 4008 4009 // Emit code to load the value if it was passed in registers. 4010 4011 CGF.EmitBlock(InRegBlock); 4012 4013 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with 4014 // an offset of l->gp_offset and/or l->fp_offset. This may require 4015 // copying to a temporary location in case the parameter is passed 4016 // in different register classes or requires an alignment greater 4017 // than 8 for general purpose registers and 16 for XMM registers. 4018 // 4019 // FIXME: This really results in shameful code when we end up needing to 4020 // collect arguments from different places; often what should result in a 4021 // simple assembling of a structure from scattered addresses has many more 4022 // loads than necessary. Can we clean this up? 4023 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 4024 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad( 4025 CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area"); 4026 4027 Address RegAddr = Address::invalid(); 4028 if (neededInt && neededSSE) { 4029 // FIXME: Cleanup. 4030 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 4031 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 4032 Address Tmp = CGF.CreateMemTemp(Ty); 4033 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); 4034 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 4035 llvm::Type *TyLo = ST->getElementType(0); 4036 llvm::Type *TyHi = ST->getElementType(1); 4037 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 4038 "Unexpected ABI info for mixed regs"); 4039 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 4040 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 4041 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset); 4042 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset); 4043 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr; 4044 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr; 4045 4046 // Copy the first element. 4047 // FIXME: Our choice of alignment here and below is probably pessimistic. 4048 llvm::Value *V = CGF.Builder.CreateAlignedLoad( 4049 TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo), 4050 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo))); 4051 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 4052 4053 // Copy the second element. 4054 V = CGF.Builder.CreateAlignedLoad( 4055 TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi), 4056 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi))); 4057 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 4058 4059 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); 4060 } else if (neededInt) { 4061 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset), 4062 CharUnits::fromQuantity(8)); 4063 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); 4064 4065 // Copy to a temporary if necessary to ensure the appropriate alignment. 4066 std::pair<CharUnits, CharUnits> SizeAlign = 4067 getContext().getTypeInfoInChars(Ty); 4068 uint64_t TySize = SizeAlign.first.getQuantity(); 4069 CharUnits TyAlign = SizeAlign.second; 4070 4071 // Copy into a temporary if the type is more aligned than the 4072 // register save area. 4073 if (TyAlign.getQuantity() > 8) { 4074 Address Tmp = CGF.CreateMemTemp(Ty); 4075 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false); 4076 RegAddr = Tmp; 4077 } 4078 4079 } else if (neededSSE == 1) { 4080 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset), 4081 CharUnits::fromQuantity(16)); 4082 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); 4083 } else { 4084 assert(neededSSE == 2 && "Invalid number of needed registers!"); 4085 // SSE registers are spaced 16 bytes apart in the register save 4086 // area, we need to collect the two eightbytes together. 4087 // The ABI isn't explicit about this, but it seems reasonable 4088 // to assume that the slots are 16-byte aligned, since the stack is 4089 // naturally 16-byte aligned and the prologue is expected to store 4090 // all the SSE registers to the RSA. 4091 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset), 4092 CharUnits::fromQuantity(16)); 4093 Address RegAddrHi = 4094 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo, 4095 CharUnits::fromQuantity(16)); 4096 llvm::Type *ST = AI.canHaveCoerceToType() 4097 ? AI.getCoerceToType() 4098 : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy); 4099 llvm::Value *V; 4100 Address Tmp = CGF.CreateMemTemp(Ty); 4101 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); 4102 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast( 4103 RegAddrLo, ST->getStructElementType(0))); 4104 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 4105 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast( 4106 RegAddrHi, ST->getStructElementType(1))); 4107 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 4108 4109 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); 4110 } 4111 4112 // AMD64-ABI 3.5.7p5: Step 5. Set: 4113 // l->gp_offset = l->gp_offset + num_gp * 8 4114 // l->fp_offset = l->fp_offset + num_fp * 16. 4115 if (neededInt) { 4116 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 4117 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 4118 gp_offset_p); 4119 } 4120 if (neededSSE) { 4121 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 4122 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 4123 fp_offset_p); 4124 } 4125 CGF.EmitBranch(ContBlock); 4126 4127 // Emit code to load the value if it was passed in memory. 4128 4129 CGF.EmitBlock(InMemBlock); 4130 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty); 4131 4132 // Return the appropriate result. 4133 4134 CGF.EmitBlock(ContBlock); 4135 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock, 4136 "vaarg.addr"); 4137 return ResAddr; 4138 } 4139 4140 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 4141 QualType Ty) const { 4142 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 4143 CGF.getContext().getTypeInfoInChars(Ty), 4144 CharUnits::fromQuantity(8), 4145 /*allowHigherAlign*/ false); 4146 } 4147 4148 ABIArgInfo 4149 WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs, 4150 const ABIArgInfo ¤t) const { 4151 // Assumes vectorCall calling convention. 4152 const Type *Base = nullptr; 4153 uint64_t NumElts = 0; 4154 4155 if (!Ty->isBuiltinType() && !Ty->isVectorType() && 4156 isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) { 4157 FreeSSERegs -= NumElts; 4158 return getDirectX86Hva(); 4159 } 4160 return current; 4161 } 4162 4163 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs, 4164 bool IsReturnType, bool IsVectorCall, 4165 bool IsRegCall) const { 4166 4167 if (Ty->isVoidType()) 4168 return ABIArgInfo::getIgnore(); 4169 4170 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4171 Ty = EnumTy->getDecl()->getIntegerType(); 4172 4173 TypeInfo Info = getContext().getTypeInfo(Ty); 4174 uint64_t Width = Info.Width; 4175 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align); 4176 4177 const RecordType *RT = Ty->getAs<RecordType>(); 4178 if (RT) { 4179 if (!IsReturnType) { 4180 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI())) 4181 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 4182 } 4183 4184 if (RT->getDecl()->hasFlexibleArrayMember()) 4185 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 4186 4187 } 4188 4189 const Type *Base = nullptr; 4190 uint64_t NumElts = 0; 4191 // vectorcall adds the concept of a homogenous vector aggregate, similar to 4192 // other targets. 4193 if ((IsVectorCall || IsRegCall) && 4194 isHomogeneousAggregate(Ty, Base, NumElts)) { 4195 if (IsRegCall) { 4196 if (FreeSSERegs >= NumElts) { 4197 FreeSSERegs -= NumElts; 4198 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType()) 4199 return ABIArgInfo::getDirect(); 4200 return ABIArgInfo::getExpand(); 4201 } 4202 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 4203 } else if (IsVectorCall) { 4204 if (FreeSSERegs >= NumElts && 4205 (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) { 4206 FreeSSERegs -= NumElts; 4207 return ABIArgInfo::getDirect(); 4208 } else if (IsReturnType) { 4209 return ABIArgInfo::getExpand(); 4210 } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) { 4211 // HVAs are delayed and reclassified in the 2nd step. 4212 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 4213 } 4214 } 4215 } 4216 4217 if (Ty->isMemberPointerType()) { 4218 // If the member pointer is represented by an LLVM int or ptr, pass it 4219 // directly. 4220 llvm::Type *LLTy = CGT.ConvertType(Ty); 4221 if (LLTy->isPointerTy() || LLTy->isIntegerTy()) 4222 return ABIArgInfo::getDirect(); 4223 } 4224 4225 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) { 4226 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 4227 // not 1, 2, 4, or 8 bytes, must be passed by reference." 4228 if (Width > 64 || !llvm::isPowerOf2_64(Width)) 4229 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 4230 4231 // Otherwise, coerce it to a small integer. 4232 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width)); 4233 } 4234 4235 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 4236 switch (BT->getKind()) { 4237 case BuiltinType::Bool: 4238 // Bool type is always extended to the ABI, other builtin types are not 4239 // extended. 4240 return ABIArgInfo::getExtend(Ty); 4241 4242 case BuiltinType::LongDouble: 4243 // Mingw64 GCC uses the old 80 bit extended precision floating point 4244 // unit. It passes them indirectly through memory. 4245 if (IsMingw64) { 4246 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 4247 if (LDF == &llvm::APFloat::x87DoubleExtended()) 4248 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 4249 } 4250 break; 4251 4252 case BuiltinType::Int128: 4253 case BuiltinType::UInt128: 4254 // If it's a parameter type, the normal ABI rule is that arguments larger 4255 // than 8 bytes are passed indirectly. GCC follows it. We follow it too, 4256 // even though it isn't particularly efficient. 4257 if (!IsReturnType) 4258 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 4259 4260 // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that. 4261 // Clang matches them for compatibility. 4262 return ABIArgInfo::getDirect(llvm::FixedVectorType::get( 4263 llvm::Type::getInt64Ty(getVMContext()), 2)); 4264 4265 default: 4266 break; 4267 } 4268 } 4269 4270 if (Ty->isExtIntType()) { 4271 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 4272 // not 1, 2, 4, or 8 bytes, must be passed by reference." 4273 // However, non-power-of-two _ExtInts will be passed as 1,2,4 or 8 bytes 4274 // anyway as long is it fits in them, so we don't have to check the power of 4275 // 2. 4276 if (Width <= 64) 4277 return ABIArgInfo::getDirect(); 4278 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 4279 } 4280 4281 return ABIArgInfo::getDirect(); 4282 } 4283 4284 void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI, 4285 unsigned FreeSSERegs, 4286 bool IsVectorCall, 4287 bool IsRegCall) const { 4288 unsigned Count = 0; 4289 for (auto &I : FI.arguments()) { 4290 // Vectorcall in x64 only permits the first 6 arguments to be passed 4291 // as XMM/YMM registers. 4292 if (Count < VectorcallMaxParamNumAsReg) 4293 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall); 4294 else { 4295 // Since these cannot be passed in registers, pretend no registers 4296 // are left. 4297 unsigned ZeroSSERegsAvail = 0; 4298 I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false, 4299 IsVectorCall, IsRegCall); 4300 } 4301 ++Count; 4302 } 4303 4304 for (auto &I : FI.arguments()) { 4305 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info); 4306 } 4307 } 4308 4309 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 4310 const unsigned CC = FI.getCallingConvention(); 4311 bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall; 4312 bool IsRegCall = CC == llvm::CallingConv::X86_RegCall; 4313 4314 // If __attribute__((sysv_abi)) is in use, use the SysV argument 4315 // classification rules. 4316 if (CC == llvm::CallingConv::X86_64_SysV) { 4317 X86_64ABIInfo SysVABIInfo(CGT, AVXLevel); 4318 SysVABIInfo.computeInfo(FI); 4319 return; 4320 } 4321 4322 unsigned FreeSSERegs = 0; 4323 if (IsVectorCall) { 4324 // We can use up to 4 SSE return registers with vectorcall. 4325 FreeSSERegs = 4; 4326 } else if (IsRegCall) { 4327 // RegCall gives us 16 SSE registers. 4328 FreeSSERegs = 16; 4329 } 4330 4331 if (!getCXXABI().classifyReturnType(FI)) 4332 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true, 4333 IsVectorCall, IsRegCall); 4334 4335 if (IsVectorCall) { 4336 // We can use up to 6 SSE register parameters with vectorcall. 4337 FreeSSERegs = 6; 4338 } else if (IsRegCall) { 4339 // RegCall gives us 16 SSE registers, we can reuse the return registers. 4340 FreeSSERegs = 16; 4341 } 4342 4343 if (IsVectorCall) { 4344 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall); 4345 } else { 4346 for (auto &I : FI.arguments()) 4347 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall); 4348 } 4349 4350 } 4351 4352 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4353 QualType Ty) const { 4354 4355 bool IsIndirect = false; 4356 4357 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 4358 // not 1, 2, 4, or 8 bytes, must be passed by reference." 4359 if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) { 4360 uint64_t Width = getContext().getTypeSize(Ty); 4361 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width); 4362 } 4363 4364 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, 4365 CGF.getContext().getTypeInfoInChars(Ty), 4366 CharUnits::fromQuantity(8), 4367 /*allowHigherAlign*/ false); 4368 } 4369 4370 static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4371 llvm::Value *Address, bool Is64Bit, 4372 bool IsAIX) { 4373 // This is calculated from the LLVM and GCC tables and verified 4374 // against gcc output. AFAIK all PPC ABIs use the same encoding. 4375 4376 CodeGen::CGBuilderTy &Builder = CGF.Builder; 4377 4378 llvm::IntegerType *i8 = CGF.Int8Ty; 4379 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 4380 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 4381 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 4382 4383 // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers 4384 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31); 4385 4386 // 32-63: fp0-31, the 8-byte floating-point registers 4387 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 4388 4389 // 64-67 are various 4-byte or 8-byte special-purpose registers: 4390 // 64: mq 4391 // 65: lr 4392 // 66: ctr 4393 // 67: ap 4394 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67); 4395 4396 // 68-76 are various 4-byte special-purpose registers: 4397 // 68-75 cr0-7 4398 // 76: xer 4399 AssignToArrayRange(Builder, Address, Four8, 68, 76); 4400 4401 // 77-108: v0-31, the 16-byte vector registers 4402 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 4403 4404 // 109: vrsave 4405 // 110: vscr 4406 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110); 4407 4408 // AIX does not utilize the rest of the registers. 4409 if (IsAIX) 4410 return false; 4411 4412 // 111: spe_acc 4413 // 112: spefscr 4414 // 113: sfp 4415 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113); 4416 4417 if (!Is64Bit) 4418 return false; 4419 4420 // TODO: Need to verify if these registers are used on 64 bit AIX with Power8 4421 // or above CPU. 4422 // 64-bit only registers: 4423 // 114: tfhar 4424 // 115: tfiar 4425 // 116: texasr 4426 AssignToArrayRange(Builder, Address, Eight8, 114, 116); 4427 4428 return false; 4429 } 4430 4431 // AIX 4432 namespace { 4433 /// AIXABIInfo - The AIX XCOFF ABI information. 4434 class AIXABIInfo : public ABIInfo { 4435 const bool Is64Bit; 4436 const unsigned PtrByteSize; 4437 CharUnits getParamTypeAlignment(QualType Ty) const; 4438 4439 public: 4440 AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit) 4441 : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {} 4442 4443 bool isPromotableTypeForABI(QualType Ty) const; 4444 4445 ABIArgInfo classifyReturnType(QualType RetTy) const; 4446 ABIArgInfo classifyArgumentType(QualType Ty) const; 4447 4448 void computeInfo(CGFunctionInfo &FI) const override { 4449 if (!getCXXABI().classifyReturnType(FI)) 4450 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4451 4452 for (auto &I : FI.arguments()) 4453 I.info = classifyArgumentType(I.type); 4454 } 4455 4456 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4457 QualType Ty) const override; 4458 }; 4459 4460 class AIXTargetCodeGenInfo : public TargetCodeGenInfo { 4461 const bool Is64Bit; 4462 4463 public: 4464 AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit) 4465 : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)), 4466 Is64Bit(Is64Bit) {} 4467 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4468 return 1; // r1 is the dedicated stack pointer 4469 } 4470 4471 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4472 llvm::Value *Address) const override; 4473 }; 4474 } // namespace 4475 4476 // Return true if the ABI requires Ty to be passed sign- or zero- 4477 // extended to 32/64 bits. 4478 bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const { 4479 // Treat an enum type as its underlying type. 4480 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4481 Ty = EnumTy->getDecl()->getIntegerType(); 4482 4483 // Promotable integer types are required to be promoted by the ABI. 4484 if (Ty->isPromotableIntegerType()) 4485 return true; 4486 4487 if (!Is64Bit) 4488 return false; 4489 4490 // For 64 bit mode, in addition to the usual promotable integer types, we also 4491 // need to extend all 32-bit types, since the ABI requires promotion to 64 4492 // bits. 4493 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 4494 switch (BT->getKind()) { 4495 case BuiltinType::Int: 4496 case BuiltinType::UInt: 4497 return true; 4498 default: 4499 break; 4500 } 4501 4502 return false; 4503 } 4504 4505 ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const { 4506 if (RetTy->isAnyComplexType()) 4507 llvm::report_fatal_error("complex type is not supported on AIX yet"); 4508 4509 if (RetTy->isVectorType()) 4510 llvm::report_fatal_error("vector type is not supported on AIX yet"); 4511 4512 if (RetTy->isVoidType()) 4513 return ABIArgInfo::getIgnore(); 4514 4515 // TODO: Evaluate if AIX power alignment rule would have an impact on the 4516 // alignment here. 4517 if (isAggregateTypeForABI(RetTy)) 4518 return getNaturalAlignIndirect(RetTy); 4519 4520 return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 4521 : ABIArgInfo::getDirect()); 4522 } 4523 4524 ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const { 4525 Ty = useFirstFieldIfTransparentUnion(Ty); 4526 4527 if (Ty->isAnyComplexType()) 4528 llvm::report_fatal_error("complex type is not supported on AIX yet"); 4529 4530 if (Ty->isVectorType()) 4531 llvm::report_fatal_error("vector type is not supported on AIX yet"); 4532 4533 // TODO: Evaluate if AIX power alignment rule would have an impact on the 4534 // alignment here. 4535 if (isAggregateTypeForABI(Ty)) { 4536 // Records with non-trivial destructors/copy-constructors should not be 4537 // passed by value. 4538 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 4539 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 4540 4541 CharUnits CCAlign = getParamTypeAlignment(Ty); 4542 CharUnits TyAlign = getContext().getTypeAlignInChars(Ty); 4543 4544 return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true, 4545 /*Realign*/ TyAlign > CCAlign); 4546 } 4547 4548 return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) 4549 : ABIArgInfo::getDirect()); 4550 } 4551 4552 CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const { 4553 if (Ty->isAnyComplexType()) 4554 llvm::report_fatal_error("complex type is not supported on AIX yet"); 4555 4556 if (Ty->isVectorType()) 4557 llvm::report_fatal_error("vector type is not supported on AIX yet"); 4558 4559 // If the structure contains a vector type, the alignment is 16. 4560 if (isRecordWithSIMDVectorType(getContext(), Ty)) 4561 return CharUnits::fromQuantity(16); 4562 4563 return CharUnits::fromQuantity(PtrByteSize); 4564 } 4565 4566 Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4567 QualType Ty) const { 4568 if (Ty->isAnyComplexType()) 4569 llvm::report_fatal_error("complex type is not supported on AIX yet"); 4570 4571 if (Ty->isVectorType()) 4572 llvm::report_fatal_error("vector type is not supported on AIX yet"); 4573 4574 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 4575 TypeInfo.second = getParamTypeAlignment(Ty); 4576 4577 CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize); 4578 4579 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo, 4580 SlotSize, /*AllowHigher*/ true); 4581 } 4582 4583 bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable( 4584 CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const { 4585 return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true); 4586 } 4587 4588 // PowerPC-32 4589 namespace { 4590 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information. 4591 class PPC32_SVR4_ABIInfo : public DefaultABIInfo { 4592 bool IsSoftFloatABI; 4593 bool IsRetSmallStructInRegABI; 4594 4595 CharUnits getParamTypeAlignment(QualType Ty) const; 4596 4597 public: 4598 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI, 4599 bool RetSmallStructInRegABI) 4600 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI), 4601 IsRetSmallStructInRegABI(RetSmallStructInRegABI) {} 4602 4603 ABIArgInfo classifyReturnType(QualType RetTy) const; 4604 4605 void computeInfo(CGFunctionInfo &FI) const override { 4606 if (!getCXXABI().classifyReturnType(FI)) 4607 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4608 for (auto &I : FI.arguments()) 4609 I.info = classifyArgumentType(I.type); 4610 } 4611 4612 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4613 QualType Ty) const override; 4614 }; 4615 4616 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo { 4617 public: 4618 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI, 4619 bool RetSmallStructInRegABI) 4620 : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>( 4621 CGT, SoftFloatABI, RetSmallStructInRegABI)) {} 4622 4623 static bool isStructReturnInRegABI(const llvm::Triple &Triple, 4624 const CodeGenOptions &Opts); 4625 4626 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4627 // This is recovered from gcc output. 4628 return 1; // r1 is the dedicated stack pointer 4629 } 4630 4631 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4632 llvm::Value *Address) const override; 4633 }; 4634 } 4635 4636 CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const { 4637 // Complex types are passed just like their elements. 4638 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 4639 Ty = CTy->getElementType(); 4640 4641 if (Ty->isVectorType()) 4642 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 4643 : 4); 4644 4645 // For single-element float/vector structs, we consider the whole type 4646 // to have the same alignment requirements as its single element. 4647 const Type *AlignTy = nullptr; 4648 if (const Type *EltType = isSingleElementStruct(Ty, getContext())) { 4649 const BuiltinType *BT = EltType->getAs<BuiltinType>(); 4650 if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) || 4651 (BT && BT->isFloatingPoint())) 4652 AlignTy = EltType; 4653 } 4654 4655 if (AlignTy) 4656 return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4); 4657 return CharUnits::fromQuantity(4); 4658 } 4659 4660 ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { 4661 uint64_t Size; 4662 4663 // -msvr4-struct-return puts small aggregates in GPR3 and GPR4. 4664 if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI && 4665 (Size = getContext().getTypeSize(RetTy)) <= 64) { 4666 // System V ABI (1995), page 3-22, specified: 4667 // > A structure or union whose size is less than or equal to 8 bytes 4668 // > shall be returned in r3 and r4, as if it were first stored in the 4669 // > 8-byte aligned memory area and then the low addressed word were 4670 // > loaded into r3 and the high-addressed word into r4. Bits beyond 4671 // > the last member of the structure or union are not defined. 4672 // 4673 // GCC for big-endian PPC32 inserts the pad before the first member, 4674 // not "beyond the last member" of the struct. To stay compatible 4675 // with GCC, we coerce the struct to an integer of the same size. 4676 // LLVM will extend it and return i32 in r3, or i64 in r3:r4. 4677 if (Size == 0) 4678 return ABIArgInfo::getIgnore(); 4679 else { 4680 llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size); 4681 return ABIArgInfo::getDirect(CoerceTy); 4682 } 4683 } 4684 4685 return DefaultABIInfo::classifyReturnType(RetTy); 4686 } 4687 4688 // TODO: this implementation is now likely redundant with 4689 // DefaultABIInfo::EmitVAArg. 4690 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList, 4691 QualType Ty) const { 4692 if (getTarget().getTriple().isOSDarwin()) { 4693 auto TI = getContext().getTypeInfoInChars(Ty); 4694 TI.second = getParamTypeAlignment(Ty); 4695 4696 CharUnits SlotSize = CharUnits::fromQuantity(4); 4697 return emitVoidPtrVAArg(CGF, VAList, Ty, 4698 classifyArgumentType(Ty).isIndirect(), TI, SlotSize, 4699 /*AllowHigherAlign=*/true); 4700 } 4701 4702 const unsigned OverflowLimit = 8; 4703 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 4704 // TODO: Implement this. For now ignore. 4705 (void)CTy; 4706 return Address::invalid(); // FIXME? 4707 } 4708 4709 // struct __va_list_tag { 4710 // unsigned char gpr; 4711 // unsigned char fpr; 4712 // unsigned short reserved; 4713 // void *overflow_arg_area; 4714 // void *reg_save_area; 4715 // }; 4716 4717 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64; 4718 bool isInt = 4719 Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType(); 4720 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64; 4721 4722 // All aggregates are passed indirectly? That doesn't seem consistent 4723 // with the argument-lowering code. 4724 bool isIndirect = Ty->isAggregateType(); 4725 4726 CGBuilderTy &Builder = CGF.Builder; 4727 4728 // The calling convention either uses 1-2 GPRs or 1 FPR. 4729 Address NumRegsAddr = Address::invalid(); 4730 if (isInt || IsSoftFloatABI) { 4731 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr"); 4732 } else { 4733 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr"); 4734 } 4735 4736 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs"); 4737 4738 // "Align" the register count when TY is i64. 4739 if (isI64 || (isF64 && IsSoftFloatABI)) { 4740 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1)); 4741 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U)); 4742 } 4743 4744 llvm::Value *CC = 4745 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond"); 4746 4747 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs"); 4748 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow"); 4749 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 4750 4751 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow); 4752 4753 llvm::Type *DirectTy = CGF.ConvertType(Ty); 4754 if (isIndirect) DirectTy = DirectTy->getPointerTo(0); 4755 4756 // Case 1: consume registers. 4757 Address RegAddr = Address::invalid(); 4758 { 4759 CGF.EmitBlock(UsingRegs); 4760 4761 Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4); 4762 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), 4763 CharUnits::fromQuantity(8)); 4764 assert(RegAddr.getElementType() == CGF.Int8Ty); 4765 4766 // Floating-point registers start after the general-purpose registers. 4767 if (!(isInt || IsSoftFloatABI)) { 4768 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr, 4769 CharUnits::fromQuantity(32)); 4770 } 4771 4772 // Get the address of the saved value by scaling the number of 4773 // registers we've used by the number of 4774 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8); 4775 llvm::Value *RegOffset = 4776 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity())); 4777 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty, 4778 RegAddr.getPointer(), RegOffset), 4779 RegAddr.getAlignment().alignmentOfArrayElement(RegSize)); 4780 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy); 4781 4782 // Increase the used-register count. 4783 NumRegs = 4784 Builder.CreateAdd(NumRegs, 4785 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1)); 4786 Builder.CreateStore(NumRegs, NumRegsAddr); 4787 4788 CGF.EmitBranch(Cont); 4789 } 4790 4791 // Case 2: consume space in the overflow area. 4792 Address MemAddr = Address::invalid(); 4793 { 4794 CGF.EmitBlock(UsingOverflow); 4795 4796 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr); 4797 4798 // Everything in the overflow area is rounded up to a size of at least 4. 4799 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4); 4800 4801 CharUnits Size; 4802 if (!isIndirect) { 4803 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty); 4804 Size = TypeInfo.first.alignTo(OverflowAreaAlign); 4805 } else { 4806 Size = CGF.getPointerSize(); 4807 } 4808 4809 Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3); 4810 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), 4811 OverflowAreaAlign); 4812 // Round up address of argument to alignment 4813 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty); 4814 if (Align > OverflowAreaAlign) { 4815 llvm::Value *Ptr = OverflowArea.getPointer(); 4816 OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align), 4817 Align); 4818 } 4819 4820 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy); 4821 4822 // Increase the overflow area. 4823 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size); 4824 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr); 4825 CGF.EmitBranch(Cont); 4826 } 4827 4828 CGF.EmitBlock(Cont); 4829 4830 // Merge the cases with a phi. 4831 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow, 4832 "vaarg.addr"); 4833 4834 // Load the pointer if the argument was passed indirectly. 4835 if (isIndirect) { 4836 Result = Address(Builder.CreateLoad(Result, "aggr"), 4837 getContext().getTypeAlignInChars(Ty)); 4838 } 4839 4840 return Result; 4841 } 4842 4843 bool PPC32TargetCodeGenInfo::isStructReturnInRegABI( 4844 const llvm::Triple &Triple, const CodeGenOptions &Opts) { 4845 assert(Triple.getArch() == llvm::Triple::ppc); 4846 4847 switch (Opts.getStructReturnConvention()) { 4848 case CodeGenOptions::SRCK_Default: 4849 break; 4850 case CodeGenOptions::SRCK_OnStack: // -maix-struct-return 4851 return false; 4852 case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return 4853 return true; 4854 } 4855 4856 if (Triple.isOSBinFormatELF() && !Triple.isOSLinux()) 4857 return true; 4858 4859 return false; 4860 } 4861 4862 bool 4863 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4864 llvm::Value *Address) const { 4865 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false, 4866 /*IsAIX*/ false); 4867 } 4868 4869 // PowerPC-64 4870 4871 namespace { 4872 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. 4873 class PPC64_SVR4_ABIInfo : public SwiftABIInfo { 4874 public: 4875 enum ABIKind { 4876 ELFv1 = 0, 4877 ELFv2 4878 }; 4879 4880 private: 4881 static const unsigned GPRBits = 64; 4882 ABIKind Kind; 4883 bool HasQPX; 4884 bool IsSoftFloatABI; 4885 4886 // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and 4887 // will be passed in a QPX register. 4888 bool IsQPXVectorTy(const Type *Ty) const { 4889 if (!HasQPX) 4890 return false; 4891 4892 if (const VectorType *VT = Ty->getAs<VectorType>()) { 4893 unsigned NumElements = VT->getNumElements(); 4894 if (NumElements == 1) 4895 return false; 4896 4897 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) { 4898 if (getContext().getTypeSize(Ty) <= 256) 4899 return true; 4900 } else if (VT->getElementType()-> 4901 isSpecificBuiltinType(BuiltinType::Float)) { 4902 if (getContext().getTypeSize(Ty) <= 128) 4903 return true; 4904 } 4905 } 4906 4907 return false; 4908 } 4909 4910 bool IsQPXVectorTy(QualType Ty) const { 4911 return IsQPXVectorTy(Ty.getTypePtr()); 4912 } 4913 4914 public: 4915 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX, 4916 bool SoftFloatABI) 4917 : SwiftABIInfo(CGT), Kind(Kind), HasQPX(HasQPX), 4918 IsSoftFloatABI(SoftFloatABI) {} 4919 4920 bool isPromotableTypeForABI(QualType Ty) const; 4921 CharUnits getParamTypeAlignment(QualType Ty) const; 4922 4923 ABIArgInfo classifyReturnType(QualType RetTy) const; 4924 ABIArgInfo classifyArgumentType(QualType Ty) const; 4925 4926 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 4927 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 4928 uint64_t Members) const override; 4929 4930 // TODO: We can add more logic to computeInfo to improve performance. 4931 // Example: For aggregate arguments that fit in a register, we could 4932 // use getDirectInReg (as is done below for structs containing a single 4933 // floating-point value) to avoid pushing them to memory on function 4934 // entry. This would require changing the logic in PPCISelLowering 4935 // when lowering the parameters in the caller and args in the callee. 4936 void computeInfo(CGFunctionInfo &FI) const override { 4937 if (!getCXXABI().classifyReturnType(FI)) 4938 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4939 for (auto &I : FI.arguments()) { 4940 // We rely on the default argument classification for the most part. 4941 // One exception: An aggregate containing a single floating-point 4942 // or vector item must be passed in a register if one is available. 4943 const Type *T = isSingleElementStruct(I.type, getContext()); 4944 if (T) { 4945 const BuiltinType *BT = T->getAs<BuiltinType>(); 4946 if (IsQPXVectorTy(T) || 4947 (T->isVectorType() && getContext().getTypeSize(T) == 128) || 4948 (BT && BT->isFloatingPoint())) { 4949 QualType QT(T, 0); 4950 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); 4951 continue; 4952 } 4953 } 4954 I.info = classifyArgumentType(I.type); 4955 } 4956 } 4957 4958 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4959 QualType Ty) const override; 4960 4961 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, 4962 bool asReturnValue) const override { 4963 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 4964 } 4965 4966 bool isSwiftErrorInRegister() const override { 4967 return false; 4968 } 4969 }; 4970 4971 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { 4972 4973 public: 4974 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, 4975 PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX, 4976 bool SoftFloatABI) 4977 : TargetCodeGenInfo(std::make_unique<PPC64_SVR4_ABIInfo>( 4978 CGT, Kind, HasQPX, SoftFloatABI)) {} 4979 4980 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4981 // This is recovered from gcc output. 4982 return 1; // r1 is the dedicated stack pointer 4983 } 4984 4985 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4986 llvm::Value *Address) const override; 4987 }; 4988 4989 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 4990 public: 4991 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 4992 4993 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4994 // This is recovered from gcc output. 4995 return 1; // r1 is the dedicated stack pointer 4996 } 4997 4998 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4999 llvm::Value *Address) const override; 5000 }; 5001 5002 } 5003 5004 // Return true if the ABI requires Ty to be passed sign- or zero- 5005 // extended to 64 bits. 5006 bool 5007 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const { 5008 // Treat an enum type as its underlying type. 5009 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 5010 Ty = EnumTy->getDecl()->getIntegerType(); 5011 5012 // Promotable integer types are required to be promoted by the ABI. 5013 if (isPromotableIntegerTypeForABI(Ty)) 5014 return true; 5015 5016 // In addition to the usual promotable integer types, we also need to 5017 // extend all 32-bit types, since the ABI requires promotion to 64 bits. 5018 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 5019 switch (BT->getKind()) { 5020 case BuiltinType::Int: 5021 case BuiltinType::UInt: 5022 return true; 5023 default: 5024 break; 5025 } 5026 5027 if (const auto *EIT = Ty->getAs<ExtIntType>()) 5028 if (EIT->getNumBits() < 64) 5029 return true; 5030 5031 return false; 5032 } 5033 5034 /// isAlignedParamType - Determine whether a type requires 16-byte or 5035 /// higher alignment in the parameter area. Always returns at least 8. 5036 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const { 5037 // Complex types are passed just like their elements. 5038 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 5039 Ty = CTy->getElementType(); 5040 5041 // Only vector types of size 16 bytes need alignment (larger types are 5042 // passed via reference, smaller types are not aligned). 5043 if (IsQPXVectorTy(Ty)) { 5044 if (getContext().getTypeSize(Ty) > 128) 5045 return CharUnits::fromQuantity(32); 5046 5047 return CharUnits::fromQuantity(16); 5048 } else if (Ty->isVectorType()) { 5049 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8); 5050 } 5051 5052 // For single-element float/vector structs, we consider the whole type 5053 // to have the same alignment requirements as its single element. 5054 const Type *AlignAsType = nullptr; 5055 const Type *EltType = isSingleElementStruct(Ty, getContext()); 5056 if (EltType) { 5057 const BuiltinType *BT = EltType->getAs<BuiltinType>(); 5058 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() && 5059 getContext().getTypeSize(EltType) == 128) || 5060 (BT && BT->isFloatingPoint())) 5061 AlignAsType = EltType; 5062 } 5063 5064 // Likewise for ELFv2 homogeneous aggregates. 5065 const Type *Base = nullptr; 5066 uint64_t Members = 0; 5067 if (!AlignAsType && Kind == ELFv2 && 5068 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members)) 5069 AlignAsType = Base; 5070 5071 // With special case aggregates, only vector base types need alignment. 5072 if (AlignAsType && IsQPXVectorTy(AlignAsType)) { 5073 if (getContext().getTypeSize(AlignAsType) > 128) 5074 return CharUnits::fromQuantity(32); 5075 5076 return CharUnits::fromQuantity(16); 5077 } else if (AlignAsType) { 5078 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8); 5079 } 5080 5081 // Otherwise, we only need alignment for any aggregate type that 5082 // has an alignment requirement of >= 16 bytes. 5083 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) { 5084 if (HasQPX && getContext().getTypeAlign(Ty) >= 256) 5085 return CharUnits::fromQuantity(32); 5086 return CharUnits::fromQuantity(16); 5087 } 5088 5089 return CharUnits::fromQuantity(8); 5090 } 5091 5092 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous 5093 /// aggregate. Base is set to the base element type, and Members is set 5094 /// to the number of base elements. 5095 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base, 5096 uint64_t &Members) const { 5097 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 5098 uint64_t NElements = AT->getSize().getZExtValue(); 5099 if (NElements == 0) 5100 return false; 5101 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members)) 5102 return false; 5103 Members *= NElements; 5104 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 5105 const RecordDecl *RD = RT->getDecl(); 5106 if (RD->hasFlexibleArrayMember()) 5107 return false; 5108 5109 Members = 0; 5110 5111 // If this is a C++ record, check the bases first. 5112 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 5113 for (const auto &I : CXXRD->bases()) { 5114 // Ignore empty records. 5115 if (isEmptyRecord(getContext(), I.getType(), true)) 5116 continue; 5117 5118 uint64_t FldMembers; 5119 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers)) 5120 return false; 5121 5122 Members += FldMembers; 5123 } 5124 } 5125 5126 for (const auto *FD : RD->fields()) { 5127 // Ignore (non-zero arrays of) empty records. 5128 QualType FT = FD->getType(); 5129 while (const ConstantArrayType *AT = 5130 getContext().getAsConstantArrayType(FT)) { 5131 if (AT->getSize().getZExtValue() == 0) 5132 return false; 5133 FT = AT->getElementType(); 5134 } 5135 if (isEmptyRecord(getContext(), FT, true)) 5136 continue; 5137 5138 // For compatibility with GCC, ignore empty bitfields in C++ mode. 5139 if (getContext().getLangOpts().CPlusPlus && 5140 FD->isZeroLengthBitField(getContext())) 5141 continue; 5142 5143 uint64_t FldMembers; 5144 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers)) 5145 return false; 5146 5147 Members = (RD->isUnion() ? 5148 std::max(Members, FldMembers) : Members + FldMembers); 5149 } 5150 5151 if (!Base) 5152 return false; 5153 5154 // Ensure there is no padding. 5155 if (getContext().getTypeSize(Base) * Members != 5156 getContext().getTypeSize(Ty)) 5157 return false; 5158 } else { 5159 Members = 1; 5160 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 5161 Members = 2; 5162 Ty = CT->getElementType(); 5163 } 5164 5165 // Most ABIs only support float, double, and some vector type widths. 5166 if (!isHomogeneousAggregateBaseType(Ty)) 5167 return false; 5168 5169 // The base type must be the same for all members. Types that 5170 // agree in both total size and mode (float vs. vector) are 5171 // treated as being equivalent here. 5172 const Type *TyPtr = Ty.getTypePtr(); 5173 if (!Base) { 5174 Base = TyPtr; 5175 // If it's a non-power-of-2 vector, its size is already a power-of-2, 5176 // so make sure to widen it explicitly. 5177 if (const VectorType *VT = Base->getAs<VectorType>()) { 5178 QualType EltTy = VT->getElementType(); 5179 unsigned NumElements = 5180 getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy); 5181 Base = getContext() 5182 .getVectorType(EltTy, NumElements, VT->getVectorKind()) 5183 .getTypePtr(); 5184 } 5185 } 5186 5187 if (Base->isVectorType() != TyPtr->isVectorType() || 5188 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr)) 5189 return false; 5190 } 5191 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members); 5192 } 5193 5194 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 5195 // Homogeneous aggregates for ELFv2 must have base types of float, 5196 // double, long double, or 128-bit vectors. 5197 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 5198 if (BT->getKind() == BuiltinType::Float || 5199 BT->getKind() == BuiltinType::Double || 5200 BT->getKind() == BuiltinType::LongDouble || 5201 (getContext().getTargetInfo().hasFloat128Type() && 5202 (BT->getKind() == BuiltinType::Float128))) { 5203 if (IsSoftFloatABI) 5204 return false; 5205 return true; 5206 } 5207 } 5208 if (const VectorType *VT = Ty->getAs<VectorType>()) { 5209 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty)) 5210 return true; 5211 } 5212 return false; 5213 } 5214 5215 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough( 5216 const Type *Base, uint64_t Members) const { 5217 // Vector and fp128 types require one register, other floating point types 5218 // require one or two registers depending on their size. 5219 uint32_t NumRegs = 5220 ((getContext().getTargetInfo().hasFloat128Type() && 5221 Base->isFloat128Type()) || 5222 Base->isVectorType()) ? 1 5223 : (getContext().getTypeSize(Base) + 63) / 64; 5224 5225 // Homogeneous Aggregates may occupy at most 8 registers. 5226 return Members * NumRegs <= 8; 5227 } 5228 5229 ABIArgInfo 5230 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { 5231 Ty = useFirstFieldIfTransparentUnion(Ty); 5232 5233 if (Ty->isAnyComplexType()) 5234 return ABIArgInfo::getDirect(); 5235 5236 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes) 5237 // or via reference (larger than 16 bytes). 5238 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) { 5239 uint64_t Size = getContext().getTypeSize(Ty); 5240 if (Size > 128) 5241 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 5242 else if (Size < 128) { 5243 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 5244 return ABIArgInfo::getDirect(CoerceTy); 5245 } 5246 } 5247 5248 if (const auto *EIT = Ty->getAs<ExtIntType>()) 5249 if (EIT->getNumBits() > 128) 5250 return getNaturalAlignIndirect(Ty, /*ByVal=*/true); 5251 5252 if (isAggregateTypeForABI(Ty)) { 5253 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 5254 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 5255 5256 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity(); 5257 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity(); 5258 5259 // ELFv2 homogeneous aggregates are passed as array types. 5260 const Type *Base = nullptr; 5261 uint64_t Members = 0; 5262 if (Kind == ELFv2 && 5263 isHomogeneousAggregate(Ty, Base, Members)) { 5264 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 5265 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 5266 return ABIArgInfo::getDirect(CoerceTy); 5267 } 5268 5269 // If an aggregate may end up fully in registers, we do not 5270 // use the ByVal method, but pass the aggregate as array. 5271 // This is usually beneficial since we avoid forcing the 5272 // back-end to store the argument to memory. 5273 uint64_t Bits = getContext().getTypeSize(Ty); 5274 if (Bits > 0 && Bits <= 8 * GPRBits) { 5275 llvm::Type *CoerceTy; 5276 5277 // Types up to 8 bytes are passed as integer type (which will be 5278 // properly aligned in the argument save area doubleword). 5279 if (Bits <= GPRBits) 5280 CoerceTy = 5281 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); 5282 // Larger types are passed as arrays, with the base type selected 5283 // according to the required alignment in the save area. 5284 else { 5285 uint64_t RegBits = ABIAlign * 8; 5286 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits; 5287 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits); 5288 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs); 5289 } 5290 5291 return ABIArgInfo::getDirect(CoerceTy); 5292 } 5293 5294 // All other aggregates are passed ByVal. 5295 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), 5296 /*ByVal=*/true, 5297 /*Realign=*/TyAlign > ABIAlign); 5298 } 5299 5300 return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) 5301 : ABIArgInfo::getDirect()); 5302 } 5303 5304 ABIArgInfo 5305 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { 5306 if (RetTy->isVoidType()) 5307 return ABIArgInfo::getIgnore(); 5308 5309 if (RetTy->isAnyComplexType()) 5310 return ABIArgInfo::getDirect(); 5311 5312 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes) 5313 // or via reference (larger than 16 bytes). 5314 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) { 5315 uint64_t Size = getContext().getTypeSize(RetTy); 5316 if (Size > 128) 5317 return getNaturalAlignIndirect(RetTy); 5318 else if (Size < 128) { 5319 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 5320 return ABIArgInfo::getDirect(CoerceTy); 5321 } 5322 } 5323 5324 if (const auto *EIT = RetTy->getAs<ExtIntType>()) 5325 if (EIT->getNumBits() > 128) 5326 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false); 5327 5328 if (isAggregateTypeForABI(RetTy)) { 5329 // ELFv2 homogeneous aggregates are returned as array types. 5330 const Type *Base = nullptr; 5331 uint64_t Members = 0; 5332 if (Kind == ELFv2 && 5333 isHomogeneousAggregate(RetTy, Base, Members)) { 5334 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 5335 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 5336 return ABIArgInfo::getDirect(CoerceTy); 5337 } 5338 5339 // ELFv2 small aggregates are returned in up to two registers. 5340 uint64_t Bits = getContext().getTypeSize(RetTy); 5341 if (Kind == ELFv2 && Bits <= 2 * GPRBits) { 5342 if (Bits == 0) 5343 return ABIArgInfo::getIgnore(); 5344 5345 llvm::Type *CoerceTy; 5346 if (Bits > GPRBits) { 5347 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits); 5348 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy); 5349 } else 5350 CoerceTy = 5351 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); 5352 return ABIArgInfo::getDirect(CoerceTy); 5353 } 5354 5355 // All other aggregates are returned indirectly. 5356 return getNaturalAlignIndirect(RetTy); 5357 } 5358 5359 return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 5360 : ABIArgInfo::getDirect()); 5361 } 5362 5363 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 5364 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5365 QualType Ty) const { 5366 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 5367 TypeInfo.second = getParamTypeAlignment(Ty); 5368 5369 CharUnits SlotSize = CharUnits::fromQuantity(8); 5370 5371 // If we have a complex type and the base type is smaller than 8 bytes, 5372 // the ABI calls for the real and imaginary parts to be right-adjusted 5373 // in separate doublewords. However, Clang expects us to produce a 5374 // pointer to a structure with the two parts packed tightly. So generate 5375 // loads of the real and imaginary parts relative to the va_list pointer, 5376 // and store them to a temporary structure. 5377 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 5378 CharUnits EltSize = TypeInfo.first / 2; 5379 if (EltSize < SlotSize) { 5380 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, 5381 SlotSize * 2, SlotSize, 5382 SlotSize, /*AllowHigher*/ true); 5383 5384 Address RealAddr = Addr; 5385 Address ImagAddr = RealAddr; 5386 if (CGF.CGM.getDataLayout().isBigEndian()) { 5387 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, 5388 SlotSize - EltSize); 5389 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr, 5390 2 * SlotSize - EltSize); 5391 } else { 5392 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize); 5393 } 5394 5395 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType()); 5396 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy); 5397 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy); 5398 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal"); 5399 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag"); 5400 5401 Address Temp = CGF.CreateMemTemp(Ty, "vacplx"); 5402 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty), 5403 /*init*/ true); 5404 return Temp; 5405 } 5406 } 5407 5408 // Otherwise, just use the general rule. 5409 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, 5410 TypeInfo, SlotSize, /*AllowHigher*/ true); 5411 } 5412 5413 bool 5414 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( 5415 CodeGen::CodeGenFunction &CGF, 5416 llvm::Value *Address) const { 5417 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true, 5418 /*IsAIX*/ false); 5419 } 5420 5421 bool 5422 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 5423 llvm::Value *Address) const { 5424 return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true, 5425 /*IsAIX*/ false); 5426 } 5427 5428 //===----------------------------------------------------------------------===// 5429 // AArch64 ABI Implementation 5430 //===----------------------------------------------------------------------===// 5431 5432 namespace { 5433 5434 class AArch64ABIInfo : public SwiftABIInfo { 5435 public: 5436 enum ABIKind { 5437 AAPCS = 0, 5438 DarwinPCS, 5439 Win64 5440 }; 5441 5442 private: 5443 ABIKind Kind; 5444 5445 public: 5446 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) 5447 : SwiftABIInfo(CGT), Kind(Kind) {} 5448 5449 private: 5450 ABIKind getABIKind() const { return Kind; } 5451 bool isDarwinPCS() const { return Kind == DarwinPCS; } 5452 5453 ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const; 5454 ABIArgInfo classifyArgumentType(QualType RetTy) const; 5455 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 5456 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 5457 uint64_t Members) const override; 5458 5459 bool isIllegalVectorType(QualType Ty) const; 5460 5461 void computeInfo(CGFunctionInfo &FI) const override { 5462 if (!::classifyReturnType(getCXXABI(), FI, *this)) 5463 FI.getReturnInfo() = 5464 classifyReturnType(FI.getReturnType(), FI.isVariadic()); 5465 5466 for (auto &it : FI.arguments()) 5467 it.info = classifyArgumentType(it.type); 5468 } 5469 5470 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty, 5471 CodeGenFunction &CGF) const; 5472 5473 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty, 5474 CodeGenFunction &CGF) const; 5475 5476 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5477 QualType Ty) const override { 5478 return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty) 5479 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF) 5480 : EmitAAPCSVAArg(VAListAddr, Ty, CGF); 5481 } 5482 5483 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 5484 QualType Ty) const override; 5485 5486 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, 5487 bool asReturnValue) const override { 5488 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 5489 } 5490 bool isSwiftErrorInRegister() const override { 5491 return true; 5492 } 5493 5494 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, 5495 unsigned elts) const override; 5496 5497 bool allowBFloatArgsAndRet() const override { 5498 return getTarget().hasBFloat16Type(); 5499 } 5500 }; 5501 5502 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo { 5503 public: 5504 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind) 5505 : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {} 5506 5507 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 5508 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue"; 5509 } 5510 5511 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 5512 return 31; 5513 } 5514 5515 bool doesReturnSlotInterfereWithArgs() const override { return false; } 5516 5517 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5518 CodeGen::CodeGenModule &CGM) const override { 5519 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 5520 if (!FD) 5521 return; 5522 5523 LangOptions::SignReturnAddressScopeKind Scope = 5524 CGM.getLangOpts().getSignReturnAddressScope(); 5525 LangOptions::SignReturnAddressKeyKind Key = 5526 CGM.getLangOpts().getSignReturnAddressKey(); 5527 bool BranchTargetEnforcement = CGM.getLangOpts().BranchTargetEnforcement; 5528 if (const auto *TA = FD->getAttr<TargetAttr>()) { 5529 ParsedTargetAttr Attr = TA->parse(); 5530 if (!Attr.BranchProtection.empty()) { 5531 TargetInfo::BranchProtectionInfo BPI; 5532 StringRef Error; 5533 (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection, 5534 BPI, Error); 5535 assert(Error.empty()); 5536 Scope = BPI.SignReturnAddr; 5537 Key = BPI.SignKey; 5538 BranchTargetEnforcement = BPI.BranchTargetEnforcement; 5539 } 5540 } 5541 5542 auto *Fn = cast<llvm::Function>(GV); 5543 if (Scope != LangOptions::SignReturnAddressScopeKind::None) { 5544 Fn->addFnAttr("sign-return-address", 5545 Scope == LangOptions::SignReturnAddressScopeKind::All 5546 ? "all" 5547 : "non-leaf"); 5548 5549 Fn->addFnAttr("sign-return-address-key", 5550 Key == LangOptions::SignReturnAddressKeyKind::AKey 5551 ? "a_key" 5552 : "b_key"); 5553 } 5554 5555 if (BranchTargetEnforcement) 5556 Fn->addFnAttr("branch-target-enforcement"); 5557 } 5558 }; 5559 5560 class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo { 5561 public: 5562 WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K) 5563 : AArch64TargetCodeGenInfo(CGT, K) {} 5564 5565 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5566 CodeGen::CodeGenModule &CGM) const override; 5567 5568 void getDependentLibraryOption(llvm::StringRef Lib, 5569 llvm::SmallString<24> &Opt) const override { 5570 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib); 5571 } 5572 5573 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, 5574 llvm::SmallString<32> &Opt) const override { 5575 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 5576 } 5577 }; 5578 5579 void WindowsAArch64TargetCodeGenInfo::setTargetAttributes( 5580 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 5581 AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 5582 if (GV->isDeclaration()) 5583 return; 5584 addStackProbeTargetAttributes(D, GV, CGM); 5585 } 5586 } 5587 5588 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const { 5589 Ty = useFirstFieldIfTransparentUnion(Ty); 5590 5591 // Handle illegal vector types here. 5592 if (isIllegalVectorType(Ty)) { 5593 uint64_t Size = getContext().getTypeSize(Ty); 5594 // Android promotes <2 x i8> to i16, not i32 5595 if (isAndroid() && (Size <= 16)) { 5596 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext()); 5597 return ABIArgInfo::getDirect(ResType); 5598 } 5599 if (Size <= 32) { 5600 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext()); 5601 return ABIArgInfo::getDirect(ResType); 5602 } 5603 if (Size == 64) { 5604 auto *ResType = 5605 llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2); 5606 return ABIArgInfo::getDirect(ResType); 5607 } 5608 if (Size == 128) { 5609 auto *ResType = 5610 llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4); 5611 return ABIArgInfo::getDirect(ResType); 5612 } 5613 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 5614 } 5615 5616 if (!isAggregateTypeForABI(Ty)) { 5617 // Treat an enum type as its underlying type. 5618 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 5619 Ty = EnumTy->getDecl()->getIntegerType(); 5620 5621 if (const auto *EIT = Ty->getAs<ExtIntType>()) 5622 if (EIT->getNumBits() > 128) 5623 return getNaturalAlignIndirect(Ty); 5624 5625 return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS() 5626 ? ABIArgInfo::getExtend(Ty) 5627 : ABIArgInfo::getDirect()); 5628 } 5629 5630 // Structures with either a non-trivial destructor or a non-trivial 5631 // copy constructor are always indirect. 5632 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 5633 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA == 5634 CGCXXABI::RAA_DirectInMemory); 5635 } 5636 5637 // Empty records are always ignored on Darwin, but actually passed in C++ mode 5638 // elsewhere for GNU compatibility. 5639 uint64_t Size = getContext().getTypeSize(Ty); 5640 bool IsEmpty = isEmptyRecord(getContext(), Ty, true); 5641 if (IsEmpty || Size == 0) { 5642 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS()) 5643 return ABIArgInfo::getIgnore(); 5644 5645 // GNU C mode. The only argument that gets ignored is an empty one with size 5646 // 0. 5647 if (IsEmpty && Size == 0) 5648 return ABIArgInfo::getIgnore(); 5649 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 5650 } 5651 5652 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded. 5653 const Type *Base = nullptr; 5654 uint64_t Members = 0; 5655 if (isHomogeneousAggregate(Ty, Base, Members)) { 5656 return ABIArgInfo::getDirect( 5657 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members)); 5658 } 5659 5660 // Aggregates <= 16 bytes are passed directly in registers or on the stack. 5661 if (Size <= 128) { 5662 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of 5663 // same size and alignment. 5664 if (getTarget().isRenderScriptTarget()) { 5665 return coerceToIntArray(Ty, getContext(), getVMContext()); 5666 } 5667 unsigned Alignment; 5668 if (Kind == AArch64ABIInfo::AAPCS) { 5669 Alignment = getContext().getTypeUnadjustedAlign(Ty); 5670 Alignment = Alignment < 128 ? 64 : 128; 5671 } else { 5672 Alignment = std::max(getContext().getTypeAlign(Ty), 5673 (unsigned)getTarget().getPointerWidth(0)); 5674 } 5675 Size = llvm::alignTo(Size, Alignment); 5676 5677 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. 5678 // For aggregates with 16-byte alignment, we use i128. 5679 llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment); 5680 return ABIArgInfo::getDirect( 5681 Size == Alignment ? BaseTy 5682 : llvm::ArrayType::get(BaseTy, Size / Alignment)); 5683 } 5684 5685 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 5686 } 5687 5688 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy, 5689 bool IsVariadic) const { 5690 if (RetTy->isVoidType()) 5691 return ABIArgInfo::getIgnore(); 5692 5693 // Large vector types should be returned via memory. 5694 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 5695 return getNaturalAlignIndirect(RetTy); 5696 5697 if (!isAggregateTypeForABI(RetTy)) { 5698 // Treat an enum type as its underlying type. 5699 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 5700 RetTy = EnumTy->getDecl()->getIntegerType(); 5701 5702 if (const auto *EIT = RetTy->getAs<ExtIntType>()) 5703 if (EIT->getNumBits() > 128) 5704 return getNaturalAlignIndirect(RetTy); 5705 5706 return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS() 5707 ? ABIArgInfo::getExtend(RetTy) 5708 : ABIArgInfo::getDirect()); 5709 } 5710 5711 uint64_t Size = getContext().getTypeSize(RetTy); 5712 if (isEmptyRecord(getContext(), RetTy, true) || Size == 0) 5713 return ABIArgInfo::getIgnore(); 5714 5715 const Type *Base = nullptr; 5716 uint64_t Members = 0; 5717 if (isHomogeneousAggregate(RetTy, Base, Members) && 5718 !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 && 5719 IsVariadic)) 5720 // Homogeneous Floating-point Aggregates (HFAs) are returned directly. 5721 return ABIArgInfo::getDirect(); 5722 5723 // Aggregates <= 16 bytes are returned directly in registers or on the stack. 5724 if (Size <= 128) { 5725 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of 5726 // same size and alignment. 5727 if (getTarget().isRenderScriptTarget()) { 5728 return coerceToIntArray(RetTy, getContext(), getVMContext()); 5729 } 5730 unsigned Alignment = getContext().getTypeAlign(RetTy); 5731 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes 5732 5733 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. 5734 // For aggregates with 16-byte alignment, we use i128. 5735 if (Alignment < 128 && Size == 128) { 5736 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); 5737 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); 5738 } 5739 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 5740 } 5741 5742 return getNaturalAlignIndirect(RetTy); 5743 } 5744 5745 /// isIllegalVectorType - check whether the vector type is legal for AArch64. 5746 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const { 5747 if (const VectorType *VT = Ty->getAs<VectorType>()) { 5748 // Check whether VT is legal. 5749 unsigned NumElements = VT->getNumElements(); 5750 uint64_t Size = getContext().getTypeSize(VT); 5751 // NumElements should be power of 2. 5752 if (!llvm::isPowerOf2_32(NumElements)) 5753 return true; 5754 5755 // arm64_32 has to be compatible with the ARM logic here, which allows huge 5756 // vectors for some reason. 5757 llvm::Triple Triple = getTarget().getTriple(); 5758 if (Triple.getArch() == llvm::Triple::aarch64_32 && 5759 Triple.isOSBinFormatMachO()) 5760 return Size <= 32; 5761 5762 return Size != 64 && (Size != 128 || NumElements == 1); 5763 } 5764 return false; 5765 } 5766 5767 bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize, 5768 llvm::Type *eltTy, 5769 unsigned elts) const { 5770 if (!llvm::isPowerOf2_32(elts)) 5771 return false; 5772 if (totalSize.getQuantity() != 8 && 5773 (totalSize.getQuantity() != 16 || elts == 1)) 5774 return false; 5775 return true; 5776 } 5777 5778 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 5779 // Homogeneous aggregates for AAPCS64 must have base types of a floating 5780 // point type or a short-vector type. This is the same as the 32-bit ABI, 5781 // but with the difference that any floating-point type is allowed, 5782 // including __fp16. 5783 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 5784 if (BT->isFloatingPoint()) 5785 return true; 5786 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 5787 unsigned VecSize = getContext().getTypeSize(VT); 5788 if (VecSize == 64 || VecSize == 128) 5789 return true; 5790 } 5791 return false; 5792 } 5793 5794 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 5795 uint64_t Members) const { 5796 return Members <= 4; 5797 } 5798 5799 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, 5800 QualType Ty, 5801 CodeGenFunction &CGF) const { 5802 ABIArgInfo AI = classifyArgumentType(Ty); 5803 bool IsIndirect = AI.isIndirect(); 5804 5805 llvm::Type *BaseTy = CGF.ConvertType(Ty); 5806 if (IsIndirect) 5807 BaseTy = llvm::PointerType::getUnqual(BaseTy); 5808 else if (AI.getCoerceToType()) 5809 BaseTy = AI.getCoerceToType(); 5810 5811 unsigned NumRegs = 1; 5812 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) { 5813 BaseTy = ArrTy->getElementType(); 5814 NumRegs = ArrTy->getNumElements(); 5815 } 5816 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy(); 5817 5818 // The AArch64 va_list type and handling is specified in the Procedure Call 5819 // Standard, section B.4: 5820 // 5821 // struct { 5822 // void *__stack; 5823 // void *__gr_top; 5824 // void *__vr_top; 5825 // int __gr_offs; 5826 // int __vr_offs; 5827 // }; 5828 5829 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg"); 5830 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 5831 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack"); 5832 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 5833 5834 CharUnits TySize = getContext().getTypeSizeInChars(Ty); 5835 CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty); 5836 5837 Address reg_offs_p = Address::invalid(); 5838 llvm::Value *reg_offs = nullptr; 5839 int reg_top_index; 5840 int RegSize = IsIndirect ? 8 : TySize.getQuantity(); 5841 if (!IsFPR) { 5842 // 3 is the field number of __gr_offs 5843 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p"); 5844 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs"); 5845 reg_top_index = 1; // field number for __gr_top 5846 RegSize = llvm::alignTo(RegSize, 8); 5847 } else { 5848 // 4 is the field number of __vr_offs. 5849 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p"); 5850 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs"); 5851 reg_top_index = 2; // field number for __vr_top 5852 RegSize = 16 * NumRegs; 5853 } 5854 5855 //======================================= 5856 // Find out where argument was passed 5857 //======================================= 5858 5859 // If reg_offs >= 0 we're already using the stack for this type of 5860 // argument. We don't want to keep updating reg_offs (in case it overflows, 5861 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves 5862 // whatever they get). 5863 llvm::Value *UsingStack = nullptr; 5864 UsingStack = CGF.Builder.CreateICmpSGE( 5865 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0)); 5866 5867 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock); 5868 5869 // Otherwise, at least some kind of argument could go in these registers, the 5870 // question is whether this particular type is too big. 5871 CGF.EmitBlock(MaybeRegBlock); 5872 5873 // Integer arguments may need to correct register alignment (for example a 5874 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we 5875 // align __gr_offs to calculate the potential address. 5876 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) { 5877 int Align = TyAlign.getQuantity(); 5878 5879 reg_offs = CGF.Builder.CreateAdd( 5880 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1), 5881 "align_regoffs"); 5882 reg_offs = CGF.Builder.CreateAnd( 5883 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align), 5884 "aligned_regoffs"); 5885 } 5886 5887 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list. 5888 // The fact that this is done unconditionally reflects the fact that 5889 // allocating an argument to the stack also uses up all the remaining 5890 // registers of the appropriate kind. 5891 llvm::Value *NewOffset = nullptr; 5892 NewOffset = CGF.Builder.CreateAdd( 5893 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs"); 5894 CGF.Builder.CreateStore(NewOffset, reg_offs_p); 5895 5896 // Now we're in a position to decide whether this argument really was in 5897 // registers or not. 5898 llvm::Value *InRegs = nullptr; 5899 InRegs = CGF.Builder.CreateICmpSLE( 5900 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg"); 5901 5902 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock); 5903 5904 //======================================= 5905 // Argument was in registers 5906 //======================================= 5907 5908 // Now we emit the code for if the argument was originally passed in 5909 // registers. First start the appropriate block: 5910 CGF.EmitBlock(InRegBlock); 5911 5912 llvm::Value *reg_top = nullptr; 5913 Address reg_top_p = 5914 CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p"); 5915 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top"); 5916 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs), 5917 CharUnits::fromQuantity(IsFPR ? 16 : 8)); 5918 Address RegAddr = Address::invalid(); 5919 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty); 5920 5921 if (IsIndirect) { 5922 // If it's been passed indirectly (actually a struct), whatever we find from 5923 // stored registers or on the stack will actually be a struct **. 5924 MemTy = llvm::PointerType::getUnqual(MemTy); 5925 } 5926 5927 const Type *Base = nullptr; 5928 uint64_t NumMembers = 0; 5929 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers); 5930 if (IsHFA && NumMembers > 1) { 5931 // Homogeneous aggregates passed in registers will have their elements split 5932 // and stored 16-bytes apart regardless of size (they're notionally in qN, 5933 // qN+1, ...). We reload and store into a temporary local variable 5934 // contiguously. 5935 assert(!IsIndirect && "Homogeneous aggregates should be passed directly"); 5936 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0)); 5937 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0)); 5938 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers); 5939 Address Tmp = CGF.CreateTempAlloca(HFATy, 5940 std::max(TyAlign, BaseTyInfo.second)); 5941 5942 // On big-endian platforms, the value will be right-aligned in its slot. 5943 int Offset = 0; 5944 if (CGF.CGM.getDataLayout().isBigEndian() && 5945 BaseTyInfo.first.getQuantity() < 16) 5946 Offset = 16 - BaseTyInfo.first.getQuantity(); 5947 5948 for (unsigned i = 0; i < NumMembers; ++i) { 5949 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset); 5950 Address LoadAddr = 5951 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset); 5952 LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy); 5953 5954 Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i); 5955 5956 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr); 5957 CGF.Builder.CreateStore(Elem, StoreAddr); 5958 } 5959 5960 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy); 5961 } else { 5962 // Otherwise the object is contiguous in memory. 5963 5964 // It might be right-aligned in its slot. 5965 CharUnits SlotSize = BaseAddr.getAlignment(); 5966 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect && 5967 (IsHFA || !isAggregateTypeForABI(Ty)) && 5968 TySize < SlotSize) { 5969 CharUnits Offset = SlotSize - TySize; 5970 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset); 5971 } 5972 5973 RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy); 5974 } 5975 5976 CGF.EmitBranch(ContBlock); 5977 5978 //======================================= 5979 // Argument was on the stack 5980 //======================================= 5981 CGF.EmitBlock(OnStackBlock); 5982 5983 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p"); 5984 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack"); 5985 5986 // Again, stack arguments may need realignment. In this case both integer and 5987 // floating-point ones might be affected. 5988 if (!IsIndirect && TyAlign.getQuantity() > 8) { 5989 int Align = TyAlign.getQuantity(); 5990 5991 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty); 5992 5993 OnStackPtr = CGF.Builder.CreateAdd( 5994 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1), 5995 "align_stack"); 5996 OnStackPtr = CGF.Builder.CreateAnd( 5997 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align), 5998 "align_stack"); 5999 6000 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy); 6001 } 6002 Address OnStackAddr(OnStackPtr, 6003 std::max(CharUnits::fromQuantity(8), TyAlign)); 6004 6005 // All stack slots are multiples of 8 bytes. 6006 CharUnits StackSlotSize = CharUnits::fromQuantity(8); 6007 CharUnits StackSize; 6008 if (IsIndirect) 6009 StackSize = StackSlotSize; 6010 else 6011 StackSize = TySize.alignTo(StackSlotSize); 6012 6013 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize); 6014 llvm::Value *NewStack = 6015 CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack"); 6016 6017 // Write the new value of __stack for the next call to va_arg 6018 CGF.Builder.CreateStore(NewStack, stack_p); 6019 6020 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) && 6021 TySize < StackSlotSize) { 6022 CharUnits Offset = StackSlotSize - TySize; 6023 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset); 6024 } 6025 6026 OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy); 6027 6028 CGF.EmitBranch(ContBlock); 6029 6030 //======================================= 6031 // Tidy up 6032 //======================================= 6033 CGF.EmitBlock(ContBlock); 6034 6035 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, 6036 OnStackAddr, OnStackBlock, "vaargs.addr"); 6037 6038 if (IsIndirect) 6039 return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), 6040 TyAlign); 6041 6042 return ResAddr; 6043 } 6044 6045 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty, 6046 CodeGenFunction &CGF) const { 6047 // The backend's lowering doesn't support va_arg for aggregates or 6048 // illegal vector types. Lower VAArg here for these cases and use 6049 // the LLVM va_arg instruction for everything else. 6050 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty)) 6051 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect()); 6052 6053 uint64_t PointerSize = getTarget().getPointerWidth(0) / 8; 6054 CharUnits SlotSize = CharUnits::fromQuantity(PointerSize); 6055 6056 // Empty records are ignored for parameter passing purposes. 6057 if (isEmptyRecord(getContext(), Ty, true)) { 6058 Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize); 6059 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); 6060 return Addr; 6061 } 6062 6063 // The size of the actual thing passed, which might end up just 6064 // being a pointer for indirect types. 6065 auto TyInfo = getContext().getTypeInfoInChars(Ty); 6066 6067 // Arguments bigger than 16 bytes which aren't homogeneous 6068 // aggregates should be passed indirectly. 6069 bool IsIndirect = false; 6070 if (TyInfo.first.getQuantity() > 16) { 6071 const Type *Base = nullptr; 6072 uint64_t Members = 0; 6073 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members); 6074 } 6075 6076 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, 6077 TyInfo, SlotSize, /*AllowHigherAlign*/ true); 6078 } 6079 6080 Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 6081 QualType Ty) const { 6082 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 6083 CGF.getContext().getTypeInfoInChars(Ty), 6084 CharUnits::fromQuantity(8), 6085 /*allowHigherAlign*/ false); 6086 } 6087 6088 //===----------------------------------------------------------------------===// 6089 // ARM ABI Implementation 6090 //===----------------------------------------------------------------------===// 6091 6092 namespace { 6093 6094 class ARMABIInfo : public SwiftABIInfo { 6095 public: 6096 enum ABIKind { 6097 APCS = 0, 6098 AAPCS = 1, 6099 AAPCS_VFP = 2, 6100 AAPCS16_VFP = 3, 6101 }; 6102 6103 private: 6104 ABIKind Kind; 6105 bool IsFloatABISoftFP; 6106 6107 public: 6108 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) 6109 : SwiftABIInfo(CGT), Kind(_Kind) { 6110 setCCs(); 6111 IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" || 6112 CGT.getCodeGenOpts().FloatABI == ""; // default 6113 } 6114 6115 bool isEABI() const { 6116 switch (getTarget().getTriple().getEnvironment()) { 6117 case llvm::Triple::Android: 6118 case llvm::Triple::EABI: 6119 case llvm::Triple::EABIHF: 6120 case llvm::Triple::GNUEABI: 6121 case llvm::Triple::GNUEABIHF: 6122 case llvm::Triple::MuslEABI: 6123 case llvm::Triple::MuslEABIHF: 6124 return true; 6125 default: 6126 return false; 6127 } 6128 } 6129 6130 bool isEABIHF() const { 6131 switch (getTarget().getTriple().getEnvironment()) { 6132 case llvm::Triple::EABIHF: 6133 case llvm::Triple::GNUEABIHF: 6134 case llvm::Triple::MuslEABIHF: 6135 return true; 6136 default: 6137 return false; 6138 } 6139 } 6140 6141 ABIKind getABIKind() const { return Kind; } 6142 6143 bool allowBFloatArgsAndRet() const override { 6144 return !IsFloatABISoftFP && getTarget().hasBFloat16Type(); 6145 } 6146 6147 private: 6148 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic, 6149 unsigned functionCallConv) const; 6150 ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic, 6151 unsigned functionCallConv) const; 6152 ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base, 6153 uint64_t Members) const; 6154 ABIArgInfo coerceIllegalVector(QualType Ty) const; 6155 bool isIllegalVectorType(QualType Ty) const; 6156 bool containsAnyFP16Vectors(QualType Ty) const; 6157 6158 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 6159 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 6160 uint64_t Members) const override; 6161 6162 bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const; 6163 6164 void computeInfo(CGFunctionInfo &FI) const override; 6165 6166 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6167 QualType Ty) const override; 6168 6169 llvm::CallingConv::ID getLLVMDefaultCC() const; 6170 llvm::CallingConv::ID getABIDefaultCC() const; 6171 void setCCs(); 6172 6173 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, 6174 bool asReturnValue) const override { 6175 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 6176 } 6177 bool isSwiftErrorInRegister() const override { 6178 return true; 6179 } 6180 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, 6181 unsigned elts) const override; 6182 }; 6183 6184 class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 6185 public: 6186 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 6187 : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {} 6188 6189 const ARMABIInfo &getABIInfo() const { 6190 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 6191 } 6192 6193 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 6194 return 13; 6195 } 6196 6197 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 6198 return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue"; 6199 } 6200 6201 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 6202 llvm::Value *Address) const override { 6203 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 6204 6205 // 0-15 are the 16 integer registers. 6206 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); 6207 return false; 6208 } 6209 6210 unsigned getSizeOfUnwindException() const override { 6211 if (getABIInfo().isEABI()) return 88; 6212 return TargetCodeGenInfo::getSizeOfUnwindException(); 6213 } 6214 6215 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6216 CodeGen::CodeGenModule &CGM) const override { 6217 if (GV->isDeclaration()) 6218 return; 6219 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 6220 if (!FD) 6221 return; 6222 6223 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>(); 6224 if (!Attr) 6225 return; 6226 6227 const char *Kind; 6228 switch (Attr->getInterrupt()) { 6229 case ARMInterruptAttr::Generic: Kind = ""; break; 6230 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break; 6231 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break; 6232 case ARMInterruptAttr::SWI: Kind = "SWI"; break; 6233 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break; 6234 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break; 6235 } 6236 6237 llvm::Function *Fn = cast<llvm::Function>(GV); 6238 6239 Fn->addFnAttr("interrupt", Kind); 6240 6241 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind(); 6242 if (ABI == ARMABIInfo::APCS) 6243 return; 6244 6245 // AAPCS guarantees that sp will be 8-byte aligned on any public interface, 6246 // however this is not necessarily true on taking any interrupt. Instruct 6247 // the backend to perform a realignment as part of the function prologue. 6248 llvm::AttrBuilder B; 6249 B.addStackAlignmentAttr(8); 6250 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); 6251 } 6252 }; 6253 6254 class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo { 6255 public: 6256 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 6257 : ARMTargetCodeGenInfo(CGT, K) {} 6258 6259 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6260 CodeGen::CodeGenModule &CGM) const override; 6261 6262 void getDependentLibraryOption(llvm::StringRef Lib, 6263 llvm::SmallString<24> &Opt) const override { 6264 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib); 6265 } 6266 6267 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, 6268 llvm::SmallString<32> &Opt) const override { 6269 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 6270 } 6271 }; 6272 6273 void WindowsARMTargetCodeGenInfo::setTargetAttributes( 6274 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 6275 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 6276 if (GV->isDeclaration()) 6277 return; 6278 addStackProbeTargetAttributes(D, GV, CGM); 6279 } 6280 } 6281 6282 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 6283 if (!::classifyReturnType(getCXXABI(), FI, *this)) 6284 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(), 6285 FI.getCallingConvention()); 6286 6287 for (auto &I : FI.arguments()) 6288 I.info = classifyArgumentType(I.type, FI.isVariadic(), 6289 FI.getCallingConvention()); 6290 6291 6292 // Always honor user-specified calling convention. 6293 if (FI.getCallingConvention() != llvm::CallingConv::C) 6294 return; 6295 6296 llvm::CallingConv::ID cc = getRuntimeCC(); 6297 if (cc != llvm::CallingConv::C) 6298 FI.setEffectiveCallingConvention(cc); 6299 } 6300 6301 /// Return the default calling convention that LLVM will use. 6302 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const { 6303 // The default calling convention that LLVM will infer. 6304 if (isEABIHF() || getTarget().getTriple().isWatchABI()) 6305 return llvm::CallingConv::ARM_AAPCS_VFP; 6306 else if (isEABI()) 6307 return llvm::CallingConv::ARM_AAPCS; 6308 else 6309 return llvm::CallingConv::ARM_APCS; 6310 } 6311 6312 /// Return the calling convention that our ABI would like us to use 6313 /// as the C calling convention. 6314 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const { 6315 switch (getABIKind()) { 6316 case APCS: return llvm::CallingConv::ARM_APCS; 6317 case AAPCS: return llvm::CallingConv::ARM_AAPCS; 6318 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 6319 case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 6320 } 6321 llvm_unreachable("bad ABI kind"); 6322 } 6323 6324 void ARMABIInfo::setCCs() { 6325 assert(getRuntimeCC() == llvm::CallingConv::C); 6326 6327 // Don't muddy up the IR with a ton of explicit annotations if 6328 // they'd just match what LLVM will infer from the triple. 6329 llvm::CallingConv::ID abiCC = getABIDefaultCC(); 6330 if (abiCC != getLLVMDefaultCC()) 6331 RuntimeCC = abiCC; 6332 } 6333 6334 ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const { 6335 uint64_t Size = getContext().getTypeSize(Ty); 6336 if (Size <= 32) { 6337 llvm::Type *ResType = 6338 llvm::Type::getInt32Ty(getVMContext()); 6339 return ABIArgInfo::getDirect(ResType); 6340 } 6341 if (Size == 64 || Size == 128) { 6342 auto *ResType = llvm::FixedVectorType::get( 6343 llvm::Type::getInt32Ty(getVMContext()), Size / 32); 6344 return ABIArgInfo::getDirect(ResType); 6345 } 6346 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 6347 } 6348 6349 ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty, 6350 const Type *Base, 6351 uint64_t Members) const { 6352 assert(Base && "Base class should be set for homogeneous aggregate"); 6353 // Base can be a floating-point or a vector. 6354 if (const VectorType *VT = Base->getAs<VectorType>()) { 6355 // FP16 vectors should be converted to integer vectors 6356 if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) { 6357 uint64_t Size = getContext().getTypeSize(VT); 6358 auto *NewVecTy = llvm::FixedVectorType::get( 6359 llvm::Type::getInt32Ty(getVMContext()), Size / 32); 6360 llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members); 6361 return ABIArgInfo::getDirect(Ty, 0, nullptr, false); 6362 } 6363 } 6364 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); 6365 } 6366 6367 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic, 6368 unsigned functionCallConv) const { 6369 // 6.1.2.1 The following argument types are VFP CPRCs: 6370 // A single-precision floating-point type (including promoted 6371 // half-precision types); A double-precision floating-point type; 6372 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate 6373 // with a Base Type of a single- or double-precision floating-point type, 6374 // 64-bit containerized vectors or 128-bit containerized vectors with one 6375 // to four Elements. 6376 // Variadic functions should always marshal to the base standard. 6377 bool IsAAPCS_VFP = 6378 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false); 6379 6380 Ty = useFirstFieldIfTransparentUnion(Ty); 6381 6382 // Handle illegal vector types here. 6383 if (isIllegalVectorType(Ty)) 6384 return coerceIllegalVector(Ty); 6385 6386 if (!isAggregateTypeForABI(Ty)) { 6387 // Treat an enum type as its underlying type. 6388 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { 6389 Ty = EnumTy->getDecl()->getIntegerType(); 6390 } 6391 6392 if (const auto *EIT = Ty->getAs<ExtIntType>()) 6393 if (EIT->getNumBits() > 64) 6394 return getNaturalAlignIndirect(Ty, /*ByVal=*/true); 6395 6396 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) 6397 : ABIArgInfo::getDirect()); 6398 } 6399 6400 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 6401 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 6402 } 6403 6404 // Ignore empty records. 6405 if (isEmptyRecord(getContext(), Ty, true)) 6406 return ABIArgInfo::getIgnore(); 6407 6408 if (IsAAPCS_VFP) { 6409 // Homogeneous Aggregates need to be expanded when we can fit the aggregate 6410 // into VFP registers. 6411 const Type *Base = nullptr; 6412 uint64_t Members = 0; 6413 if (isHomogeneousAggregate(Ty, Base, Members)) 6414 return classifyHomogeneousAggregate(Ty, Base, Members); 6415 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) { 6416 // WatchOS does have homogeneous aggregates. Note that we intentionally use 6417 // this convention even for a variadic function: the backend will use GPRs 6418 // if needed. 6419 const Type *Base = nullptr; 6420 uint64_t Members = 0; 6421 if (isHomogeneousAggregate(Ty, Base, Members)) { 6422 assert(Base && Members <= 4 && "unexpected homogeneous aggregate"); 6423 llvm::Type *Ty = 6424 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members); 6425 return ABIArgInfo::getDirect(Ty, 0, nullptr, false); 6426 } 6427 } 6428 6429 if (getABIKind() == ARMABIInfo::AAPCS16_VFP && 6430 getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) { 6431 // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're 6432 // bigger than 128-bits, they get placed in space allocated by the caller, 6433 // and a pointer is passed. 6434 return ABIArgInfo::getIndirect( 6435 CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false); 6436 } 6437 6438 // Support byval for ARM. 6439 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at 6440 // most 8-byte. We realign the indirect argument if type alignment is bigger 6441 // than ABI alignment. 6442 uint64_t ABIAlign = 4; 6443 uint64_t TyAlign; 6444 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 6445 getABIKind() == ARMABIInfo::AAPCS) { 6446 TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity(); 6447 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 6448 } else { 6449 TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity(); 6450 } 6451 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) { 6452 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval"); 6453 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), 6454 /*ByVal=*/true, 6455 /*Realign=*/TyAlign > ABIAlign); 6456 } 6457 6458 // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of 6459 // same size and alignment. 6460 if (getTarget().isRenderScriptTarget()) { 6461 return coerceToIntArray(Ty, getContext(), getVMContext()); 6462 } 6463 6464 // Otherwise, pass by coercing to a structure of the appropriate size. 6465 llvm::Type* ElemTy; 6466 unsigned SizeRegs; 6467 // FIXME: Try to match the types of the arguments more accurately where 6468 // we can. 6469 if (TyAlign <= 4) { 6470 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 6471 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 6472 } else { 6473 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 6474 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 6475 } 6476 6477 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs)); 6478 } 6479 6480 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 6481 llvm::LLVMContext &VMContext) { 6482 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 6483 // is called integer-like if its size is less than or equal to one word, and 6484 // the offset of each of its addressable sub-fields is zero. 6485 6486 uint64_t Size = Context.getTypeSize(Ty); 6487 6488 // Check that the type fits in a word. 6489 if (Size > 32) 6490 return false; 6491 6492 // FIXME: Handle vector types! 6493 if (Ty->isVectorType()) 6494 return false; 6495 6496 // Float types are never treated as "integer like". 6497 if (Ty->isRealFloatingType()) 6498 return false; 6499 6500 // If this is a builtin or pointer type then it is ok. 6501 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 6502 return true; 6503 6504 // Small complex integer types are "integer like". 6505 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 6506 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 6507 6508 // Single element and zero sized arrays should be allowed, by the definition 6509 // above, but they are not. 6510 6511 // Otherwise, it must be a record type. 6512 const RecordType *RT = Ty->getAs<RecordType>(); 6513 if (!RT) return false; 6514 6515 // Ignore records with flexible arrays. 6516 const RecordDecl *RD = RT->getDecl(); 6517 if (RD->hasFlexibleArrayMember()) 6518 return false; 6519 6520 // Check that all sub-fields are at offset 0, and are themselves "integer 6521 // like". 6522 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 6523 6524 bool HadField = false; 6525 unsigned idx = 0; 6526 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 6527 i != e; ++i, ++idx) { 6528 const FieldDecl *FD = *i; 6529 6530 // Bit-fields are not addressable, we only need to verify they are "integer 6531 // like". We still have to disallow a subsequent non-bitfield, for example: 6532 // struct { int : 0; int x } 6533 // is non-integer like according to gcc. 6534 if (FD->isBitField()) { 6535 if (!RD->isUnion()) 6536 HadField = true; 6537 6538 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 6539 return false; 6540 6541 continue; 6542 } 6543 6544 // Check if this field is at offset 0. 6545 if (Layout.getFieldOffset(idx) != 0) 6546 return false; 6547 6548 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 6549 return false; 6550 6551 // Only allow at most one field in a structure. This doesn't match the 6552 // wording above, but follows gcc in situations with a field following an 6553 // empty structure. 6554 if (!RD->isUnion()) { 6555 if (HadField) 6556 return false; 6557 6558 HadField = true; 6559 } 6560 } 6561 6562 return true; 6563 } 6564 6565 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic, 6566 unsigned functionCallConv) const { 6567 6568 // Variadic functions should always marshal to the base standard. 6569 bool IsAAPCS_VFP = 6570 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true); 6571 6572 if (RetTy->isVoidType()) 6573 return ABIArgInfo::getIgnore(); 6574 6575 if (const VectorType *VT = RetTy->getAs<VectorType>()) { 6576 // Large vector types should be returned via memory. 6577 if (getContext().getTypeSize(RetTy) > 128) 6578 return getNaturalAlignIndirect(RetTy); 6579 // TODO: FP16/BF16 vectors should be converted to integer vectors 6580 // This check is similar to isIllegalVectorType - refactor? 6581 if ((!getTarget().hasLegalHalfType() && 6582 (VT->getElementType()->isFloat16Type() || 6583 VT->getElementType()->isHalfType())) || 6584 (IsFloatABISoftFP && 6585 VT->getElementType()->isBFloat16Type())) 6586 return coerceIllegalVector(RetTy); 6587 } 6588 6589 if (!isAggregateTypeForABI(RetTy)) { 6590 // Treat an enum type as its underlying type. 6591 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 6592 RetTy = EnumTy->getDecl()->getIntegerType(); 6593 6594 if (const auto *EIT = RetTy->getAs<ExtIntType>()) 6595 if (EIT->getNumBits() > 64) 6596 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false); 6597 6598 return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 6599 : ABIArgInfo::getDirect(); 6600 } 6601 6602 // Are we following APCS? 6603 if (getABIKind() == APCS) { 6604 if (isEmptyRecord(getContext(), RetTy, false)) 6605 return ABIArgInfo::getIgnore(); 6606 6607 // Complex types are all returned as packed integers. 6608 // 6609 // FIXME: Consider using 2 x vector types if the back end handles them 6610 // correctly. 6611 if (RetTy->isAnyComplexType()) 6612 return ABIArgInfo::getDirect(llvm::IntegerType::get( 6613 getVMContext(), getContext().getTypeSize(RetTy))); 6614 6615 // Integer like structures are returned in r0. 6616 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 6617 // Return in the smallest viable integer type. 6618 uint64_t Size = getContext().getTypeSize(RetTy); 6619 if (Size <= 8) 6620 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 6621 if (Size <= 16) 6622 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 6623 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 6624 } 6625 6626 // Otherwise return in memory. 6627 return getNaturalAlignIndirect(RetTy); 6628 } 6629 6630 // Otherwise this is an AAPCS variant. 6631 6632 if (isEmptyRecord(getContext(), RetTy, true)) 6633 return ABIArgInfo::getIgnore(); 6634 6635 // Check for homogeneous aggregates with AAPCS-VFP. 6636 if (IsAAPCS_VFP) { 6637 const Type *Base = nullptr; 6638 uint64_t Members = 0; 6639 if (isHomogeneousAggregate(RetTy, Base, Members)) 6640 return classifyHomogeneousAggregate(RetTy, Base, Members); 6641 } 6642 6643 // Aggregates <= 4 bytes are returned in r0; other aggregates 6644 // are returned indirectly. 6645 uint64_t Size = getContext().getTypeSize(RetTy); 6646 if (Size <= 32) { 6647 // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of 6648 // same size and alignment. 6649 if (getTarget().isRenderScriptTarget()) { 6650 return coerceToIntArray(RetTy, getContext(), getVMContext()); 6651 } 6652 if (getDataLayout().isBigEndian()) 6653 // Return in 32 bit integer integer type (as if loaded by LDR, AAPCS 5.4) 6654 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 6655 6656 // Return in the smallest viable integer type. 6657 if (Size <= 8) 6658 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 6659 if (Size <= 16) 6660 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 6661 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 6662 } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) { 6663 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext()); 6664 llvm::Type *CoerceTy = 6665 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32); 6666 return ABIArgInfo::getDirect(CoerceTy); 6667 } 6668 6669 return getNaturalAlignIndirect(RetTy); 6670 } 6671 6672 /// isIllegalVector - check whether Ty is an illegal vector type. 6673 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const { 6674 if (const VectorType *VT = Ty->getAs<VectorType> ()) { 6675 // On targets that don't support half, fp16 or bfloat, they are expanded 6676 // into float, and we don't want the ABI to depend on whether or not they 6677 // are supported in hardware. Thus return false to coerce vectors of these 6678 // types into integer vectors. 6679 // We do not depend on hasLegalHalfType for bfloat as it is a 6680 // separate IR type. 6681 if ((!getTarget().hasLegalHalfType() && 6682 (VT->getElementType()->isFloat16Type() || 6683 VT->getElementType()->isHalfType())) || 6684 (IsFloatABISoftFP && 6685 VT->getElementType()->isBFloat16Type())) 6686 return true; 6687 if (isAndroid()) { 6688 // Android shipped using Clang 3.1, which supported a slightly different 6689 // vector ABI. The primary differences were that 3-element vector types 6690 // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path 6691 // accepts that legacy behavior for Android only. 6692 // Check whether VT is legal. 6693 unsigned NumElements = VT->getNumElements(); 6694 // NumElements should be power of 2 or equal to 3. 6695 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3) 6696 return true; 6697 } else { 6698 // Check whether VT is legal. 6699 unsigned NumElements = VT->getNumElements(); 6700 uint64_t Size = getContext().getTypeSize(VT); 6701 // NumElements should be power of 2. 6702 if (!llvm::isPowerOf2_32(NumElements)) 6703 return true; 6704 // Size should be greater than 32 bits. 6705 return Size <= 32; 6706 } 6707 } 6708 return false; 6709 } 6710 6711 /// Return true if a type contains any 16-bit floating point vectors 6712 bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const { 6713 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 6714 uint64_t NElements = AT->getSize().getZExtValue(); 6715 if (NElements == 0) 6716 return false; 6717 return containsAnyFP16Vectors(AT->getElementType()); 6718 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 6719 const RecordDecl *RD = RT->getDecl(); 6720 6721 // If this is a C++ record, check the bases first. 6722 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 6723 if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) { 6724 return containsAnyFP16Vectors(B.getType()); 6725 })) 6726 return true; 6727 6728 if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) { 6729 return FD && containsAnyFP16Vectors(FD->getType()); 6730 })) 6731 return true; 6732 6733 return false; 6734 } else { 6735 if (const VectorType *VT = Ty->getAs<VectorType>()) 6736 return (VT->getElementType()->isFloat16Type() || 6737 VT->getElementType()->isBFloat16Type() || 6738 VT->getElementType()->isHalfType()); 6739 return false; 6740 } 6741 } 6742 6743 bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize, 6744 llvm::Type *eltTy, 6745 unsigned numElts) const { 6746 if (!llvm::isPowerOf2_32(numElts)) 6747 return false; 6748 unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy); 6749 if (size > 64) 6750 return false; 6751 if (vectorSize.getQuantity() != 8 && 6752 (vectorSize.getQuantity() != 16 || numElts == 1)) 6753 return false; 6754 return true; 6755 } 6756 6757 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 6758 // Homogeneous aggregates for AAPCS-VFP must have base types of float, 6759 // double, or 64-bit or 128-bit vectors. 6760 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 6761 if (BT->getKind() == BuiltinType::Float || 6762 BT->getKind() == BuiltinType::Double || 6763 BT->getKind() == BuiltinType::LongDouble) 6764 return true; 6765 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 6766 unsigned VecSize = getContext().getTypeSize(VT); 6767 if (VecSize == 64 || VecSize == 128) 6768 return true; 6769 } 6770 return false; 6771 } 6772 6773 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 6774 uint64_t Members) const { 6775 return Members <= 4; 6776 } 6777 6778 bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention, 6779 bool acceptHalf) const { 6780 // Give precedence to user-specified calling conventions. 6781 if (callConvention != llvm::CallingConv::C) 6782 return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP); 6783 else 6784 return (getABIKind() == AAPCS_VFP) || 6785 (acceptHalf && (getABIKind() == AAPCS16_VFP)); 6786 } 6787 6788 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6789 QualType Ty) const { 6790 CharUnits SlotSize = CharUnits::fromQuantity(4); 6791 6792 // Empty records are ignored for parameter passing purposes. 6793 if (isEmptyRecord(getContext(), Ty, true)) { 6794 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize); 6795 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); 6796 return Addr; 6797 } 6798 6799 CharUnits TySize = getContext().getTypeSizeInChars(Ty); 6800 CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty); 6801 6802 // Use indirect if size of the illegal vector is bigger than 16 bytes. 6803 bool IsIndirect = false; 6804 const Type *Base = nullptr; 6805 uint64_t Members = 0; 6806 if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) { 6807 IsIndirect = true; 6808 6809 // ARMv7k passes structs bigger than 16 bytes indirectly, in space 6810 // allocated by the caller. 6811 } else if (TySize > CharUnits::fromQuantity(16) && 6812 getABIKind() == ARMABIInfo::AAPCS16_VFP && 6813 !isHomogeneousAggregate(Ty, Base, Members)) { 6814 IsIndirect = true; 6815 6816 // Otherwise, bound the type's ABI alignment. 6817 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for 6818 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte. 6819 // Our callers should be prepared to handle an under-aligned address. 6820 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP || 6821 getABIKind() == ARMABIInfo::AAPCS) { 6822 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4)); 6823 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8)); 6824 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) { 6825 // ARMv7k allows type alignment up to 16 bytes. 6826 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4)); 6827 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16)); 6828 } else { 6829 TyAlignForABI = CharUnits::fromQuantity(4); 6830 } 6831 6832 std::pair<CharUnits, CharUnits> TyInfo = { TySize, TyAlignForABI }; 6833 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, 6834 SlotSize, /*AllowHigherAlign*/ true); 6835 } 6836 6837 //===----------------------------------------------------------------------===// 6838 // NVPTX ABI Implementation 6839 //===----------------------------------------------------------------------===// 6840 6841 namespace { 6842 6843 class NVPTXTargetCodeGenInfo; 6844 6845 class NVPTXABIInfo : public ABIInfo { 6846 NVPTXTargetCodeGenInfo &CGInfo; 6847 6848 public: 6849 NVPTXABIInfo(CodeGenTypes &CGT, NVPTXTargetCodeGenInfo &Info) 6850 : ABIInfo(CGT), CGInfo(Info) {} 6851 6852 ABIArgInfo classifyReturnType(QualType RetTy) const; 6853 ABIArgInfo classifyArgumentType(QualType Ty) const; 6854 6855 void computeInfo(CGFunctionInfo &FI) const override; 6856 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6857 QualType Ty) const override; 6858 bool isUnsupportedType(QualType T) const; 6859 ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, unsigned MaxSize) const; 6860 }; 6861 6862 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { 6863 public: 6864 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT) 6865 : TargetCodeGenInfo(std::make_unique<NVPTXABIInfo>(CGT, *this)) {} 6866 6867 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6868 CodeGen::CodeGenModule &M) const override; 6869 bool shouldEmitStaticExternCAliases() const override; 6870 6871 llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override { 6872 // On the device side, surface reference is represented as an object handle 6873 // in 64-bit integer. 6874 return llvm::Type::getInt64Ty(getABIInfo().getVMContext()); 6875 } 6876 6877 llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const override { 6878 // On the device side, texture reference is represented as an object handle 6879 // in 64-bit integer. 6880 return llvm::Type::getInt64Ty(getABIInfo().getVMContext()); 6881 } 6882 6883 bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF, LValue Dst, 6884 LValue Src) const override { 6885 emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src); 6886 return true; 6887 } 6888 6889 bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF, LValue Dst, 6890 LValue Src) const override { 6891 emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src); 6892 return true; 6893 } 6894 6895 private: 6896 // Adds a NamedMDNode with GV, Name, and Operand as operands, and adds the 6897 // resulting MDNode to the nvvm.annotations MDNode. 6898 static void addNVVMMetadata(llvm::GlobalValue *GV, StringRef Name, 6899 int Operand); 6900 6901 static void emitBuiltinSurfTexDeviceCopy(CodeGenFunction &CGF, LValue Dst, 6902 LValue Src) { 6903 llvm::Value *Handle = nullptr; 6904 llvm::Constant *C = 6905 llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).getPointer()); 6906 // Lookup `addrspacecast` through the constant pointer if any. 6907 if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C)) 6908 C = llvm::cast<llvm::Constant>(ASC->getPointerOperand()); 6909 if (auto *GV = llvm::dyn_cast_or_null<llvm::GlobalVariable>(C)) { 6910 // Load the handle from the specific global variable using 6911 // `nvvm.texsurf.handle.internal` intrinsic. 6912 Handle = CGF.EmitRuntimeCall( 6913 CGF.CGM.getIntrinsic(llvm::Intrinsic::nvvm_texsurf_handle_internal, 6914 {GV->getType()}), 6915 {GV}, "texsurf_handle"); 6916 } else 6917 Handle = CGF.EmitLoadOfScalar(Src, SourceLocation()); 6918 CGF.EmitStoreOfScalar(Handle, Dst); 6919 } 6920 }; 6921 6922 /// Checks if the type is unsupported directly by the current target. 6923 bool NVPTXABIInfo::isUnsupportedType(QualType T) const { 6924 ASTContext &Context = getContext(); 6925 if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type()) 6926 return true; 6927 if (!Context.getTargetInfo().hasFloat128Type() && 6928 (T->isFloat128Type() || 6929 (T->isRealFloatingType() && Context.getTypeSize(T) == 128))) 6930 return true; 6931 if (const auto *EIT = T->getAs<ExtIntType>()) 6932 return EIT->getNumBits() > 6933 (Context.getTargetInfo().hasInt128Type() ? 128U : 64U); 6934 if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() && 6935 Context.getTypeSize(T) > 64U) 6936 return true; 6937 if (const auto *AT = T->getAsArrayTypeUnsafe()) 6938 return isUnsupportedType(AT->getElementType()); 6939 const auto *RT = T->getAs<RecordType>(); 6940 if (!RT) 6941 return false; 6942 const RecordDecl *RD = RT->getDecl(); 6943 6944 // If this is a C++ record, check the bases first. 6945 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 6946 for (const CXXBaseSpecifier &I : CXXRD->bases()) 6947 if (isUnsupportedType(I.getType())) 6948 return true; 6949 6950 for (const FieldDecl *I : RD->fields()) 6951 if (isUnsupportedType(I->getType())) 6952 return true; 6953 return false; 6954 } 6955 6956 /// Coerce the given type into an array with maximum allowed size of elements. 6957 ABIArgInfo NVPTXABIInfo::coerceToIntArrayWithLimit(QualType Ty, 6958 unsigned MaxSize) const { 6959 // Alignment and Size are measured in bits. 6960 const uint64_t Size = getContext().getTypeSize(Ty); 6961 const uint64_t Alignment = getContext().getTypeAlign(Ty); 6962 const unsigned Div = std::min<unsigned>(MaxSize, Alignment); 6963 llvm::Type *IntType = llvm::Type::getIntNTy(getVMContext(), Div); 6964 const uint64_t NumElements = (Size + Div - 1) / Div; 6965 return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements)); 6966 } 6967 6968 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const { 6969 if (RetTy->isVoidType()) 6970 return ABIArgInfo::getIgnore(); 6971 6972 if (getContext().getLangOpts().OpenMP && 6973 getContext().getLangOpts().OpenMPIsDevice && isUnsupportedType(RetTy)) 6974 return coerceToIntArrayWithLimit(RetTy, 64); 6975 6976 // note: this is different from default ABI 6977 if (!RetTy->isScalarType()) 6978 return ABIArgInfo::getDirect(); 6979 6980 // Treat an enum type as its underlying type. 6981 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 6982 RetTy = EnumTy->getDecl()->getIntegerType(); 6983 6984 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 6985 : ABIArgInfo::getDirect()); 6986 } 6987 6988 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const { 6989 // Treat an enum type as its underlying type. 6990 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 6991 Ty = EnumTy->getDecl()->getIntegerType(); 6992 6993 // Return aggregates type as indirect by value 6994 if (isAggregateTypeForABI(Ty)) { 6995 // Under CUDA device compilation, tex/surf builtin types are replaced with 6996 // object types and passed directly. 6997 if (getContext().getLangOpts().CUDAIsDevice) { 6998 if (Ty->isCUDADeviceBuiltinSurfaceType()) 6999 return ABIArgInfo::getDirect( 7000 CGInfo.getCUDADeviceBuiltinSurfaceDeviceType()); 7001 if (Ty->isCUDADeviceBuiltinTextureType()) 7002 return ABIArgInfo::getDirect( 7003 CGInfo.getCUDADeviceBuiltinTextureDeviceType()); 7004 } 7005 return getNaturalAlignIndirect(Ty, /* byval */ true); 7006 } 7007 7008 if (const auto *EIT = Ty->getAs<ExtIntType>()) { 7009 if ((EIT->getNumBits() > 128) || 7010 (!getContext().getTargetInfo().hasInt128Type() && 7011 EIT->getNumBits() > 64)) 7012 return getNaturalAlignIndirect(Ty, /* byval */ true); 7013 } 7014 7015 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) 7016 : ABIArgInfo::getDirect()); 7017 } 7018 7019 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const { 7020 if (!getCXXABI().classifyReturnType(FI)) 7021 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 7022 for (auto &I : FI.arguments()) 7023 I.info = classifyArgumentType(I.type); 7024 7025 // Always honor user-specified calling convention. 7026 if (FI.getCallingConvention() != llvm::CallingConv::C) 7027 return; 7028 7029 FI.setEffectiveCallingConvention(getRuntimeCC()); 7030 } 7031 7032 Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7033 QualType Ty) const { 7034 llvm_unreachable("NVPTX does not support varargs"); 7035 } 7036 7037 void NVPTXTargetCodeGenInfo::setTargetAttributes( 7038 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { 7039 if (GV->isDeclaration()) 7040 return; 7041 const VarDecl *VD = dyn_cast_or_null<VarDecl>(D); 7042 if (VD) { 7043 if (M.getLangOpts().CUDA) { 7044 if (VD->getType()->isCUDADeviceBuiltinSurfaceType()) 7045 addNVVMMetadata(GV, "surface", 1); 7046 else if (VD->getType()->isCUDADeviceBuiltinTextureType()) 7047 addNVVMMetadata(GV, "texture", 1); 7048 return; 7049 } 7050 } 7051 7052 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 7053 if (!FD) return; 7054 7055 llvm::Function *F = cast<llvm::Function>(GV); 7056 7057 // Perform special handling in OpenCL mode 7058 if (M.getLangOpts().OpenCL) { 7059 // Use OpenCL function attributes to check for kernel functions 7060 // By default, all functions are device functions 7061 if (FD->hasAttr<OpenCLKernelAttr>()) { 7062 // OpenCL __kernel functions get kernel metadata 7063 // Create !{<func-ref>, metadata !"kernel", i32 1} node 7064 addNVVMMetadata(F, "kernel", 1); 7065 // And kernel functions are not subject to inlining 7066 F->addFnAttr(llvm::Attribute::NoInline); 7067 } 7068 } 7069 7070 // Perform special handling in CUDA mode. 7071 if (M.getLangOpts().CUDA) { 7072 // CUDA __global__ functions get a kernel metadata entry. Since 7073 // __global__ functions cannot be called from the device, we do not 7074 // need to set the noinline attribute. 7075 if (FD->hasAttr<CUDAGlobalAttr>()) { 7076 // Create !{<func-ref>, metadata !"kernel", i32 1} node 7077 addNVVMMetadata(F, "kernel", 1); 7078 } 7079 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) { 7080 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node 7081 llvm::APSInt MaxThreads(32); 7082 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext()); 7083 if (MaxThreads > 0) 7084 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue()); 7085 7086 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was 7087 // not specified in __launch_bounds__ or if the user specified a 0 value, 7088 // we don't have to add a PTX directive. 7089 if (Attr->getMinBlocks()) { 7090 llvm::APSInt MinBlocks(32); 7091 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext()); 7092 if (MinBlocks > 0) 7093 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node 7094 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue()); 7095 } 7096 } 7097 } 7098 } 7099 7100 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV, 7101 StringRef Name, int Operand) { 7102 llvm::Module *M = GV->getParent(); 7103 llvm::LLVMContext &Ctx = M->getContext(); 7104 7105 // Get "nvvm.annotations" metadata node 7106 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations"); 7107 7108 llvm::Metadata *MDVals[] = { 7109 llvm::ConstantAsMetadata::get(GV), llvm::MDString::get(Ctx, Name), 7110 llvm::ConstantAsMetadata::get( 7111 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))}; 7112 // Append metadata to nvvm.annotations 7113 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 7114 } 7115 7116 bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const { 7117 return false; 7118 } 7119 } 7120 7121 //===----------------------------------------------------------------------===// 7122 // SystemZ ABI Implementation 7123 //===----------------------------------------------------------------------===// 7124 7125 namespace { 7126 7127 class SystemZABIInfo : public SwiftABIInfo { 7128 bool HasVector; 7129 bool IsSoftFloatABI; 7130 7131 public: 7132 SystemZABIInfo(CodeGenTypes &CGT, bool HV, bool SF) 7133 : SwiftABIInfo(CGT), HasVector(HV), IsSoftFloatABI(SF) {} 7134 7135 bool isPromotableIntegerTypeForABI(QualType Ty) const; 7136 bool isCompoundType(QualType Ty) const; 7137 bool isVectorArgumentType(QualType Ty) const; 7138 bool isFPArgumentType(QualType Ty) const; 7139 QualType GetSingleElementType(QualType Ty) const; 7140 7141 ABIArgInfo classifyReturnType(QualType RetTy) const; 7142 ABIArgInfo classifyArgumentType(QualType ArgTy) const; 7143 7144 void computeInfo(CGFunctionInfo &FI) const override { 7145 if (!getCXXABI().classifyReturnType(FI)) 7146 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 7147 for (auto &I : FI.arguments()) 7148 I.info = classifyArgumentType(I.type); 7149 } 7150 7151 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7152 QualType Ty) const override; 7153 7154 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, 7155 bool asReturnValue) const override { 7156 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 7157 } 7158 bool isSwiftErrorInRegister() const override { 7159 return false; 7160 } 7161 }; 7162 7163 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo { 7164 public: 7165 SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector, bool SoftFloatABI) 7166 : TargetCodeGenInfo( 7167 std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)) {} 7168 }; 7169 7170 } 7171 7172 bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const { 7173 // Treat an enum type as its underlying type. 7174 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 7175 Ty = EnumTy->getDecl()->getIntegerType(); 7176 7177 // Promotable integer types are required to be promoted by the ABI. 7178 if (ABIInfo::isPromotableIntegerTypeForABI(Ty)) 7179 return true; 7180 7181 if (const auto *EIT = Ty->getAs<ExtIntType>()) 7182 if (EIT->getNumBits() < 64) 7183 return true; 7184 7185 // 32-bit values must also be promoted. 7186 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 7187 switch (BT->getKind()) { 7188 case BuiltinType::Int: 7189 case BuiltinType::UInt: 7190 return true; 7191 default: 7192 return false; 7193 } 7194 return false; 7195 } 7196 7197 bool SystemZABIInfo::isCompoundType(QualType Ty) const { 7198 return (Ty->isAnyComplexType() || 7199 Ty->isVectorType() || 7200 isAggregateTypeForABI(Ty)); 7201 } 7202 7203 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const { 7204 return (HasVector && 7205 Ty->isVectorType() && 7206 getContext().getTypeSize(Ty) <= 128); 7207 } 7208 7209 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const { 7210 if (IsSoftFloatABI) 7211 return false; 7212 7213 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 7214 switch (BT->getKind()) { 7215 case BuiltinType::Float: 7216 case BuiltinType::Double: 7217 return true; 7218 default: 7219 return false; 7220 } 7221 7222 return false; 7223 } 7224 7225 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const { 7226 const RecordType *RT = Ty->getAs<RecordType>(); 7227 7228 if (RT && RT->isStructureOrClassType()) { 7229 const RecordDecl *RD = RT->getDecl(); 7230 QualType Found; 7231 7232 // If this is a C++ record, check the bases first. 7233 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 7234 for (const auto &I : CXXRD->bases()) { 7235 QualType Base = I.getType(); 7236 7237 // Empty bases don't affect things either way. 7238 if (isEmptyRecord(getContext(), Base, true)) 7239 continue; 7240 7241 if (!Found.isNull()) 7242 return Ty; 7243 Found = GetSingleElementType(Base); 7244 } 7245 7246 // Check the fields. 7247 for (const auto *FD : RD->fields()) { 7248 // For compatibility with GCC, ignore empty bitfields in C++ mode. 7249 // Unlike isSingleElementStruct(), empty structure and array fields 7250 // do count. So do anonymous bitfields that aren't zero-sized. 7251 if (getContext().getLangOpts().CPlusPlus && 7252 FD->isZeroLengthBitField(getContext())) 7253 continue; 7254 // Like isSingleElementStruct(), ignore C++20 empty data members. 7255 if (FD->hasAttr<NoUniqueAddressAttr>() && 7256 isEmptyRecord(getContext(), FD->getType(), true)) 7257 continue; 7258 7259 // Unlike isSingleElementStruct(), arrays do not count. 7260 // Nested structures still do though. 7261 if (!Found.isNull()) 7262 return Ty; 7263 Found = GetSingleElementType(FD->getType()); 7264 } 7265 7266 // Unlike isSingleElementStruct(), trailing padding is allowed. 7267 // An 8-byte aligned struct s { float f; } is passed as a double. 7268 if (!Found.isNull()) 7269 return Found; 7270 } 7271 7272 return Ty; 7273 } 7274 7275 Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7276 QualType Ty) const { 7277 // Assume that va_list type is correct; should be pointer to LLVM type: 7278 // struct { 7279 // i64 __gpr; 7280 // i64 __fpr; 7281 // i8 *__overflow_arg_area; 7282 // i8 *__reg_save_area; 7283 // }; 7284 7285 // Every non-vector argument occupies 8 bytes and is passed by preference 7286 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are 7287 // always passed on the stack. 7288 Ty = getContext().getCanonicalType(Ty); 7289 auto TyInfo = getContext().getTypeInfoInChars(Ty); 7290 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty); 7291 llvm::Type *DirectTy = ArgTy; 7292 ABIArgInfo AI = classifyArgumentType(Ty); 7293 bool IsIndirect = AI.isIndirect(); 7294 bool InFPRs = false; 7295 bool IsVector = false; 7296 CharUnits UnpaddedSize; 7297 CharUnits DirectAlign; 7298 if (IsIndirect) { 7299 DirectTy = llvm::PointerType::getUnqual(DirectTy); 7300 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8); 7301 } else { 7302 if (AI.getCoerceToType()) 7303 ArgTy = AI.getCoerceToType(); 7304 InFPRs = (!IsSoftFloatABI && (ArgTy->isFloatTy() || ArgTy->isDoubleTy())); 7305 IsVector = ArgTy->isVectorTy(); 7306 UnpaddedSize = TyInfo.first; 7307 DirectAlign = TyInfo.second; 7308 } 7309 CharUnits PaddedSize = CharUnits::fromQuantity(8); 7310 if (IsVector && UnpaddedSize > PaddedSize) 7311 PaddedSize = CharUnits::fromQuantity(16); 7312 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size."); 7313 7314 CharUnits Padding = (PaddedSize - UnpaddedSize); 7315 7316 llvm::Type *IndexTy = CGF.Int64Ty; 7317 llvm::Value *PaddedSizeV = 7318 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity()); 7319 7320 if (IsVector) { 7321 // Work out the address of a vector argument on the stack. 7322 // Vector arguments are always passed in the high bits of a 7323 // single (8 byte) or double (16 byte) stack slot. 7324 Address OverflowArgAreaPtr = 7325 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr"); 7326 Address OverflowArgArea = 7327 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), 7328 TyInfo.second); 7329 Address MemAddr = 7330 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr"); 7331 7332 // Update overflow_arg_area_ptr pointer 7333 llvm::Value *NewOverflowArgArea = 7334 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV, 7335 "overflow_arg_area"); 7336 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 7337 7338 return MemAddr; 7339 } 7340 7341 assert(PaddedSize.getQuantity() == 8); 7342 7343 unsigned MaxRegs, RegCountField, RegSaveIndex; 7344 CharUnits RegPadding; 7345 if (InFPRs) { 7346 MaxRegs = 4; // Maximum of 4 FPR arguments 7347 RegCountField = 1; // __fpr 7348 RegSaveIndex = 16; // save offset for f0 7349 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR 7350 } else { 7351 MaxRegs = 5; // Maximum of 5 GPR arguments 7352 RegCountField = 0; // __gpr 7353 RegSaveIndex = 2; // save offset for r2 7354 RegPadding = Padding; // values are passed in the low bits of a GPR 7355 } 7356 7357 Address RegCountPtr = 7358 CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr"); 7359 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count"); 7360 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs); 7361 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV, 7362 "fits_in_regs"); 7363 7364 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 7365 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 7366 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 7367 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 7368 7369 // Emit code to load the value if it was passed in registers. 7370 CGF.EmitBlock(InRegBlock); 7371 7372 // Work out the address of an argument register. 7373 llvm::Value *ScaledRegCount = 7374 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count"); 7375 llvm::Value *RegBase = 7376 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity() 7377 + RegPadding.getQuantity()); 7378 llvm::Value *RegOffset = 7379 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset"); 7380 Address RegSaveAreaPtr = 7381 CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr"); 7382 llvm::Value *RegSaveArea = 7383 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area"); 7384 Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset, 7385 "raw_reg_addr"), 7386 PaddedSize); 7387 Address RegAddr = 7388 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr"); 7389 7390 // Update the register count 7391 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1); 7392 llvm::Value *NewRegCount = 7393 CGF.Builder.CreateAdd(RegCount, One, "reg_count"); 7394 CGF.Builder.CreateStore(NewRegCount, RegCountPtr); 7395 CGF.EmitBranch(ContBlock); 7396 7397 // Emit code to load the value if it was passed in memory. 7398 CGF.EmitBlock(InMemBlock); 7399 7400 // Work out the address of a stack argument. 7401 Address OverflowArgAreaPtr = 7402 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr"); 7403 Address OverflowArgArea = 7404 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), 7405 PaddedSize); 7406 Address RawMemAddr = 7407 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr"); 7408 Address MemAddr = 7409 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr"); 7410 7411 // Update overflow_arg_area_ptr pointer 7412 llvm::Value *NewOverflowArgArea = 7413 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV, 7414 "overflow_arg_area"); 7415 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 7416 CGF.EmitBranch(ContBlock); 7417 7418 // Return the appropriate result. 7419 CGF.EmitBlock(ContBlock); 7420 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, 7421 MemAddr, InMemBlock, "va_arg.addr"); 7422 7423 if (IsIndirect) 7424 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"), 7425 TyInfo.second); 7426 7427 return ResAddr; 7428 } 7429 7430 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const { 7431 if (RetTy->isVoidType()) 7432 return ABIArgInfo::getIgnore(); 7433 if (isVectorArgumentType(RetTy)) 7434 return ABIArgInfo::getDirect(); 7435 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64) 7436 return getNaturalAlignIndirect(RetTy); 7437 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 7438 : ABIArgInfo::getDirect()); 7439 } 7440 7441 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const { 7442 // Handle the generic C++ ABI. 7443 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 7444 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 7445 7446 // Integers and enums are extended to full register width. 7447 if (isPromotableIntegerTypeForABI(Ty)) 7448 return ABIArgInfo::getExtend(Ty); 7449 7450 // Handle vector types and vector-like structure types. Note that 7451 // as opposed to float-like structure types, we do not allow any 7452 // padding for vector-like structures, so verify the sizes match. 7453 uint64_t Size = getContext().getTypeSize(Ty); 7454 QualType SingleElementTy = GetSingleElementType(Ty); 7455 if (isVectorArgumentType(SingleElementTy) && 7456 getContext().getTypeSize(SingleElementTy) == Size) 7457 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy)); 7458 7459 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly. 7460 if (Size != 8 && Size != 16 && Size != 32 && Size != 64) 7461 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 7462 7463 // Handle small structures. 7464 if (const RecordType *RT = Ty->getAs<RecordType>()) { 7465 // Structures with flexible arrays have variable length, so really 7466 // fail the size test above. 7467 const RecordDecl *RD = RT->getDecl(); 7468 if (RD->hasFlexibleArrayMember()) 7469 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 7470 7471 // The structure is passed as an unextended integer, a float, or a double. 7472 llvm::Type *PassTy; 7473 if (isFPArgumentType(SingleElementTy)) { 7474 assert(Size == 32 || Size == 64); 7475 if (Size == 32) 7476 PassTy = llvm::Type::getFloatTy(getVMContext()); 7477 else 7478 PassTy = llvm::Type::getDoubleTy(getVMContext()); 7479 } else 7480 PassTy = llvm::IntegerType::get(getVMContext(), Size); 7481 return ABIArgInfo::getDirect(PassTy); 7482 } 7483 7484 // Non-structure compounds are passed indirectly. 7485 if (isCompoundType(Ty)) 7486 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 7487 7488 return ABIArgInfo::getDirect(nullptr); 7489 } 7490 7491 //===----------------------------------------------------------------------===// 7492 // MSP430 ABI Implementation 7493 //===----------------------------------------------------------------------===// 7494 7495 namespace { 7496 7497 class MSP430ABIInfo : public DefaultABIInfo { 7498 static ABIArgInfo complexArgInfo() { 7499 ABIArgInfo Info = ABIArgInfo::getDirect(); 7500 Info.setCanBeFlattened(false); 7501 return Info; 7502 } 7503 7504 public: 7505 MSP430ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 7506 7507 ABIArgInfo classifyReturnType(QualType RetTy) const { 7508 if (RetTy->isAnyComplexType()) 7509 return complexArgInfo(); 7510 7511 return DefaultABIInfo::classifyReturnType(RetTy); 7512 } 7513 7514 ABIArgInfo classifyArgumentType(QualType RetTy) const { 7515 if (RetTy->isAnyComplexType()) 7516 return complexArgInfo(); 7517 7518 return DefaultABIInfo::classifyArgumentType(RetTy); 7519 } 7520 7521 // Just copy the original implementations because 7522 // DefaultABIInfo::classify{Return,Argument}Type() are not virtual 7523 void computeInfo(CGFunctionInfo &FI) const override { 7524 if (!getCXXABI().classifyReturnType(FI)) 7525 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 7526 for (auto &I : FI.arguments()) 7527 I.info = classifyArgumentType(I.type); 7528 } 7529 7530 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7531 QualType Ty) const override { 7532 return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty)); 7533 } 7534 }; 7535 7536 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 7537 public: 7538 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 7539 : TargetCodeGenInfo(std::make_unique<MSP430ABIInfo>(CGT)) {} 7540 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 7541 CodeGen::CodeGenModule &M) const override; 7542 }; 7543 7544 } 7545 7546 void MSP430TargetCodeGenInfo::setTargetAttributes( 7547 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { 7548 if (GV->isDeclaration()) 7549 return; 7550 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 7551 const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>(); 7552 if (!InterruptAttr) 7553 return; 7554 7555 // Handle 'interrupt' attribute: 7556 llvm::Function *F = cast<llvm::Function>(GV); 7557 7558 // Step 1: Set ISR calling convention. 7559 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 7560 7561 // Step 2: Add attributes goodness. 7562 F->addFnAttr(llvm::Attribute::NoInline); 7563 F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber())); 7564 } 7565 } 7566 7567 //===----------------------------------------------------------------------===// 7568 // MIPS ABI Implementation. This works for both little-endian and 7569 // big-endian variants. 7570 //===----------------------------------------------------------------------===// 7571 7572 namespace { 7573 class MipsABIInfo : public ABIInfo { 7574 bool IsO32; 7575 unsigned MinABIStackAlignInBytes, StackAlignInBytes; 7576 void CoerceToIntArgs(uint64_t TySize, 7577 SmallVectorImpl<llvm::Type *> &ArgList) const; 7578 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; 7579 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 7580 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 7581 public: 7582 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 7583 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), 7584 StackAlignInBytes(IsO32 ? 8 : 16) {} 7585 7586 ABIArgInfo classifyReturnType(QualType RetTy) const; 7587 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 7588 void computeInfo(CGFunctionInfo &FI) const override; 7589 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7590 QualType Ty) const override; 7591 ABIArgInfo extendType(QualType Ty) const; 7592 }; 7593 7594 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 7595 unsigned SizeOfUnwindException; 7596 public: 7597 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 7598 : TargetCodeGenInfo(std::make_unique<MipsABIInfo>(CGT, IsO32)), 7599 SizeOfUnwindException(IsO32 ? 24 : 32) {} 7600 7601 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 7602 return 29; 7603 } 7604 7605 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 7606 CodeGen::CodeGenModule &CGM) const override { 7607 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 7608 if (!FD) return; 7609 llvm::Function *Fn = cast<llvm::Function>(GV); 7610 7611 if (FD->hasAttr<MipsLongCallAttr>()) 7612 Fn->addFnAttr("long-call"); 7613 else if (FD->hasAttr<MipsShortCallAttr>()) 7614 Fn->addFnAttr("short-call"); 7615 7616 // Other attributes do not have a meaning for declarations. 7617 if (GV->isDeclaration()) 7618 return; 7619 7620 if (FD->hasAttr<Mips16Attr>()) { 7621 Fn->addFnAttr("mips16"); 7622 } 7623 else if (FD->hasAttr<NoMips16Attr>()) { 7624 Fn->addFnAttr("nomips16"); 7625 } 7626 7627 if (FD->hasAttr<MicroMipsAttr>()) 7628 Fn->addFnAttr("micromips"); 7629 else if (FD->hasAttr<NoMicroMipsAttr>()) 7630 Fn->addFnAttr("nomicromips"); 7631 7632 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>(); 7633 if (!Attr) 7634 return; 7635 7636 const char *Kind; 7637 switch (Attr->getInterrupt()) { 7638 case MipsInterruptAttr::eic: Kind = "eic"; break; 7639 case MipsInterruptAttr::sw0: Kind = "sw0"; break; 7640 case MipsInterruptAttr::sw1: Kind = "sw1"; break; 7641 case MipsInterruptAttr::hw0: Kind = "hw0"; break; 7642 case MipsInterruptAttr::hw1: Kind = "hw1"; break; 7643 case MipsInterruptAttr::hw2: Kind = "hw2"; break; 7644 case MipsInterruptAttr::hw3: Kind = "hw3"; break; 7645 case MipsInterruptAttr::hw4: Kind = "hw4"; break; 7646 case MipsInterruptAttr::hw5: Kind = "hw5"; break; 7647 } 7648 7649 Fn->addFnAttr("interrupt", Kind); 7650 7651 } 7652 7653 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 7654 llvm::Value *Address) const override; 7655 7656 unsigned getSizeOfUnwindException() const override { 7657 return SizeOfUnwindException; 7658 } 7659 }; 7660 } 7661 7662 void MipsABIInfo::CoerceToIntArgs( 7663 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const { 7664 llvm::IntegerType *IntTy = 7665 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 7666 7667 // Add (TySize / MinABIStackAlignInBytes) args of IntTy. 7668 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) 7669 ArgList.push_back(IntTy); 7670 7671 // If necessary, add one more integer type to ArgList. 7672 unsigned R = TySize % (MinABIStackAlignInBytes * 8); 7673 7674 if (R) 7675 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 7676 } 7677 7678 // In N32/64, an aligned double precision floating point field is passed in 7679 // a register. 7680 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { 7681 SmallVector<llvm::Type*, 8> ArgList, IntArgList; 7682 7683 if (IsO32) { 7684 CoerceToIntArgs(TySize, ArgList); 7685 return llvm::StructType::get(getVMContext(), ArgList); 7686 } 7687 7688 if (Ty->isComplexType()) 7689 return CGT.ConvertType(Ty); 7690 7691 const RecordType *RT = Ty->getAs<RecordType>(); 7692 7693 // Unions/vectors are passed in integer registers. 7694 if (!RT || !RT->isStructureOrClassType()) { 7695 CoerceToIntArgs(TySize, ArgList); 7696 return llvm::StructType::get(getVMContext(), ArgList); 7697 } 7698 7699 const RecordDecl *RD = RT->getDecl(); 7700 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 7701 assert(!(TySize % 8) && "Size of structure must be multiple of 8."); 7702 7703 uint64_t LastOffset = 0; 7704 unsigned idx = 0; 7705 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 7706 7707 // Iterate over fields in the struct/class and check if there are any aligned 7708 // double fields. 7709 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 7710 i != e; ++i, ++idx) { 7711 const QualType Ty = i->getType(); 7712 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 7713 7714 if (!BT || BT->getKind() != BuiltinType::Double) 7715 continue; 7716 7717 uint64_t Offset = Layout.getFieldOffset(idx); 7718 if (Offset % 64) // Ignore doubles that are not aligned. 7719 continue; 7720 7721 // Add ((Offset - LastOffset) / 64) args of type i64. 7722 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 7723 ArgList.push_back(I64); 7724 7725 // Add double type. 7726 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 7727 LastOffset = Offset + 64; 7728 } 7729 7730 CoerceToIntArgs(TySize - LastOffset, IntArgList); 7731 ArgList.append(IntArgList.begin(), IntArgList.end()); 7732 7733 return llvm::StructType::get(getVMContext(), ArgList); 7734 } 7735 7736 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset, 7737 uint64_t Offset) const { 7738 if (OrigOffset + MinABIStackAlignInBytes > Offset) 7739 return nullptr; 7740 7741 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8); 7742 } 7743 7744 ABIArgInfo 7745 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 7746 Ty = useFirstFieldIfTransparentUnion(Ty); 7747 7748 uint64_t OrigOffset = Offset; 7749 uint64_t TySize = getContext().getTypeSize(Ty); 7750 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 7751 7752 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), 7753 (uint64_t)StackAlignInBytes); 7754 unsigned CurrOffset = llvm::alignTo(Offset, Align); 7755 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8; 7756 7757 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { 7758 // Ignore empty aggregates. 7759 if (TySize == 0) 7760 return ABIArgInfo::getIgnore(); 7761 7762 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 7763 Offset = OrigOffset + MinABIStackAlignInBytes; 7764 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 7765 } 7766 7767 // If we have reached here, aggregates are passed directly by coercing to 7768 // another structure type. Padding is inserted if the offset of the 7769 // aggregate is unaligned. 7770 ABIArgInfo ArgInfo = 7771 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, 7772 getPaddingType(OrigOffset, CurrOffset)); 7773 ArgInfo.setInReg(true); 7774 return ArgInfo; 7775 } 7776 7777 // Treat an enum type as its underlying type. 7778 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 7779 Ty = EnumTy->getDecl()->getIntegerType(); 7780 7781 // Make sure we pass indirectly things that are too large. 7782 if (const auto *EIT = Ty->getAs<ExtIntType>()) 7783 if (EIT->getNumBits() > 128 || 7784 (EIT->getNumBits() > 64 && 7785 !getContext().getTargetInfo().hasInt128Type())) 7786 return getNaturalAlignIndirect(Ty); 7787 7788 // All integral types are promoted to the GPR width. 7789 if (Ty->isIntegralOrEnumerationType()) 7790 return extendType(Ty); 7791 7792 return ABIArgInfo::getDirect( 7793 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset)); 7794 } 7795 7796 llvm::Type* 7797 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 7798 const RecordType *RT = RetTy->getAs<RecordType>(); 7799 SmallVector<llvm::Type*, 8> RTList; 7800 7801 if (RT && RT->isStructureOrClassType()) { 7802 const RecordDecl *RD = RT->getDecl(); 7803 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 7804 unsigned FieldCnt = Layout.getFieldCount(); 7805 7806 // N32/64 returns struct/classes in floating point registers if the 7807 // following conditions are met: 7808 // 1. The size of the struct/class is no larger than 128-bit. 7809 // 2. The struct/class has one or two fields all of which are floating 7810 // point types. 7811 // 3. The offset of the first field is zero (this follows what gcc does). 7812 // 7813 // Any other composite results are returned in integer registers. 7814 // 7815 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 7816 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 7817 for (; b != e; ++b) { 7818 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 7819 7820 if (!BT || !BT->isFloatingPoint()) 7821 break; 7822 7823 RTList.push_back(CGT.ConvertType(b->getType())); 7824 } 7825 7826 if (b == e) 7827 return llvm::StructType::get(getVMContext(), RTList, 7828 RD->hasAttr<PackedAttr>()); 7829 7830 RTList.clear(); 7831 } 7832 } 7833 7834 CoerceToIntArgs(Size, RTList); 7835 return llvm::StructType::get(getVMContext(), RTList); 7836 } 7837 7838 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 7839 uint64_t Size = getContext().getTypeSize(RetTy); 7840 7841 if (RetTy->isVoidType()) 7842 return ABIArgInfo::getIgnore(); 7843 7844 // O32 doesn't treat zero-sized structs differently from other structs. 7845 // However, N32/N64 ignores zero sized return values. 7846 if (!IsO32 && Size == 0) 7847 return ABIArgInfo::getIgnore(); 7848 7849 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 7850 if (Size <= 128) { 7851 if (RetTy->isAnyComplexType()) 7852 return ABIArgInfo::getDirect(); 7853 7854 // O32 returns integer vectors in registers and N32/N64 returns all small 7855 // aggregates in registers. 7856 if (!IsO32 || 7857 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) { 7858 ABIArgInfo ArgInfo = 7859 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 7860 ArgInfo.setInReg(true); 7861 return ArgInfo; 7862 } 7863 } 7864 7865 return getNaturalAlignIndirect(RetTy); 7866 } 7867 7868 // Treat an enum type as its underlying type. 7869 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 7870 RetTy = EnumTy->getDecl()->getIntegerType(); 7871 7872 // Make sure we pass indirectly things that are too large. 7873 if (const auto *EIT = RetTy->getAs<ExtIntType>()) 7874 if (EIT->getNumBits() > 128 || 7875 (EIT->getNumBits() > 64 && 7876 !getContext().getTargetInfo().hasInt128Type())) 7877 return getNaturalAlignIndirect(RetTy); 7878 7879 if (isPromotableIntegerTypeForABI(RetTy)) 7880 return ABIArgInfo::getExtend(RetTy); 7881 7882 if ((RetTy->isUnsignedIntegerOrEnumerationType() || 7883 RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32) 7884 return ABIArgInfo::getSignExtend(RetTy); 7885 7886 return ABIArgInfo::getDirect(); 7887 } 7888 7889 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 7890 ABIArgInfo &RetInfo = FI.getReturnInfo(); 7891 if (!getCXXABI().classifyReturnType(FI)) 7892 RetInfo = classifyReturnType(FI.getReturnType()); 7893 7894 // Check if a pointer to an aggregate is passed as a hidden argument. 7895 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0; 7896 7897 for (auto &I : FI.arguments()) 7898 I.info = classifyArgumentType(I.type, Offset); 7899 } 7900 7901 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7902 QualType OrigTy) const { 7903 QualType Ty = OrigTy; 7904 7905 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64. 7906 // Pointers are also promoted in the same way but this only matters for N32. 7907 unsigned SlotSizeInBits = IsO32 ? 32 : 64; 7908 unsigned PtrWidth = getTarget().getPointerWidth(0); 7909 bool DidPromote = false; 7910 if ((Ty->isIntegerType() && 7911 getContext().getIntWidth(Ty) < SlotSizeInBits) || 7912 (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) { 7913 DidPromote = true; 7914 Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits, 7915 Ty->isSignedIntegerType()); 7916 } 7917 7918 auto TyInfo = getContext().getTypeInfoInChars(Ty); 7919 7920 // The alignment of things in the argument area is never larger than 7921 // StackAlignInBytes. 7922 TyInfo.second = 7923 std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes)); 7924 7925 // MinABIStackAlignInBytes is the size of argument slots on the stack. 7926 CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes); 7927 7928 Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 7929 TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true); 7930 7931 7932 // If there was a promotion, "unpromote" into a temporary. 7933 // TODO: can we just use a pointer into a subset of the original slot? 7934 if (DidPromote) { 7935 Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp"); 7936 llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr); 7937 7938 // Truncate down to the right width. 7939 llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType() 7940 : CGF.IntPtrTy); 7941 llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy); 7942 if (OrigTy->isPointerType()) 7943 V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType()); 7944 7945 CGF.Builder.CreateStore(V, Temp); 7946 Addr = Temp; 7947 } 7948 7949 return Addr; 7950 } 7951 7952 ABIArgInfo MipsABIInfo::extendType(QualType Ty) const { 7953 int TySize = getContext().getTypeSize(Ty); 7954 7955 // MIPS64 ABI requires unsigned 32 bit integers to be sign extended. 7956 if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32) 7957 return ABIArgInfo::getSignExtend(Ty); 7958 7959 return ABIArgInfo::getExtend(Ty); 7960 } 7961 7962 bool 7963 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 7964 llvm::Value *Address) const { 7965 // This information comes from gcc's implementation, which seems to 7966 // as canonical as it gets. 7967 7968 // Everything on MIPS is 4 bytes. Double-precision FP registers 7969 // are aliased to pairs of single-precision FP registers. 7970 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 7971 7972 // 0-31 are the general purpose registers, $0 - $31. 7973 // 32-63 are the floating-point registers, $f0 - $f31. 7974 // 64 and 65 are the multiply/divide registers, $hi and $lo. 7975 // 66 is the (notional, I think) register for signal-handler return. 7976 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65); 7977 7978 // 67-74 are the floating-point status registers, $fcc0 - $fcc7. 7979 // They are one bit wide and ignored here. 7980 7981 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31. 7982 // (coprocessor 1 is the FP unit) 7983 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31. 7984 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31. 7985 // 176-181 are the DSP accumulator registers. 7986 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181); 7987 return false; 7988 } 7989 7990 //===----------------------------------------------------------------------===// 7991 // AVR ABI Implementation. 7992 //===----------------------------------------------------------------------===// 7993 7994 namespace { 7995 class AVRTargetCodeGenInfo : public TargetCodeGenInfo { 7996 public: 7997 AVRTargetCodeGenInfo(CodeGenTypes &CGT) 7998 : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {} 7999 8000 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 8001 CodeGen::CodeGenModule &CGM) const override { 8002 if (GV->isDeclaration()) 8003 return; 8004 const auto *FD = dyn_cast_or_null<FunctionDecl>(D); 8005 if (!FD) return; 8006 auto *Fn = cast<llvm::Function>(GV); 8007 8008 if (FD->getAttr<AVRInterruptAttr>()) 8009 Fn->addFnAttr("interrupt"); 8010 8011 if (FD->getAttr<AVRSignalAttr>()) 8012 Fn->addFnAttr("signal"); 8013 } 8014 }; 8015 } 8016 8017 //===----------------------------------------------------------------------===// 8018 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults. 8019 // Currently subclassed only to implement custom OpenCL C function attribute 8020 // handling. 8021 //===----------------------------------------------------------------------===// 8022 8023 namespace { 8024 8025 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { 8026 public: 8027 TCETargetCodeGenInfo(CodeGenTypes &CGT) 8028 : DefaultTargetCodeGenInfo(CGT) {} 8029 8030 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 8031 CodeGen::CodeGenModule &M) const override; 8032 }; 8033 8034 void TCETargetCodeGenInfo::setTargetAttributes( 8035 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { 8036 if (GV->isDeclaration()) 8037 return; 8038 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 8039 if (!FD) return; 8040 8041 llvm::Function *F = cast<llvm::Function>(GV); 8042 8043 if (M.getLangOpts().OpenCL) { 8044 if (FD->hasAttr<OpenCLKernelAttr>()) { 8045 // OpenCL C Kernel functions are not subject to inlining 8046 F->addFnAttr(llvm::Attribute::NoInline); 8047 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>(); 8048 if (Attr) { 8049 // Convert the reqd_work_group_size() attributes to metadata. 8050 llvm::LLVMContext &Context = F->getContext(); 8051 llvm::NamedMDNode *OpenCLMetadata = 8052 M.getModule().getOrInsertNamedMetadata( 8053 "opencl.kernel_wg_size_info"); 8054 8055 SmallVector<llvm::Metadata *, 5> Operands; 8056 Operands.push_back(llvm::ConstantAsMetadata::get(F)); 8057 8058 Operands.push_back( 8059 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 8060 M.Int32Ty, llvm::APInt(32, Attr->getXDim())))); 8061 Operands.push_back( 8062 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 8063 M.Int32Ty, llvm::APInt(32, Attr->getYDim())))); 8064 Operands.push_back( 8065 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 8066 M.Int32Ty, llvm::APInt(32, Attr->getZDim())))); 8067 8068 // Add a boolean constant operand for "required" (true) or "hint" 8069 // (false) for implementing the work_group_size_hint attr later. 8070 // Currently always true as the hint is not yet implemented. 8071 Operands.push_back( 8072 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context))); 8073 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); 8074 } 8075 } 8076 } 8077 } 8078 8079 } 8080 8081 //===----------------------------------------------------------------------===// 8082 // Hexagon ABI Implementation 8083 //===----------------------------------------------------------------------===// 8084 8085 namespace { 8086 8087 class HexagonABIInfo : public DefaultABIInfo { 8088 public: 8089 HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 8090 8091 private: 8092 ABIArgInfo classifyReturnType(QualType RetTy) const; 8093 ABIArgInfo classifyArgumentType(QualType RetTy) const; 8094 ABIArgInfo classifyArgumentType(QualType RetTy, unsigned *RegsLeft) const; 8095 8096 void computeInfo(CGFunctionInfo &FI) const override; 8097 8098 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 8099 QualType Ty) const override; 8100 Address EmitVAArgFromMemory(CodeGenFunction &CFG, Address VAListAddr, 8101 QualType Ty) const; 8102 Address EmitVAArgForHexagon(CodeGenFunction &CFG, Address VAListAddr, 8103 QualType Ty) const; 8104 Address EmitVAArgForHexagonLinux(CodeGenFunction &CFG, Address VAListAddr, 8105 QualType Ty) const; 8106 }; 8107 8108 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { 8109 public: 8110 HexagonTargetCodeGenInfo(CodeGenTypes &CGT) 8111 : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {} 8112 8113 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 8114 return 29; 8115 } 8116 8117 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 8118 CodeGen::CodeGenModule &GCM) const override { 8119 if (GV->isDeclaration()) 8120 return; 8121 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 8122 if (!FD) 8123 return; 8124 } 8125 }; 8126 8127 } // namespace 8128 8129 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { 8130 unsigned RegsLeft = 6; 8131 if (!getCXXABI().classifyReturnType(FI)) 8132 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 8133 for (auto &I : FI.arguments()) 8134 I.info = classifyArgumentType(I.type, &RegsLeft); 8135 } 8136 8137 static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) { 8138 assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits" 8139 " through registers"); 8140 8141 if (*RegsLeft == 0) 8142 return false; 8143 8144 if (Size <= 32) { 8145 (*RegsLeft)--; 8146 return true; 8147 } 8148 8149 if (2 <= (*RegsLeft & (~1U))) { 8150 *RegsLeft = (*RegsLeft & (~1U)) - 2; 8151 return true; 8152 } 8153 8154 // Next available register was r5 but candidate was greater than 32-bits so it 8155 // has to go on the stack. However we still consume r5 8156 if (*RegsLeft == 1) 8157 *RegsLeft = 0; 8158 8159 return false; 8160 } 8161 8162 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty, 8163 unsigned *RegsLeft) const { 8164 if (!isAggregateTypeForABI(Ty)) { 8165 // Treat an enum type as its underlying type. 8166 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 8167 Ty = EnumTy->getDecl()->getIntegerType(); 8168 8169 uint64_t Size = getContext().getTypeSize(Ty); 8170 if (Size <= 64) 8171 HexagonAdjustRegsLeft(Size, RegsLeft); 8172 8173 if (Size > 64 && Ty->isExtIntType()) 8174 return getNaturalAlignIndirect(Ty, /*ByVal=*/true); 8175 8176 return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) 8177 : ABIArgInfo::getDirect(); 8178 } 8179 8180 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 8181 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 8182 8183 // Ignore empty records. 8184 if (isEmptyRecord(getContext(), Ty, true)) 8185 return ABIArgInfo::getIgnore(); 8186 8187 uint64_t Size = getContext().getTypeSize(Ty); 8188 unsigned Align = getContext().getTypeAlign(Ty); 8189 8190 if (Size > 64) 8191 return getNaturalAlignIndirect(Ty, /*ByVal=*/true); 8192 8193 if (HexagonAdjustRegsLeft(Size, RegsLeft)) 8194 Align = Size <= 32 ? 32 : 64; 8195 if (Size <= Align) { 8196 // Pass in the smallest viable integer type. 8197 if (!llvm::isPowerOf2_64(Size)) 8198 Size = llvm::NextPowerOf2(Size); 8199 return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size)); 8200 } 8201 return DefaultABIInfo::classifyArgumentType(Ty); 8202 } 8203 8204 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { 8205 if (RetTy->isVoidType()) 8206 return ABIArgInfo::getIgnore(); 8207 8208 const TargetInfo &T = CGT.getTarget(); 8209 uint64_t Size = getContext().getTypeSize(RetTy); 8210 8211 if (RetTy->getAs<VectorType>()) { 8212 // HVX vectors are returned in vector registers or register pairs. 8213 if (T.hasFeature("hvx")) { 8214 assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b")); 8215 uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8; 8216 if (Size == VecSize || Size == 2*VecSize) 8217 return ABIArgInfo::getDirectInReg(); 8218 } 8219 // Large vector types should be returned via memory. 8220 if (Size > 64) 8221 return getNaturalAlignIndirect(RetTy); 8222 } 8223 8224 if (!isAggregateTypeForABI(RetTy)) { 8225 // Treat an enum type as its underlying type. 8226 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 8227 RetTy = EnumTy->getDecl()->getIntegerType(); 8228 8229 if (Size > 64 && RetTy->isExtIntType()) 8230 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false); 8231 8232 return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 8233 : ABIArgInfo::getDirect(); 8234 } 8235 8236 if (isEmptyRecord(getContext(), RetTy, true)) 8237 return ABIArgInfo::getIgnore(); 8238 8239 // Aggregates <= 8 bytes are returned in registers, other aggregates 8240 // are returned indirectly. 8241 if (Size <= 64) { 8242 // Return in the smallest viable integer type. 8243 if (!llvm::isPowerOf2_64(Size)) 8244 Size = llvm::NextPowerOf2(Size); 8245 return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size)); 8246 } 8247 return getNaturalAlignIndirect(RetTy, /*ByVal=*/true); 8248 } 8249 8250 Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF, 8251 Address VAListAddr, 8252 QualType Ty) const { 8253 // Load the overflow area pointer. 8254 Address __overflow_area_pointer_p = 8255 CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p"); 8256 llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad( 8257 __overflow_area_pointer_p, "__overflow_area_pointer"); 8258 8259 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 8260 if (Align > 4) { 8261 // Alignment should be a power of 2. 8262 assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!"); 8263 8264 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 8265 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 8266 8267 // Add offset to the current pointer to access the argument. 8268 __overflow_area_pointer = 8269 CGF.Builder.CreateGEP(__overflow_area_pointer, Offset); 8270 llvm::Value *AsInt = 8271 CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty); 8272 8273 // Create a mask which should be "AND"ed 8274 // with (overflow_arg_area + align - 1) 8275 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align); 8276 __overflow_area_pointer = CGF.Builder.CreateIntToPtr( 8277 CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(), 8278 "__overflow_area_pointer.align"); 8279 } 8280 8281 // Get the type of the argument from memory and bitcast 8282 // overflow area pointer to the argument type. 8283 llvm::Type *PTy = CGF.ConvertTypeForMem(Ty); 8284 Address AddrTyped = CGF.Builder.CreateBitCast( 8285 Address(__overflow_area_pointer, CharUnits::fromQuantity(Align)), 8286 llvm::PointerType::getUnqual(PTy)); 8287 8288 // Round up to the minimum stack alignment for varargs which is 4 bytes. 8289 uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4); 8290 8291 __overflow_area_pointer = CGF.Builder.CreateGEP( 8292 __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 8293 "__overflow_area_pointer.next"); 8294 CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p); 8295 8296 return AddrTyped; 8297 } 8298 8299 Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF, 8300 Address VAListAddr, 8301 QualType Ty) const { 8302 // FIXME: Need to handle alignment 8303 llvm::Type *BP = CGF.Int8PtrTy; 8304 llvm::Type *BPP = CGF.Int8PtrPtrTy; 8305 CGBuilderTy &Builder = CGF.Builder; 8306 Address VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap"); 8307 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 8308 // Handle address alignment for type alignment > 32 bits 8309 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; 8310 if (TyAlign > 4) { 8311 assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!"); 8312 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty); 8313 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1)); 8314 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1))); 8315 Addr = Builder.CreateIntToPtr(AddrAsInt, BP); 8316 } 8317 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 8318 Address AddrTyped = Builder.CreateBitCast( 8319 Address(Addr, CharUnits::fromQuantity(TyAlign)), PTy); 8320 8321 uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4); 8322 llvm::Value *NextAddr = Builder.CreateGEP( 8323 Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next"); 8324 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 8325 8326 return AddrTyped; 8327 } 8328 8329 Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF, 8330 Address VAListAddr, 8331 QualType Ty) const { 8332 int ArgSize = CGF.getContext().getTypeSize(Ty) / 8; 8333 8334 if (ArgSize > 8) 8335 return EmitVAArgFromMemory(CGF, VAListAddr, Ty); 8336 8337 // Here we have check if the argument is in register area or 8338 // in overflow area. 8339 // If the saved register area pointer + argsize rounded up to alignment > 8340 // saved register area end pointer, argument is in overflow area. 8341 unsigned RegsLeft = 6; 8342 Ty = CGF.getContext().getCanonicalType(Ty); 8343 (void)classifyArgumentType(Ty, &RegsLeft); 8344 8345 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg"); 8346 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 8347 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack"); 8348 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 8349 8350 // Get rounded size of the argument.GCC does not allow vararg of 8351 // size < 4 bytes. We follow the same logic here. 8352 ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8; 8353 int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8; 8354 8355 // Argument may be in saved register area 8356 CGF.EmitBlock(MaybeRegBlock); 8357 8358 // Load the current saved register area pointer. 8359 Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP( 8360 VAListAddr, 0, "__current_saved_reg_area_pointer_p"); 8361 llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad( 8362 __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer"); 8363 8364 // Load the saved register area end pointer. 8365 Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP( 8366 VAListAddr, 1, "__saved_reg_area_end_pointer_p"); 8367 llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad( 8368 __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer"); 8369 8370 // If the size of argument is > 4 bytes, check if the stack 8371 // location is aligned to 8 bytes 8372 if (ArgAlign > 4) { 8373 8374 llvm::Value *__current_saved_reg_area_pointer_int = 8375 CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer, 8376 CGF.Int32Ty); 8377 8378 __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd( 8379 __current_saved_reg_area_pointer_int, 8380 llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)), 8381 "align_current_saved_reg_area_pointer"); 8382 8383 __current_saved_reg_area_pointer_int = 8384 CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int, 8385 llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign), 8386 "align_current_saved_reg_area_pointer"); 8387 8388 __current_saved_reg_area_pointer = 8389 CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int, 8390 __current_saved_reg_area_pointer->getType(), 8391 "align_current_saved_reg_area_pointer"); 8392 } 8393 8394 llvm::Value *__new_saved_reg_area_pointer = 8395 CGF.Builder.CreateGEP(__current_saved_reg_area_pointer, 8396 llvm::ConstantInt::get(CGF.Int32Ty, ArgSize), 8397 "__new_saved_reg_area_pointer"); 8398 8399 llvm::Value *UsingStack = 0; 8400 UsingStack = CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer, 8401 __saved_reg_area_end_pointer); 8402 8403 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock); 8404 8405 // Argument in saved register area 8406 // Implement the block where argument is in register saved area 8407 CGF.EmitBlock(InRegBlock); 8408 8409 llvm::Type *PTy = CGF.ConvertType(Ty); 8410 llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast( 8411 __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy)); 8412 8413 CGF.Builder.CreateStore(__new_saved_reg_area_pointer, 8414 __current_saved_reg_area_pointer_p); 8415 8416 CGF.EmitBranch(ContBlock); 8417 8418 // Argument in overflow area 8419 // Implement the block where the argument is in overflow area. 8420 CGF.EmitBlock(OnStackBlock); 8421 8422 // Load the overflow area pointer 8423 Address __overflow_area_pointer_p = 8424 CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p"); 8425 llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad( 8426 __overflow_area_pointer_p, "__overflow_area_pointer"); 8427 8428 // Align the overflow area pointer according to the alignment of the argument 8429 if (ArgAlign > 4) { 8430 llvm::Value *__overflow_area_pointer_int = 8431 CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty); 8432 8433 __overflow_area_pointer_int = 8434 CGF.Builder.CreateAdd(__overflow_area_pointer_int, 8435 llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1), 8436 "align_overflow_area_pointer"); 8437 8438 __overflow_area_pointer_int = 8439 CGF.Builder.CreateAnd(__overflow_area_pointer_int, 8440 llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign), 8441 "align_overflow_area_pointer"); 8442 8443 __overflow_area_pointer = CGF.Builder.CreateIntToPtr( 8444 __overflow_area_pointer_int, __overflow_area_pointer->getType(), 8445 "align_overflow_area_pointer"); 8446 } 8447 8448 // Get the pointer for next argument in overflow area and store it 8449 // to overflow area pointer. 8450 llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP( 8451 __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, ArgSize), 8452 "__overflow_area_pointer.next"); 8453 8454 CGF.Builder.CreateStore(__new_overflow_area_pointer, 8455 __overflow_area_pointer_p); 8456 8457 CGF.Builder.CreateStore(__new_overflow_area_pointer, 8458 __current_saved_reg_area_pointer_p); 8459 8460 // Bitcast the overflow area pointer to the type of argument. 8461 llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty); 8462 llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast( 8463 __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy)); 8464 8465 CGF.EmitBranch(ContBlock); 8466 8467 // Get the correct pointer to load the variable argument 8468 // Implement the ContBlock 8469 CGF.EmitBlock(ContBlock); 8470 8471 llvm::Type *MemPTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty)); 8472 llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr"); 8473 ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock); 8474 ArgAddr->addIncoming(__overflow_area_p, OnStackBlock); 8475 8476 return Address(ArgAddr, CharUnits::fromQuantity(ArgAlign)); 8477 } 8478 8479 Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 8480 QualType Ty) const { 8481 8482 if (getTarget().getTriple().isMusl()) 8483 return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty); 8484 8485 return EmitVAArgForHexagon(CGF, VAListAddr, Ty); 8486 } 8487 8488 //===----------------------------------------------------------------------===// 8489 // Lanai ABI Implementation 8490 //===----------------------------------------------------------------------===// 8491 8492 namespace { 8493 class LanaiABIInfo : public DefaultABIInfo { 8494 public: 8495 LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 8496 8497 bool shouldUseInReg(QualType Ty, CCState &State) const; 8498 8499 void computeInfo(CGFunctionInfo &FI) const override { 8500 CCState State(FI); 8501 // Lanai uses 4 registers to pass arguments unless the function has the 8502 // regparm attribute set. 8503 if (FI.getHasRegParm()) { 8504 State.FreeRegs = FI.getRegParm(); 8505 } else { 8506 State.FreeRegs = 4; 8507 } 8508 8509 if (!getCXXABI().classifyReturnType(FI)) 8510 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 8511 for (auto &I : FI.arguments()) 8512 I.info = classifyArgumentType(I.type, State); 8513 } 8514 8515 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const; 8516 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const; 8517 }; 8518 } // end anonymous namespace 8519 8520 bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const { 8521 unsigned Size = getContext().getTypeSize(Ty); 8522 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U; 8523 8524 if (SizeInRegs == 0) 8525 return false; 8526 8527 if (SizeInRegs > State.FreeRegs) { 8528 State.FreeRegs = 0; 8529 return false; 8530 } 8531 8532 State.FreeRegs -= SizeInRegs; 8533 8534 return true; 8535 } 8536 8537 ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal, 8538 CCState &State) const { 8539 if (!ByVal) { 8540 if (State.FreeRegs) { 8541 --State.FreeRegs; // Non-byval indirects just use one pointer. 8542 return getNaturalAlignIndirectInReg(Ty); 8543 } 8544 return getNaturalAlignIndirect(Ty, false); 8545 } 8546 8547 // Compute the byval alignment. 8548 const unsigned MinABIStackAlignInBytes = 4; 8549 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 8550 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true, 8551 /*Realign=*/TypeAlign > 8552 MinABIStackAlignInBytes); 8553 } 8554 8555 ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty, 8556 CCState &State) const { 8557 // Check with the C++ ABI first. 8558 const RecordType *RT = Ty->getAs<RecordType>(); 8559 if (RT) { 8560 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); 8561 if (RAA == CGCXXABI::RAA_Indirect) { 8562 return getIndirectResult(Ty, /*ByVal=*/false, State); 8563 } else if (RAA == CGCXXABI::RAA_DirectInMemory) { 8564 return getNaturalAlignIndirect(Ty, /*ByVal=*/true); 8565 } 8566 } 8567 8568 if (isAggregateTypeForABI(Ty)) { 8569 // Structures with flexible arrays are always indirect. 8570 if (RT && RT->getDecl()->hasFlexibleArrayMember()) 8571 return getIndirectResult(Ty, /*ByVal=*/true, State); 8572 8573 // Ignore empty structs/unions. 8574 if (isEmptyRecord(getContext(), Ty, true)) 8575 return ABIArgInfo::getIgnore(); 8576 8577 llvm::LLVMContext &LLVMContext = getVMContext(); 8578 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; 8579 if (SizeInRegs <= State.FreeRegs) { 8580 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 8581 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32); 8582 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 8583 State.FreeRegs -= SizeInRegs; 8584 return ABIArgInfo::getDirectInReg(Result); 8585 } else { 8586 State.FreeRegs = 0; 8587 } 8588 return getIndirectResult(Ty, true, State); 8589 } 8590 8591 // Treat an enum type as its underlying type. 8592 if (const auto *EnumTy = Ty->getAs<EnumType>()) 8593 Ty = EnumTy->getDecl()->getIntegerType(); 8594 8595 bool InReg = shouldUseInReg(Ty, State); 8596 8597 // Don't pass >64 bit integers in registers. 8598 if (const auto *EIT = Ty->getAs<ExtIntType>()) 8599 if (EIT->getNumBits() > 64) 8600 return getIndirectResult(Ty, /*ByVal=*/true, State); 8601 8602 if (isPromotableIntegerTypeForABI(Ty)) { 8603 if (InReg) 8604 return ABIArgInfo::getDirectInReg(); 8605 return ABIArgInfo::getExtend(Ty); 8606 } 8607 if (InReg) 8608 return ABIArgInfo::getDirectInReg(); 8609 return ABIArgInfo::getDirect(); 8610 } 8611 8612 namespace { 8613 class LanaiTargetCodeGenInfo : public TargetCodeGenInfo { 8614 public: 8615 LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 8616 : TargetCodeGenInfo(std::make_unique<LanaiABIInfo>(CGT)) {} 8617 }; 8618 } 8619 8620 //===----------------------------------------------------------------------===// 8621 // AMDGPU ABI Implementation 8622 //===----------------------------------------------------------------------===// 8623 8624 namespace { 8625 8626 class AMDGPUABIInfo final : public DefaultABIInfo { 8627 private: 8628 static const unsigned MaxNumRegsForArgsRet = 16; 8629 8630 unsigned numRegsForType(QualType Ty) const; 8631 8632 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 8633 bool isHomogeneousAggregateSmallEnough(const Type *Base, 8634 uint64_t Members) const override; 8635 8636 // Coerce HIP pointer arguments from generic pointers to global ones. 8637 llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS, 8638 unsigned ToAS) const { 8639 // Structure types. 8640 if (auto STy = dyn_cast<llvm::StructType>(Ty)) { 8641 SmallVector<llvm::Type *, 8> EltTys; 8642 bool Changed = false; 8643 for (auto T : STy->elements()) { 8644 auto NT = coerceKernelArgumentType(T, FromAS, ToAS); 8645 EltTys.push_back(NT); 8646 Changed |= (NT != T); 8647 } 8648 // Skip if there is no change in element types. 8649 if (!Changed) 8650 return STy; 8651 if (STy->hasName()) 8652 return llvm::StructType::create( 8653 EltTys, (STy->getName() + ".coerce").str(), STy->isPacked()); 8654 return llvm::StructType::get(getVMContext(), EltTys, STy->isPacked()); 8655 } 8656 // Array types. 8657 if (auto ATy = dyn_cast<llvm::ArrayType>(Ty)) { 8658 auto T = ATy->getElementType(); 8659 auto NT = coerceKernelArgumentType(T, FromAS, ToAS); 8660 // Skip if there is no change in that element type. 8661 if (NT == T) 8662 return ATy; 8663 return llvm::ArrayType::get(NT, ATy->getNumElements()); 8664 } 8665 // Single value types. 8666 if (Ty->isPointerTy() && Ty->getPointerAddressSpace() == FromAS) 8667 return llvm::PointerType::get( 8668 cast<llvm::PointerType>(Ty)->getElementType(), ToAS); 8669 return Ty; 8670 } 8671 8672 public: 8673 explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) : 8674 DefaultABIInfo(CGT) {} 8675 8676 ABIArgInfo classifyReturnType(QualType RetTy) const; 8677 ABIArgInfo classifyKernelArgumentType(QualType Ty) const; 8678 ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const; 8679 8680 void computeInfo(CGFunctionInfo &FI) const override; 8681 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 8682 QualType Ty) const override; 8683 }; 8684 8685 bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 8686 return true; 8687 } 8688 8689 bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough( 8690 const Type *Base, uint64_t Members) const { 8691 uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32; 8692 8693 // Homogeneous Aggregates may occupy at most 16 registers. 8694 return Members * NumRegs <= MaxNumRegsForArgsRet; 8695 } 8696 8697 /// Estimate number of registers the type will use when passed in registers. 8698 unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const { 8699 unsigned NumRegs = 0; 8700 8701 if (const VectorType *VT = Ty->getAs<VectorType>()) { 8702 // Compute from the number of elements. The reported size is based on the 8703 // in-memory size, which includes the padding 4th element for 3-vectors. 8704 QualType EltTy = VT->getElementType(); 8705 unsigned EltSize = getContext().getTypeSize(EltTy); 8706 8707 // 16-bit element vectors should be passed as packed. 8708 if (EltSize == 16) 8709 return (VT->getNumElements() + 1) / 2; 8710 8711 unsigned EltNumRegs = (EltSize + 31) / 32; 8712 return EltNumRegs * VT->getNumElements(); 8713 } 8714 8715 if (const RecordType *RT = Ty->getAs<RecordType>()) { 8716 const RecordDecl *RD = RT->getDecl(); 8717 assert(!RD->hasFlexibleArrayMember()); 8718 8719 for (const FieldDecl *Field : RD->fields()) { 8720 QualType FieldTy = Field->getType(); 8721 NumRegs += numRegsForType(FieldTy); 8722 } 8723 8724 return NumRegs; 8725 } 8726 8727 return (getContext().getTypeSize(Ty) + 31) / 32; 8728 } 8729 8730 void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const { 8731 llvm::CallingConv::ID CC = FI.getCallingConvention(); 8732 8733 if (!getCXXABI().classifyReturnType(FI)) 8734 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 8735 8736 unsigned NumRegsLeft = MaxNumRegsForArgsRet; 8737 for (auto &Arg : FI.arguments()) { 8738 if (CC == llvm::CallingConv::AMDGPU_KERNEL) { 8739 Arg.info = classifyKernelArgumentType(Arg.type); 8740 } else { 8741 Arg.info = classifyArgumentType(Arg.type, NumRegsLeft); 8742 } 8743 } 8744 } 8745 8746 Address AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 8747 QualType Ty) const { 8748 llvm_unreachable("AMDGPU does not support varargs"); 8749 } 8750 8751 ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const { 8752 if (isAggregateTypeForABI(RetTy)) { 8753 // Records with non-trivial destructors/copy-constructors should not be 8754 // returned by value. 8755 if (!getRecordArgABI(RetTy, getCXXABI())) { 8756 // Ignore empty structs/unions. 8757 if (isEmptyRecord(getContext(), RetTy, true)) 8758 return ABIArgInfo::getIgnore(); 8759 8760 // Lower single-element structs to just return a regular value. 8761 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) 8762 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 8763 8764 if (const RecordType *RT = RetTy->getAs<RecordType>()) { 8765 const RecordDecl *RD = RT->getDecl(); 8766 if (RD->hasFlexibleArrayMember()) 8767 return DefaultABIInfo::classifyReturnType(RetTy); 8768 } 8769 8770 // Pack aggregates <= 4 bytes into single VGPR or pair. 8771 uint64_t Size = getContext().getTypeSize(RetTy); 8772 if (Size <= 16) 8773 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 8774 8775 if (Size <= 32) 8776 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 8777 8778 if (Size <= 64) { 8779 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext()); 8780 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2)); 8781 } 8782 8783 if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet) 8784 return ABIArgInfo::getDirect(); 8785 } 8786 } 8787 8788 // Otherwise just do the default thing. 8789 return DefaultABIInfo::classifyReturnType(RetTy); 8790 } 8791 8792 /// For kernels all parameters are really passed in a special buffer. It doesn't 8793 /// make sense to pass anything byval, so everything must be direct. 8794 ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const { 8795 Ty = useFirstFieldIfTransparentUnion(Ty); 8796 8797 // TODO: Can we omit empty structs? 8798 8799 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) 8800 Ty = QualType(SeltTy, 0); 8801 8802 llvm::Type *OrigLTy = CGT.ConvertType(Ty); 8803 llvm::Type *LTy = OrigLTy; 8804 if (getContext().getLangOpts().HIP) { 8805 LTy = coerceKernelArgumentType( 8806 OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default), 8807 /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device)); 8808 } 8809 8810 // FIXME: Should also use this for OpenCL, but it requires addressing the 8811 // problem of kernels being called. 8812 // 8813 // FIXME: This doesn't apply the optimization of coercing pointers in structs 8814 // to global address space when using byref. This would require implementing a 8815 // new kind of coercion of the in-memory type when for indirect arguments. 8816 if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy && 8817 isAggregateTypeForABI(Ty)) { 8818 return ABIArgInfo::getIndirectAliased( 8819 getContext().getTypeAlignInChars(Ty), 8820 getContext().getTargetAddressSpace(LangAS::opencl_constant), 8821 false /*Realign*/, nullptr /*Padding*/); 8822 } 8823 8824 // If we set CanBeFlattened to true, CodeGen will expand the struct to its 8825 // individual elements, which confuses the Clover OpenCL backend; therefore we 8826 // have to set it to false here. Other args of getDirect() are just defaults. 8827 return ABIArgInfo::getDirect(LTy, 0, nullptr, false); 8828 } 8829 8830 ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty, 8831 unsigned &NumRegsLeft) const { 8832 assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow"); 8833 8834 Ty = useFirstFieldIfTransparentUnion(Ty); 8835 8836 if (isAggregateTypeForABI(Ty)) { 8837 // Records with non-trivial destructors/copy-constructors should not be 8838 // passed by value. 8839 if (auto RAA = getRecordArgABI(Ty, getCXXABI())) 8840 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 8841 8842 // Ignore empty structs/unions. 8843 if (isEmptyRecord(getContext(), Ty, true)) 8844 return ABIArgInfo::getIgnore(); 8845 8846 // Lower single-element structs to just pass a regular value. TODO: We 8847 // could do reasonable-size multiple-element structs too, using getExpand(), 8848 // though watch out for things like bitfields. 8849 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) 8850 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 8851 8852 if (const RecordType *RT = Ty->getAs<RecordType>()) { 8853 const RecordDecl *RD = RT->getDecl(); 8854 if (RD->hasFlexibleArrayMember()) 8855 return DefaultABIInfo::classifyArgumentType(Ty); 8856 } 8857 8858 // Pack aggregates <= 8 bytes into single VGPR or pair. 8859 uint64_t Size = getContext().getTypeSize(Ty); 8860 if (Size <= 64) { 8861 unsigned NumRegs = (Size + 31) / 32; 8862 NumRegsLeft -= std::min(NumRegsLeft, NumRegs); 8863 8864 if (Size <= 16) 8865 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 8866 8867 if (Size <= 32) 8868 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 8869 8870 // XXX: Should this be i64 instead, and should the limit increase? 8871 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext()); 8872 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2)); 8873 } 8874 8875 if (NumRegsLeft > 0) { 8876 unsigned NumRegs = numRegsForType(Ty); 8877 if (NumRegsLeft >= NumRegs) { 8878 NumRegsLeft -= NumRegs; 8879 return ABIArgInfo::getDirect(); 8880 } 8881 } 8882 } 8883 8884 // Otherwise just do the default thing. 8885 ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty); 8886 if (!ArgInfo.isIndirect()) { 8887 unsigned NumRegs = numRegsForType(Ty); 8888 NumRegsLeft -= std::min(NumRegs, NumRegsLeft); 8889 } 8890 8891 return ArgInfo; 8892 } 8893 8894 class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo { 8895 public: 8896 AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT) 8897 : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {} 8898 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 8899 CodeGen::CodeGenModule &M) const override; 8900 unsigned getOpenCLKernelCallingConv() const override; 8901 8902 llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM, 8903 llvm::PointerType *T, QualType QT) const override; 8904 8905 LangAS getASTAllocaAddressSpace() const override { 8906 return getLangASFromTargetAS( 8907 getABIInfo().getDataLayout().getAllocaAddrSpace()); 8908 } 8909 LangAS getGlobalVarAddressSpace(CodeGenModule &CGM, 8910 const VarDecl *D) const override; 8911 llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts, 8912 SyncScope Scope, 8913 llvm::AtomicOrdering Ordering, 8914 llvm::LLVMContext &Ctx) const override; 8915 llvm::Function * 8916 createEnqueuedBlockKernel(CodeGenFunction &CGF, 8917 llvm::Function *BlockInvokeFunc, 8918 llvm::Value *BlockLiteral) const override; 8919 bool shouldEmitStaticExternCAliases() const override; 8920 void setCUDAKernelCallingConvention(const FunctionType *&FT) const override; 8921 }; 8922 } 8923 8924 static bool requiresAMDGPUProtectedVisibility(const Decl *D, 8925 llvm::GlobalValue *GV) { 8926 if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility) 8927 return false; 8928 8929 return D->hasAttr<OpenCLKernelAttr>() || 8930 (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) || 8931 (isa<VarDecl>(D) && 8932 (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() || 8933 cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() || 8934 cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType())); 8935 } 8936 8937 void AMDGPUTargetCodeGenInfo::setTargetAttributes( 8938 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { 8939 if (requiresAMDGPUProtectedVisibility(D, GV)) { 8940 GV->setVisibility(llvm::GlobalValue::ProtectedVisibility); 8941 GV->setDSOLocal(true); 8942 } 8943 8944 if (GV->isDeclaration()) 8945 return; 8946 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 8947 if (!FD) 8948 return; 8949 8950 llvm::Function *F = cast<llvm::Function>(GV); 8951 8952 const auto *ReqdWGS = M.getLangOpts().OpenCL ? 8953 FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr; 8954 8955 8956 const bool IsOpenCLKernel = M.getLangOpts().OpenCL && 8957 FD->hasAttr<OpenCLKernelAttr>(); 8958 const bool IsHIPKernel = M.getLangOpts().HIP && 8959 FD->hasAttr<CUDAGlobalAttr>(); 8960 if ((IsOpenCLKernel || IsHIPKernel) && 8961 (M.getTriple().getOS() == llvm::Triple::AMDHSA)) 8962 F->addFnAttr("amdgpu-implicitarg-num-bytes", "56"); 8963 8964 if (IsHIPKernel) 8965 F->addFnAttr("uniform-work-group-size", "true"); 8966 8967 8968 const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>(); 8969 if (ReqdWGS || FlatWGS) { 8970 unsigned Min = 0; 8971 unsigned Max = 0; 8972 if (FlatWGS) { 8973 Min = FlatWGS->getMin() 8974 ->EvaluateKnownConstInt(M.getContext()) 8975 .getExtValue(); 8976 Max = FlatWGS->getMax() 8977 ->EvaluateKnownConstInt(M.getContext()) 8978 .getExtValue(); 8979 } 8980 if (ReqdWGS && Min == 0 && Max == 0) 8981 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim(); 8982 8983 if (Min != 0) { 8984 assert(Min <= Max && "Min must be less than or equal Max"); 8985 8986 std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max); 8987 F->addFnAttr("amdgpu-flat-work-group-size", AttrVal); 8988 } else 8989 assert(Max == 0 && "Max must be zero"); 8990 } else if (IsOpenCLKernel || IsHIPKernel) { 8991 // By default, restrict the maximum size to a value specified by 8992 // --gpu-max-threads-per-block=n or its default value for HIP. 8993 const unsigned OpenCLDefaultMaxWorkGroupSize = 256; 8994 const unsigned DefaultMaxWorkGroupSize = 8995 IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize 8996 : M.getLangOpts().GPUMaxThreadsPerBlock; 8997 std::string AttrVal = 8998 std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize); 8999 F->addFnAttr("amdgpu-flat-work-group-size", AttrVal); 9000 } 9001 9002 if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) { 9003 unsigned Min = 9004 Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue(); 9005 unsigned Max = Attr->getMax() ? Attr->getMax() 9006 ->EvaluateKnownConstInt(M.getContext()) 9007 .getExtValue() 9008 : 0; 9009 9010 if (Min != 0) { 9011 assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max"); 9012 9013 std::string AttrVal = llvm::utostr(Min); 9014 if (Max != 0) 9015 AttrVal = AttrVal + "," + llvm::utostr(Max); 9016 F->addFnAttr("amdgpu-waves-per-eu", AttrVal); 9017 } else 9018 assert(Max == 0 && "Max must be zero"); 9019 } 9020 9021 if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) { 9022 unsigned NumSGPR = Attr->getNumSGPR(); 9023 9024 if (NumSGPR != 0) 9025 F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR)); 9026 } 9027 9028 if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) { 9029 uint32_t NumVGPR = Attr->getNumVGPR(); 9030 9031 if (NumVGPR != 0) 9032 F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR)); 9033 } 9034 } 9035 9036 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const { 9037 return llvm::CallingConv::AMDGPU_KERNEL; 9038 } 9039 9040 // Currently LLVM assumes null pointers always have value 0, 9041 // which results in incorrectly transformed IR. Therefore, instead of 9042 // emitting null pointers in private and local address spaces, a null 9043 // pointer in generic address space is emitted which is casted to a 9044 // pointer in local or private address space. 9045 llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer( 9046 const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT, 9047 QualType QT) const { 9048 if (CGM.getContext().getTargetNullPointerValue(QT) == 0) 9049 return llvm::ConstantPointerNull::get(PT); 9050 9051 auto &Ctx = CGM.getContext(); 9052 auto NPT = llvm::PointerType::get(PT->getElementType(), 9053 Ctx.getTargetAddressSpace(LangAS::opencl_generic)); 9054 return llvm::ConstantExpr::getAddrSpaceCast( 9055 llvm::ConstantPointerNull::get(NPT), PT); 9056 } 9057 9058 LangAS 9059 AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM, 9060 const VarDecl *D) const { 9061 assert(!CGM.getLangOpts().OpenCL && 9062 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) && 9063 "Address space agnostic languages only"); 9064 LangAS DefaultGlobalAS = getLangASFromTargetAS( 9065 CGM.getContext().getTargetAddressSpace(LangAS::opencl_global)); 9066 if (!D) 9067 return DefaultGlobalAS; 9068 9069 LangAS AddrSpace = D->getType().getAddressSpace(); 9070 assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace)); 9071 if (AddrSpace != LangAS::Default) 9072 return AddrSpace; 9073 9074 if (CGM.isTypeConstant(D->getType(), false)) { 9075 if (auto ConstAS = CGM.getTarget().getConstantAddressSpace()) 9076 return ConstAS.getValue(); 9077 } 9078 return DefaultGlobalAS; 9079 } 9080 9081 llvm::SyncScope::ID 9082 AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts, 9083 SyncScope Scope, 9084 llvm::AtomicOrdering Ordering, 9085 llvm::LLVMContext &Ctx) const { 9086 std::string Name; 9087 switch (Scope) { 9088 case SyncScope::OpenCLWorkGroup: 9089 Name = "workgroup"; 9090 break; 9091 case SyncScope::OpenCLDevice: 9092 Name = "agent"; 9093 break; 9094 case SyncScope::OpenCLAllSVMDevices: 9095 Name = ""; 9096 break; 9097 case SyncScope::OpenCLSubGroup: 9098 Name = "wavefront"; 9099 } 9100 9101 if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) { 9102 if (!Name.empty()) 9103 Name = Twine(Twine(Name) + Twine("-")).str(); 9104 9105 Name = Twine(Twine(Name) + Twine("one-as")).str(); 9106 } 9107 9108 return Ctx.getOrInsertSyncScopeID(Name); 9109 } 9110 9111 bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const { 9112 return false; 9113 } 9114 9115 void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention( 9116 const FunctionType *&FT) const { 9117 FT = getABIInfo().getContext().adjustFunctionType( 9118 FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel)); 9119 } 9120 9121 //===----------------------------------------------------------------------===// 9122 // SPARC v8 ABI Implementation. 9123 // Based on the SPARC Compliance Definition version 2.4.1. 9124 // 9125 // Ensures that complex values are passed in registers. 9126 // 9127 namespace { 9128 class SparcV8ABIInfo : public DefaultABIInfo { 9129 public: 9130 SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 9131 9132 private: 9133 ABIArgInfo classifyReturnType(QualType RetTy) const; 9134 void computeInfo(CGFunctionInfo &FI) const override; 9135 }; 9136 } // end anonymous namespace 9137 9138 9139 ABIArgInfo 9140 SparcV8ABIInfo::classifyReturnType(QualType Ty) const { 9141 if (Ty->isAnyComplexType()) { 9142 return ABIArgInfo::getDirect(); 9143 } 9144 else { 9145 return DefaultABIInfo::classifyReturnType(Ty); 9146 } 9147 } 9148 9149 void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const { 9150 9151 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 9152 for (auto &Arg : FI.arguments()) 9153 Arg.info = classifyArgumentType(Arg.type); 9154 } 9155 9156 namespace { 9157 class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo { 9158 public: 9159 SparcV8TargetCodeGenInfo(CodeGenTypes &CGT) 9160 : TargetCodeGenInfo(std::make_unique<SparcV8ABIInfo>(CGT)) {} 9161 }; 9162 } // end anonymous namespace 9163 9164 //===----------------------------------------------------------------------===// 9165 // SPARC v9 ABI Implementation. 9166 // Based on the SPARC Compliance Definition version 2.4.1. 9167 // 9168 // Function arguments a mapped to a nominal "parameter array" and promoted to 9169 // registers depending on their type. Each argument occupies 8 or 16 bytes in 9170 // the array, structs larger than 16 bytes are passed indirectly. 9171 // 9172 // One case requires special care: 9173 // 9174 // struct mixed { 9175 // int i; 9176 // float f; 9177 // }; 9178 // 9179 // When a struct mixed is passed by value, it only occupies 8 bytes in the 9180 // parameter array, but the int is passed in an integer register, and the float 9181 // is passed in a floating point register. This is represented as two arguments 9182 // with the LLVM IR inreg attribute: 9183 // 9184 // declare void f(i32 inreg %i, float inreg %f) 9185 // 9186 // The code generator will only allocate 4 bytes from the parameter array for 9187 // the inreg arguments. All other arguments are allocated a multiple of 8 9188 // bytes. 9189 // 9190 namespace { 9191 class SparcV9ABIInfo : public ABIInfo { 9192 public: 9193 SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 9194 9195 private: 9196 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const; 9197 void computeInfo(CGFunctionInfo &FI) const override; 9198 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 9199 QualType Ty) const override; 9200 9201 // Coercion type builder for structs passed in registers. The coercion type 9202 // serves two purposes: 9203 // 9204 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned' 9205 // in registers. 9206 // 2. Expose aligned floating point elements as first-level elements, so the 9207 // code generator knows to pass them in floating point registers. 9208 // 9209 // We also compute the InReg flag which indicates that the struct contains 9210 // aligned 32-bit floats. 9211 // 9212 struct CoerceBuilder { 9213 llvm::LLVMContext &Context; 9214 const llvm::DataLayout &DL; 9215 SmallVector<llvm::Type*, 8> Elems; 9216 uint64_t Size; 9217 bool InReg; 9218 9219 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl) 9220 : Context(c), DL(dl), Size(0), InReg(false) {} 9221 9222 // Pad Elems with integers until Size is ToSize. 9223 void pad(uint64_t ToSize) { 9224 assert(ToSize >= Size && "Cannot remove elements"); 9225 if (ToSize == Size) 9226 return; 9227 9228 // Finish the current 64-bit word. 9229 uint64_t Aligned = llvm::alignTo(Size, 64); 9230 if (Aligned > Size && Aligned <= ToSize) { 9231 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size)); 9232 Size = Aligned; 9233 } 9234 9235 // Add whole 64-bit words. 9236 while (Size + 64 <= ToSize) { 9237 Elems.push_back(llvm::Type::getInt64Ty(Context)); 9238 Size += 64; 9239 } 9240 9241 // Final in-word padding. 9242 if (Size < ToSize) { 9243 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size)); 9244 Size = ToSize; 9245 } 9246 } 9247 9248 // Add a floating point element at Offset. 9249 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) { 9250 // Unaligned floats are treated as integers. 9251 if (Offset % Bits) 9252 return; 9253 // The InReg flag is only required if there are any floats < 64 bits. 9254 if (Bits < 64) 9255 InReg = true; 9256 pad(Offset); 9257 Elems.push_back(Ty); 9258 Size = Offset + Bits; 9259 } 9260 9261 // Add a struct type to the coercion type, starting at Offset (in bits). 9262 void addStruct(uint64_t Offset, llvm::StructType *StrTy) { 9263 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy); 9264 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) { 9265 llvm::Type *ElemTy = StrTy->getElementType(i); 9266 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i); 9267 switch (ElemTy->getTypeID()) { 9268 case llvm::Type::StructTyID: 9269 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy)); 9270 break; 9271 case llvm::Type::FloatTyID: 9272 addFloat(ElemOffset, ElemTy, 32); 9273 break; 9274 case llvm::Type::DoubleTyID: 9275 addFloat(ElemOffset, ElemTy, 64); 9276 break; 9277 case llvm::Type::FP128TyID: 9278 addFloat(ElemOffset, ElemTy, 128); 9279 break; 9280 case llvm::Type::PointerTyID: 9281 if (ElemOffset % 64 == 0) { 9282 pad(ElemOffset); 9283 Elems.push_back(ElemTy); 9284 Size += 64; 9285 } 9286 break; 9287 default: 9288 break; 9289 } 9290 } 9291 } 9292 9293 // Check if Ty is a usable substitute for the coercion type. 9294 bool isUsableType(llvm::StructType *Ty) const { 9295 return llvm::makeArrayRef(Elems) == Ty->elements(); 9296 } 9297 9298 // Get the coercion type as a literal struct type. 9299 llvm::Type *getType() const { 9300 if (Elems.size() == 1) 9301 return Elems.front(); 9302 else 9303 return llvm::StructType::get(Context, Elems); 9304 } 9305 }; 9306 }; 9307 } // end anonymous namespace 9308 9309 ABIArgInfo 9310 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const { 9311 if (Ty->isVoidType()) 9312 return ABIArgInfo::getIgnore(); 9313 9314 uint64_t Size = getContext().getTypeSize(Ty); 9315 9316 // Anything too big to fit in registers is passed with an explicit indirect 9317 // pointer / sret pointer. 9318 if (Size > SizeLimit) 9319 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 9320 9321 // Treat an enum type as its underlying type. 9322 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 9323 Ty = EnumTy->getDecl()->getIntegerType(); 9324 9325 // Integer types smaller than a register are extended. 9326 if (Size < 64 && Ty->isIntegerType()) 9327 return ABIArgInfo::getExtend(Ty); 9328 9329 if (const auto *EIT = Ty->getAs<ExtIntType>()) 9330 if (EIT->getNumBits() < 64) 9331 return ABIArgInfo::getExtend(Ty); 9332 9333 // Other non-aggregates go in registers. 9334 if (!isAggregateTypeForABI(Ty)) 9335 return ABIArgInfo::getDirect(); 9336 9337 // If a C++ object has either a non-trivial copy constructor or a non-trivial 9338 // destructor, it is passed with an explicit indirect pointer / sret pointer. 9339 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 9340 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 9341 9342 // This is a small aggregate type that should be passed in registers. 9343 // Build a coercion type from the LLVM struct type. 9344 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty)); 9345 if (!StrTy) 9346 return ABIArgInfo::getDirect(); 9347 9348 CoerceBuilder CB(getVMContext(), getDataLayout()); 9349 CB.addStruct(0, StrTy); 9350 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64)); 9351 9352 // Try to use the original type for coercion. 9353 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType(); 9354 9355 if (CB.InReg) 9356 return ABIArgInfo::getDirectInReg(CoerceTy); 9357 else 9358 return ABIArgInfo::getDirect(CoerceTy); 9359 } 9360 9361 Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 9362 QualType Ty) const { 9363 ABIArgInfo AI = classifyType(Ty, 16 * 8); 9364 llvm::Type *ArgTy = CGT.ConvertType(Ty); 9365 if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) 9366 AI.setCoerceToType(ArgTy); 9367 9368 CharUnits SlotSize = CharUnits::fromQuantity(8); 9369 9370 CGBuilderTy &Builder = CGF.Builder; 9371 Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize); 9372 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); 9373 9374 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 9375 9376 Address ArgAddr = Address::invalid(); 9377 CharUnits Stride; 9378 switch (AI.getKind()) { 9379 case ABIArgInfo::Expand: 9380 case ABIArgInfo::CoerceAndExpand: 9381 case ABIArgInfo::InAlloca: 9382 llvm_unreachable("Unsupported ABI kind for va_arg"); 9383 9384 case ABIArgInfo::Extend: { 9385 Stride = SlotSize; 9386 CharUnits Offset = SlotSize - TypeInfo.first; 9387 ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend"); 9388 break; 9389 } 9390 9391 case ABIArgInfo::Direct: { 9392 auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType()); 9393 Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize); 9394 ArgAddr = Addr; 9395 break; 9396 } 9397 9398 case ABIArgInfo::Indirect: 9399 case ABIArgInfo::IndirectAliased: 9400 Stride = SlotSize; 9401 ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect"); 9402 ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"), 9403 TypeInfo.second); 9404 break; 9405 9406 case ABIArgInfo::Ignore: 9407 return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second); 9408 } 9409 9410 // Update VAList. 9411 Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next"); 9412 Builder.CreateStore(NextPtr.getPointer(), VAListAddr); 9413 9414 return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr"); 9415 } 9416 9417 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const { 9418 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8); 9419 for (auto &I : FI.arguments()) 9420 I.info = classifyType(I.type, 16 * 8); 9421 } 9422 9423 namespace { 9424 class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo { 9425 public: 9426 SparcV9TargetCodeGenInfo(CodeGenTypes &CGT) 9427 : TargetCodeGenInfo(std::make_unique<SparcV9ABIInfo>(CGT)) {} 9428 9429 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 9430 return 14; 9431 } 9432 9433 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 9434 llvm::Value *Address) const override; 9435 }; 9436 } // end anonymous namespace 9437 9438 bool 9439 SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 9440 llvm::Value *Address) const { 9441 // This is calculated from the LLVM and GCC tables and verified 9442 // against gcc output. AFAIK all ABIs use the same encoding. 9443 9444 CodeGen::CGBuilderTy &Builder = CGF.Builder; 9445 9446 llvm::IntegerType *i8 = CGF.Int8Ty; 9447 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 9448 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 9449 9450 // 0-31: the 8-byte general-purpose registers 9451 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 9452 9453 // 32-63: f0-31, the 4-byte floating-point registers 9454 AssignToArrayRange(Builder, Address, Four8, 32, 63); 9455 9456 // Y = 64 9457 // PSR = 65 9458 // WIM = 66 9459 // TBR = 67 9460 // PC = 68 9461 // NPC = 69 9462 // FSR = 70 9463 // CSR = 71 9464 AssignToArrayRange(Builder, Address, Eight8, 64, 71); 9465 9466 // 72-87: d0-15, the 8-byte floating-point registers 9467 AssignToArrayRange(Builder, Address, Eight8, 72, 87); 9468 9469 return false; 9470 } 9471 9472 // ARC ABI implementation. 9473 namespace { 9474 9475 class ARCABIInfo : public DefaultABIInfo { 9476 public: 9477 using DefaultABIInfo::DefaultABIInfo; 9478 9479 private: 9480 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 9481 QualType Ty) const override; 9482 9483 void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const { 9484 if (!State.FreeRegs) 9485 return; 9486 if (Info.isIndirect() && Info.getInReg()) 9487 State.FreeRegs--; 9488 else if (Info.isDirect() && Info.getInReg()) { 9489 unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32; 9490 if (sz < State.FreeRegs) 9491 State.FreeRegs -= sz; 9492 else 9493 State.FreeRegs = 0; 9494 } 9495 } 9496 9497 void computeInfo(CGFunctionInfo &FI) const override { 9498 CCState State(FI); 9499 // ARC uses 8 registers to pass arguments. 9500 State.FreeRegs = 8; 9501 9502 if (!getCXXABI().classifyReturnType(FI)) 9503 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 9504 updateState(FI.getReturnInfo(), FI.getReturnType(), State); 9505 for (auto &I : FI.arguments()) { 9506 I.info = classifyArgumentType(I.type, State.FreeRegs); 9507 updateState(I.info, I.type, State); 9508 } 9509 } 9510 9511 ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const; 9512 ABIArgInfo getIndirectByValue(QualType Ty) const; 9513 ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const; 9514 ABIArgInfo classifyReturnType(QualType RetTy) const; 9515 }; 9516 9517 class ARCTargetCodeGenInfo : public TargetCodeGenInfo { 9518 public: 9519 ARCTargetCodeGenInfo(CodeGenTypes &CGT) 9520 : TargetCodeGenInfo(std::make_unique<ARCABIInfo>(CGT)) {} 9521 }; 9522 9523 9524 ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const { 9525 return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) : 9526 getNaturalAlignIndirect(Ty, false); 9527 } 9528 9529 ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const { 9530 // Compute the byval alignment. 9531 const unsigned MinABIStackAlignInBytes = 4; 9532 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 9533 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true, 9534 TypeAlign > MinABIStackAlignInBytes); 9535 } 9536 9537 Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 9538 QualType Ty) const { 9539 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 9540 getContext().getTypeInfoInChars(Ty), 9541 CharUnits::fromQuantity(4), true); 9542 } 9543 9544 ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty, 9545 uint8_t FreeRegs) const { 9546 // Handle the generic C++ ABI. 9547 const RecordType *RT = Ty->getAs<RecordType>(); 9548 if (RT) { 9549 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); 9550 if (RAA == CGCXXABI::RAA_Indirect) 9551 return getIndirectByRef(Ty, FreeRegs > 0); 9552 9553 if (RAA == CGCXXABI::RAA_DirectInMemory) 9554 return getIndirectByValue(Ty); 9555 } 9556 9557 // Treat an enum type as its underlying type. 9558 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 9559 Ty = EnumTy->getDecl()->getIntegerType(); 9560 9561 auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32; 9562 9563 if (isAggregateTypeForABI(Ty)) { 9564 // Structures with flexible arrays are always indirect. 9565 if (RT && RT->getDecl()->hasFlexibleArrayMember()) 9566 return getIndirectByValue(Ty); 9567 9568 // Ignore empty structs/unions. 9569 if (isEmptyRecord(getContext(), Ty, true)) 9570 return ABIArgInfo::getIgnore(); 9571 9572 llvm::LLVMContext &LLVMContext = getVMContext(); 9573 9574 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 9575 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32); 9576 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 9577 9578 return FreeRegs >= SizeInRegs ? 9579 ABIArgInfo::getDirectInReg(Result) : 9580 ABIArgInfo::getDirect(Result, 0, nullptr, false); 9581 } 9582 9583 if (const auto *EIT = Ty->getAs<ExtIntType>()) 9584 if (EIT->getNumBits() > 64) 9585 return getIndirectByValue(Ty); 9586 9587 return isPromotableIntegerTypeForABI(Ty) 9588 ? (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty) 9589 : ABIArgInfo::getExtend(Ty)) 9590 : (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg() 9591 : ABIArgInfo::getDirect()); 9592 } 9593 9594 ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const { 9595 if (RetTy->isAnyComplexType()) 9596 return ABIArgInfo::getDirectInReg(); 9597 9598 // Arguments of size > 4 registers are indirect. 9599 auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32; 9600 if (RetSize > 4) 9601 return getIndirectByRef(RetTy, /*HasFreeRegs*/ true); 9602 9603 return DefaultABIInfo::classifyReturnType(RetTy); 9604 } 9605 9606 } // End anonymous namespace. 9607 9608 //===----------------------------------------------------------------------===// 9609 // XCore ABI Implementation 9610 //===----------------------------------------------------------------------===// 9611 9612 namespace { 9613 9614 /// A SmallStringEnc instance is used to build up the TypeString by passing 9615 /// it by reference between functions that append to it. 9616 typedef llvm::SmallString<128> SmallStringEnc; 9617 9618 /// TypeStringCache caches the meta encodings of Types. 9619 /// 9620 /// The reason for caching TypeStrings is two fold: 9621 /// 1. To cache a type's encoding for later uses; 9622 /// 2. As a means to break recursive member type inclusion. 9623 /// 9624 /// A cache Entry can have a Status of: 9625 /// NonRecursive: The type encoding is not recursive; 9626 /// Recursive: The type encoding is recursive; 9627 /// Incomplete: An incomplete TypeString; 9628 /// IncompleteUsed: An incomplete TypeString that has been used in a 9629 /// Recursive type encoding. 9630 /// 9631 /// A NonRecursive entry will have all of its sub-members expanded as fully 9632 /// as possible. Whilst it may contain types which are recursive, the type 9633 /// itself is not recursive and thus its encoding may be safely used whenever 9634 /// the type is encountered. 9635 /// 9636 /// A Recursive entry will have all of its sub-members expanded as fully as 9637 /// possible. The type itself is recursive and it may contain other types which 9638 /// are recursive. The Recursive encoding must not be used during the expansion 9639 /// of a recursive type's recursive branch. For simplicity the code uses 9640 /// IncompleteCount to reject all usage of Recursive encodings for member types. 9641 /// 9642 /// An Incomplete entry is always a RecordType and only encodes its 9643 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and 9644 /// are placed into the cache during type expansion as a means to identify and 9645 /// handle recursive inclusion of types as sub-members. If there is recursion 9646 /// the entry becomes IncompleteUsed. 9647 /// 9648 /// During the expansion of a RecordType's members: 9649 /// 9650 /// If the cache contains a NonRecursive encoding for the member type, the 9651 /// cached encoding is used; 9652 /// 9653 /// If the cache contains a Recursive encoding for the member type, the 9654 /// cached encoding is 'Swapped' out, as it may be incorrect, and... 9655 /// 9656 /// If the member is a RecordType, an Incomplete encoding is placed into the 9657 /// cache to break potential recursive inclusion of itself as a sub-member; 9658 /// 9659 /// Once a member RecordType has been expanded, its temporary incomplete 9660 /// entry is removed from the cache. If a Recursive encoding was swapped out 9661 /// it is swapped back in; 9662 /// 9663 /// If an incomplete entry is used to expand a sub-member, the incomplete 9664 /// entry is marked as IncompleteUsed. The cache keeps count of how many 9665 /// IncompleteUsed entries it currently contains in IncompleteUsedCount; 9666 /// 9667 /// If a member's encoding is found to be a NonRecursive or Recursive viz: 9668 /// IncompleteUsedCount==0, the member's encoding is added to the cache. 9669 /// Else the member is part of a recursive type and thus the recursion has 9670 /// been exited too soon for the encoding to be correct for the member. 9671 /// 9672 class TypeStringCache { 9673 enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed}; 9674 struct Entry { 9675 std::string Str; // The encoded TypeString for the type. 9676 enum Status State; // Information about the encoding in 'Str'. 9677 std::string Swapped; // A temporary place holder for a Recursive encoding 9678 // during the expansion of RecordType's members. 9679 }; 9680 std::map<const IdentifierInfo *, struct Entry> Map; 9681 unsigned IncompleteCount; // Number of Incomplete entries in the Map. 9682 unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map. 9683 public: 9684 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {} 9685 void addIncomplete(const IdentifierInfo *ID, std::string StubEnc); 9686 bool removeIncomplete(const IdentifierInfo *ID); 9687 void addIfComplete(const IdentifierInfo *ID, StringRef Str, 9688 bool IsRecursive); 9689 StringRef lookupStr(const IdentifierInfo *ID); 9690 }; 9691 9692 /// TypeString encodings for enum & union fields must be order. 9693 /// FieldEncoding is a helper for this ordering process. 9694 class FieldEncoding { 9695 bool HasName; 9696 std::string Enc; 9697 public: 9698 FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {} 9699 StringRef str() { return Enc; } 9700 bool operator<(const FieldEncoding &rhs) const { 9701 if (HasName != rhs.HasName) return HasName; 9702 return Enc < rhs.Enc; 9703 } 9704 }; 9705 9706 class XCoreABIInfo : public DefaultABIInfo { 9707 public: 9708 XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 9709 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 9710 QualType Ty) const override; 9711 }; 9712 9713 class XCoreTargetCodeGenInfo : public TargetCodeGenInfo { 9714 mutable TypeStringCache TSC; 9715 void emitTargetMD(const Decl *D, llvm::GlobalValue *GV, 9716 const CodeGen::CodeGenModule &M) const; 9717 9718 public: 9719 XCoreTargetCodeGenInfo(CodeGenTypes &CGT) 9720 : TargetCodeGenInfo(std::make_unique<XCoreABIInfo>(CGT)) {} 9721 void emitTargetMetadata(CodeGen::CodeGenModule &CGM, 9722 const llvm::MapVector<GlobalDecl, StringRef> 9723 &MangledDeclNames) const override; 9724 }; 9725 9726 } // End anonymous namespace. 9727 9728 // TODO: this implementation is likely now redundant with the default 9729 // EmitVAArg. 9730 Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 9731 QualType Ty) const { 9732 CGBuilderTy &Builder = CGF.Builder; 9733 9734 // Get the VAList. 9735 CharUnits SlotSize = CharUnits::fromQuantity(4); 9736 Address AP(Builder.CreateLoad(VAListAddr), SlotSize); 9737 9738 // Handle the argument. 9739 ABIArgInfo AI = classifyArgumentType(Ty); 9740 CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty); 9741 llvm::Type *ArgTy = CGT.ConvertType(Ty); 9742 if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) 9743 AI.setCoerceToType(ArgTy); 9744 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); 9745 9746 Address Val = Address::invalid(); 9747 CharUnits ArgSize = CharUnits::Zero(); 9748 switch (AI.getKind()) { 9749 case ABIArgInfo::Expand: 9750 case ABIArgInfo::CoerceAndExpand: 9751 case ABIArgInfo::InAlloca: 9752 llvm_unreachable("Unsupported ABI kind for va_arg"); 9753 case ABIArgInfo::Ignore: 9754 Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign); 9755 ArgSize = CharUnits::Zero(); 9756 break; 9757 case ABIArgInfo::Extend: 9758 case ABIArgInfo::Direct: 9759 Val = Builder.CreateBitCast(AP, ArgPtrTy); 9760 ArgSize = CharUnits::fromQuantity( 9761 getDataLayout().getTypeAllocSize(AI.getCoerceToType())); 9762 ArgSize = ArgSize.alignTo(SlotSize); 9763 break; 9764 case ABIArgInfo::Indirect: 9765 case ABIArgInfo::IndirectAliased: 9766 Val = Builder.CreateElementBitCast(AP, ArgPtrTy); 9767 Val = Address(Builder.CreateLoad(Val), TypeAlign); 9768 ArgSize = SlotSize; 9769 break; 9770 } 9771 9772 // Increment the VAList. 9773 if (!ArgSize.isZero()) { 9774 Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize); 9775 Builder.CreateStore(APN.getPointer(), VAListAddr); 9776 } 9777 9778 return Val; 9779 } 9780 9781 /// During the expansion of a RecordType, an incomplete TypeString is placed 9782 /// into the cache as a means to identify and break recursion. 9783 /// If there is a Recursive encoding in the cache, it is swapped out and will 9784 /// be reinserted by removeIncomplete(). 9785 /// All other types of encoding should have been used rather than arriving here. 9786 void TypeStringCache::addIncomplete(const IdentifierInfo *ID, 9787 std::string StubEnc) { 9788 if (!ID) 9789 return; 9790 Entry &E = Map[ID]; 9791 assert( (E.Str.empty() || E.State == Recursive) && 9792 "Incorrectly use of addIncomplete"); 9793 assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()"); 9794 E.Swapped.swap(E.Str); // swap out the Recursive 9795 E.Str.swap(StubEnc); 9796 E.State = Incomplete; 9797 ++IncompleteCount; 9798 } 9799 9800 /// Once the RecordType has been expanded, the temporary incomplete TypeString 9801 /// must be removed from the cache. 9802 /// If a Recursive was swapped out by addIncomplete(), it will be replaced. 9803 /// Returns true if the RecordType was defined recursively. 9804 bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) { 9805 if (!ID) 9806 return false; 9807 auto I = Map.find(ID); 9808 assert(I != Map.end() && "Entry not present"); 9809 Entry &E = I->second; 9810 assert( (E.State == Incomplete || 9811 E.State == IncompleteUsed) && 9812 "Entry must be an incomplete type"); 9813 bool IsRecursive = false; 9814 if (E.State == IncompleteUsed) { 9815 // We made use of our Incomplete encoding, thus we are recursive. 9816 IsRecursive = true; 9817 --IncompleteUsedCount; 9818 } 9819 if (E.Swapped.empty()) 9820 Map.erase(I); 9821 else { 9822 // Swap the Recursive back. 9823 E.Swapped.swap(E.Str); 9824 E.Swapped.clear(); 9825 E.State = Recursive; 9826 } 9827 --IncompleteCount; 9828 return IsRecursive; 9829 } 9830 9831 /// Add the encoded TypeString to the cache only if it is NonRecursive or 9832 /// Recursive (viz: all sub-members were expanded as fully as possible). 9833 void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str, 9834 bool IsRecursive) { 9835 if (!ID || IncompleteUsedCount) 9836 return; // No key or it is is an incomplete sub-type so don't add. 9837 Entry &E = Map[ID]; 9838 if (IsRecursive && !E.Str.empty()) { 9839 assert(E.State==Recursive && E.Str.size() == Str.size() && 9840 "This is not the same Recursive entry"); 9841 // The parent container was not recursive after all, so we could have used 9842 // this Recursive sub-member entry after all, but we assumed the worse when 9843 // we started viz: IncompleteCount!=0. 9844 return; 9845 } 9846 assert(E.Str.empty() && "Entry already present"); 9847 E.Str = Str.str(); 9848 E.State = IsRecursive? Recursive : NonRecursive; 9849 } 9850 9851 /// Return a cached TypeString encoding for the ID. If there isn't one, or we 9852 /// are recursively expanding a type (IncompleteCount != 0) and the cached 9853 /// encoding is Recursive, return an empty StringRef. 9854 StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) { 9855 if (!ID) 9856 return StringRef(); // We have no key. 9857 auto I = Map.find(ID); 9858 if (I == Map.end()) 9859 return StringRef(); // We have no encoding. 9860 Entry &E = I->second; 9861 if (E.State == Recursive && IncompleteCount) 9862 return StringRef(); // We don't use Recursive encodings for member types. 9863 9864 if (E.State == Incomplete) { 9865 // The incomplete type is being used to break out of recursion. 9866 E.State = IncompleteUsed; 9867 ++IncompleteUsedCount; 9868 } 9869 return E.Str; 9870 } 9871 9872 /// The XCore ABI includes a type information section that communicates symbol 9873 /// type information to the linker. The linker uses this information to verify 9874 /// safety/correctness of things such as array bound and pointers et al. 9875 /// The ABI only requires C (and XC) language modules to emit TypeStrings. 9876 /// This type information (TypeString) is emitted into meta data for all global 9877 /// symbols: definitions, declarations, functions & variables. 9878 /// 9879 /// The TypeString carries type, qualifier, name, size & value details. 9880 /// Please see 'Tools Development Guide' section 2.16.2 for format details: 9881 /// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf 9882 /// The output is tested by test/CodeGen/xcore-stringtype.c. 9883 /// 9884 static bool getTypeString(SmallStringEnc &Enc, const Decl *D, 9885 const CodeGen::CodeGenModule &CGM, 9886 TypeStringCache &TSC); 9887 9888 /// XCore uses emitTargetMD to emit TypeString metadata for global symbols. 9889 void XCoreTargetCodeGenInfo::emitTargetMD( 9890 const Decl *D, llvm::GlobalValue *GV, 9891 const CodeGen::CodeGenModule &CGM) const { 9892 SmallStringEnc Enc; 9893 if (getTypeString(Enc, D, CGM, TSC)) { 9894 llvm::LLVMContext &Ctx = CGM.getModule().getContext(); 9895 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV), 9896 llvm::MDString::get(Ctx, Enc.str())}; 9897 llvm::NamedMDNode *MD = 9898 CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings"); 9899 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 9900 } 9901 } 9902 9903 void XCoreTargetCodeGenInfo::emitTargetMetadata( 9904 CodeGen::CodeGenModule &CGM, 9905 const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const { 9906 // Warning, new MangledDeclNames may be appended within this loop. 9907 // We rely on MapVector insertions adding new elements to the end 9908 // of the container. 9909 for (unsigned I = 0; I != MangledDeclNames.size(); ++I) { 9910 auto Val = *(MangledDeclNames.begin() + I); 9911 llvm::GlobalValue *GV = CGM.GetGlobalValue(Val.second); 9912 if (GV) { 9913 const Decl *D = Val.first.getDecl()->getMostRecentDecl(); 9914 emitTargetMD(D, GV, CGM); 9915 } 9916 } 9917 } 9918 //===----------------------------------------------------------------------===// 9919 // SPIR ABI Implementation 9920 //===----------------------------------------------------------------------===// 9921 9922 namespace { 9923 class SPIRTargetCodeGenInfo : public TargetCodeGenInfo { 9924 public: 9925 SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 9926 : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {} 9927 unsigned getOpenCLKernelCallingConv() const override; 9928 }; 9929 9930 } // End anonymous namespace. 9931 9932 namespace clang { 9933 namespace CodeGen { 9934 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) { 9935 DefaultABIInfo SPIRABI(CGM.getTypes()); 9936 SPIRABI.computeInfo(FI); 9937 } 9938 } 9939 } 9940 9941 unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const { 9942 return llvm::CallingConv::SPIR_KERNEL; 9943 } 9944 9945 static bool appendType(SmallStringEnc &Enc, QualType QType, 9946 const CodeGen::CodeGenModule &CGM, 9947 TypeStringCache &TSC); 9948 9949 /// Helper function for appendRecordType(). 9950 /// Builds a SmallVector containing the encoded field types in declaration 9951 /// order. 9952 static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE, 9953 const RecordDecl *RD, 9954 const CodeGen::CodeGenModule &CGM, 9955 TypeStringCache &TSC) { 9956 for (const auto *Field : RD->fields()) { 9957 SmallStringEnc Enc; 9958 Enc += "m("; 9959 Enc += Field->getName(); 9960 Enc += "){"; 9961 if (Field->isBitField()) { 9962 Enc += "b("; 9963 llvm::raw_svector_ostream OS(Enc); 9964 OS << Field->getBitWidthValue(CGM.getContext()); 9965 Enc += ':'; 9966 } 9967 if (!appendType(Enc, Field->getType(), CGM, TSC)) 9968 return false; 9969 if (Field->isBitField()) 9970 Enc += ')'; 9971 Enc += '}'; 9972 FE.emplace_back(!Field->getName().empty(), Enc); 9973 } 9974 return true; 9975 } 9976 9977 /// Appends structure and union types to Enc and adds encoding to cache. 9978 /// Recursively calls appendType (via extractFieldType) for each field. 9979 /// Union types have their fields ordered according to the ABI. 9980 static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, 9981 const CodeGen::CodeGenModule &CGM, 9982 TypeStringCache &TSC, const IdentifierInfo *ID) { 9983 // Append the cached TypeString if we have one. 9984 StringRef TypeString = TSC.lookupStr(ID); 9985 if (!TypeString.empty()) { 9986 Enc += TypeString; 9987 return true; 9988 } 9989 9990 // Start to emit an incomplete TypeString. 9991 size_t Start = Enc.size(); 9992 Enc += (RT->isUnionType()? 'u' : 's'); 9993 Enc += '('; 9994 if (ID) 9995 Enc += ID->getName(); 9996 Enc += "){"; 9997 9998 // We collect all encoded fields and order as necessary. 9999 bool IsRecursive = false; 10000 const RecordDecl *RD = RT->getDecl()->getDefinition(); 10001 if (RD && !RD->field_empty()) { 10002 // An incomplete TypeString stub is placed in the cache for this RecordType 10003 // so that recursive calls to this RecordType will use it whilst building a 10004 // complete TypeString for this RecordType. 10005 SmallVector<FieldEncoding, 16> FE; 10006 std::string StubEnc(Enc.substr(Start).str()); 10007 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString. 10008 TSC.addIncomplete(ID, std::move(StubEnc)); 10009 if (!extractFieldType(FE, RD, CGM, TSC)) { 10010 (void) TSC.removeIncomplete(ID); 10011 return false; 10012 } 10013 IsRecursive = TSC.removeIncomplete(ID); 10014 // The ABI requires unions to be sorted but not structures. 10015 // See FieldEncoding::operator< for sort algorithm. 10016 if (RT->isUnionType()) 10017 llvm::sort(FE); 10018 // We can now complete the TypeString. 10019 unsigned E = FE.size(); 10020 for (unsigned I = 0; I != E; ++I) { 10021 if (I) 10022 Enc += ','; 10023 Enc += FE[I].str(); 10024 } 10025 } 10026 Enc += '}'; 10027 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive); 10028 return true; 10029 } 10030 10031 /// Appends enum types to Enc and adds the encoding to the cache. 10032 static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, 10033 TypeStringCache &TSC, 10034 const IdentifierInfo *ID) { 10035 // Append the cached TypeString if we have one. 10036 StringRef TypeString = TSC.lookupStr(ID); 10037 if (!TypeString.empty()) { 10038 Enc += TypeString; 10039 return true; 10040 } 10041 10042 size_t Start = Enc.size(); 10043 Enc += "e("; 10044 if (ID) 10045 Enc += ID->getName(); 10046 Enc += "){"; 10047 10048 // We collect all encoded enumerations and order them alphanumerically. 10049 if (const EnumDecl *ED = ET->getDecl()->getDefinition()) { 10050 SmallVector<FieldEncoding, 16> FE; 10051 for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E; 10052 ++I) { 10053 SmallStringEnc EnumEnc; 10054 EnumEnc += "m("; 10055 EnumEnc += I->getName(); 10056 EnumEnc += "){"; 10057 I->getInitVal().toString(EnumEnc); 10058 EnumEnc += '}'; 10059 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc)); 10060 } 10061 llvm::sort(FE); 10062 unsigned E = FE.size(); 10063 for (unsigned I = 0; I != E; ++I) { 10064 if (I) 10065 Enc += ','; 10066 Enc += FE[I].str(); 10067 } 10068 } 10069 Enc += '}'; 10070 TSC.addIfComplete(ID, Enc.substr(Start), false); 10071 return true; 10072 } 10073 10074 /// Appends type's qualifier to Enc. 10075 /// This is done prior to appending the type's encoding. 10076 static void appendQualifier(SmallStringEnc &Enc, QualType QT) { 10077 // Qualifiers are emitted in alphabetical order. 10078 static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"}; 10079 int Lookup = 0; 10080 if (QT.isConstQualified()) 10081 Lookup += 1<<0; 10082 if (QT.isRestrictQualified()) 10083 Lookup += 1<<1; 10084 if (QT.isVolatileQualified()) 10085 Lookup += 1<<2; 10086 Enc += Table[Lookup]; 10087 } 10088 10089 /// Appends built-in types to Enc. 10090 static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) { 10091 const char *EncType; 10092 switch (BT->getKind()) { 10093 case BuiltinType::Void: 10094 EncType = "0"; 10095 break; 10096 case BuiltinType::Bool: 10097 EncType = "b"; 10098 break; 10099 case BuiltinType::Char_U: 10100 EncType = "uc"; 10101 break; 10102 case BuiltinType::UChar: 10103 EncType = "uc"; 10104 break; 10105 case BuiltinType::SChar: 10106 EncType = "sc"; 10107 break; 10108 case BuiltinType::UShort: 10109 EncType = "us"; 10110 break; 10111 case BuiltinType::Short: 10112 EncType = "ss"; 10113 break; 10114 case BuiltinType::UInt: 10115 EncType = "ui"; 10116 break; 10117 case BuiltinType::Int: 10118 EncType = "si"; 10119 break; 10120 case BuiltinType::ULong: 10121 EncType = "ul"; 10122 break; 10123 case BuiltinType::Long: 10124 EncType = "sl"; 10125 break; 10126 case BuiltinType::ULongLong: 10127 EncType = "ull"; 10128 break; 10129 case BuiltinType::LongLong: 10130 EncType = "sll"; 10131 break; 10132 case BuiltinType::Float: 10133 EncType = "ft"; 10134 break; 10135 case BuiltinType::Double: 10136 EncType = "d"; 10137 break; 10138 case BuiltinType::LongDouble: 10139 EncType = "ld"; 10140 break; 10141 default: 10142 return false; 10143 } 10144 Enc += EncType; 10145 return true; 10146 } 10147 10148 /// Appends a pointer encoding to Enc before calling appendType for the pointee. 10149 static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, 10150 const CodeGen::CodeGenModule &CGM, 10151 TypeStringCache &TSC) { 10152 Enc += "p("; 10153 if (!appendType(Enc, PT->getPointeeType(), CGM, TSC)) 10154 return false; 10155 Enc += ')'; 10156 return true; 10157 } 10158 10159 /// Appends array encoding to Enc before calling appendType for the element. 10160 static bool appendArrayType(SmallStringEnc &Enc, QualType QT, 10161 const ArrayType *AT, 10162 const CodeGen::CodeGenModule &CGM, 10163 TypeStringCache &TSC, StringRef NoSizeEnc) { 10164 if (AT->getSizeModifier() != ArrayType::Normal) 10165 return false; 10166 Enc += "a("; 10167 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) 10168 CAT->getSize().toStringUnsigned(Enc); 10169 else 10170 Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "". 10171 Enc += ':'; 10172 // The Qualifiers should be attached to the type rather than the array. 10173 appendQualifier(Enc, QT); 10174 if (!appendType(Enc, AT->getElementType(), CGM, TSC)) 10175 return false; 10176 Enc += ')'; 10177 return true; 10178 } 10179 10180 /// Appends a function encoding to Enc, calling appendType for the return type 10181 /// and the arguments. 10182 static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, 10183 const CodeGen::CodeGenModule &CGM, 10184 TypeStringCache &TSC) { 10185 Enc += "f{"; 10186 if (!appendType(Enc, FT->getReturnType(), CGM, TSC)) 10187 return false; 10188 Enc += "}("; 10189 if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) { 10190 // N.B. we are only interested in the adjusted param types. 10191 auto I = FPT->param_type_begin(); 10192 auto E = FPT->param_type_end(); 10193 if (I != E) { 10194 do { 10195 if (!appendType(Enc, *I, CGM, TSC)) 10196 return false; 10197 ++I; 10198 if (I != E) 10199 Enc += ','; 10200 } while (I != E); 10201 if (FPT->isVariadic()) 10202 Enc += ",va"; 10203 } else { 10204 if (FPT->isVariadic()) 10205 Enc += "va"; 10206 else 10207 Enc += '0'; 10208 } 10209 } 10210 Enc += ')'; 10211 return true; 10212 } 10213 10214 /// Handles the type's qualifier before dispatching a call to handle specific 10215 /// type encodings. 10216 static bool appendType(SmallStringEnc &Enc, QualType QType, 10217 const CodeGen::CodeGenModule &CGM, 10218 TypeStringCache &TSC) { 10219 10220 QualType QT = QType.getCanonicalType(); 10221 10222 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) 10223 // The Qualifiers should be attached to the type rather than the array. 10224 // Thus we don't call appendQualifier() here. 10225 return appendArrayType(Enc, QT, AT, CGM, TSC, ""); 10226 10227 appendQualifier(Enc, QT); 10228 10229 if (const BuiltinType *BT = QT->getAs<BuiltinType>()) 10230 return appendBuiltinType(Enc, BT); 10231 10232 if (const PointerType *PT = QT->getAs<PointerType>()) 10233 return appendPointerType(Enc, PT, CGM, TSC); 10234 10235 if (const EnumType *ET = QT->getAs<EnumType>()) 10236 return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier()); 10237 10238 if (const RecordType *RT = QT->getAsStructureType()) 10239 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); 10240 10241 if (const RecordType *RT = QT->getAsUnionType()) 10242 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); 10243 10244 if (const FunctionType *FT = QT->getAs<FunctionType>()) 10245 return appendFunctionType(Enc, FT, CGM, TSC); 10246 10247 return false; 10248 } 10249 10250 static bool getTypeString(SmallStringEnc &Enc, const Decl *D, 10251 const CodeGen::CodeGenModule &CGM, 10252 TypeStringCache &TSC) { 10253 if (!D) 10254 return false; 10255 10256 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 10257 if (FD->getLanguageLinkage() != CLanguageLinkage) 10258 return false; 10259 return appendType(Enc, FD->getType(), CGM, TSC); 10260 } 10261 10262 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { 10263 if (VD->getLanguageLinkage() != CLanguageLinkage) 10264 return false; 10265 QualType QT = VD->getType().getCanonicalType(); 10266 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) { 10267 // Global ArrayTypes are given a size of '*' if the size is unknown. 10268 // The Qualifiers should be attached to the type rather than the array. 10269 // Thus we don't call appendQualifier() here. 10270 return appendArrayType(Enc, QT, AT, CGM, TSC, "*"); 10271 } 10272 return appendType(Enc, QT, CGM, TSC); 10273 } 10274 return false; 10275 } 10276 10277 //===----------------------------------------------------------------------===// 10278 // RISCV ABI Implementation 10279 //===----------------------------------------------------------------------===// 10280 10281 namespace { 10282 class RISCVABIInfo : public DefaultABIInfo { 10283 private: 10284 // Size of the integer ('x') registers in bits. 10285 unsigned XLen; 10286 // Size of the floating point ('f') registers in bits. Note that the target 10287 // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target 10288 // with soft float ABI has FLen==0). 10289 unsigned FLen; 10290 static const int NumArgGPRs = 8; 10291 static const int NumArgFPRs = 8; 10292 bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff, 10293 llvm::Type *&Field1Ty, 10294 CharUnits &Field1Off, 10295 llvm::Type *&Field2Ty, 10296 CharUnits &Field2Off) const; 10297 10298 public: 10299 RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen) 10300 : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {} 10301 10302 // DefaultABIInfo's classifyReturnType and classifyArgumentType are 10303 // non-virtual, but computeInfo is virtual, so we overload it. 10304 void computeInfo(CGFunctionInfo &FI) const override; 10305 10306 ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft, 10307 int &ArgFPRsLeft) const; 10308 ABIArgInfo classifyReturnType(QualType RetTy) const; 10309 10310 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 10311 QualType Ty) const override; 10312 10313 ABIArgInfo extendType(QualType Ty) const; 10314 10315 bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty, 10316 CharUnits &Field1Off, llvm::Type *&Field2Ty, 10317 CharUnits &Field2Off, int &NeededArgGPRs, 10318 int &NeededArgFPRs) const; 10319 ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty, 10320 CharUnits Field1Off, 10321 llvm::Type *Field2Ty, 10322 CharUnits Field2Off) const; 10323 }; 10324 } // end anonymous namespace 10325 10326 void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const { 10327 QualType RetTy = FI.getReturnType(); 10328 if (!getCXXABI().classifyReturnType(FI)) 10329 FI.getReturnInfo() = classifyReturnType(RetTy); 10330 10331 // IsRetIndirect is true if classifyArgumentType indicated the value should 10332 // be passed indirect, or if the type size is a scalar greater than 2*XLen 10333 // and not a complex type with elements <= FLen. e.g. fp128 is passed direct 10334 // in LLVM IR, relying on the backend lowering code to rewrite the argument 10335 // list and pass indirectly on RV32. 10336 bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect; 10337 if (!IsRetIndirect && RetTy->isScalarType() && 10338 getContext().getTypeSize(RetTy) > (2 * XLen)) { 10339 if (RetTy->isComplexType() && FLen) { 10340 QualType EltTy = RetTy->getAs<ComplexType>()->getElementType(); 10341 IsRetIndirect = getContext().getTypeSize(EltTy) > FLen; 10342 } else { 10343 // This is a normal scalar > 2*XLen, such as fp128 on RV32. 10344 IsRetIndirect = true; 10345 } 10346 } 10347 10348 // We must track the number of GPRs used in order to conform to the RISC-V 10349 // ABI, as integer scalars passed in registers should have signext/zeroext 10350 // when promoted, but are anyext if passed on the stack. As GPR usage is 10351 // different for variadic arguments, we must also track whether we are 10352 // examining a vararg or not. 10353 int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs; 10354 int ArgFPRsLeft = FLen ? NumArgFPRs : 0; 10355 int NumFixedArgs = FI.getNumRequiredArgs(); 10356 10357 int ArgNum = 0; 10358 for (auto &ArgInfo : FI.arguments()) { 10359 bool IsFixed = ArgNum < NumFixedArgs; 10360 ArgInfo.info = 10361 classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft); 10362 ArgNum++; 10363 } 10364 } 10365 10366 // Returns true if the struct is a potential candidate for the floating point 10367 // calling convention. If this function returns true, the caller is 10368 // responsible for checking that if there is only a single field then that 10369 // field is a float. 10370 bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff, 10371 llvm::Type *&Field1Ty, 10372 CharUnits &Field1Off, 10373 llvm::Type *&Field2Ty, 10374 CharUnits &Field2Off) const { 10375 bool IsInt = Ty->isIntegralOrEnumerationType(); 10376 bool IsFloat = Ty->isRealFloatingType(); 10377 10378 if (IsInt || IsFloat) { 10379 uint64_t Size = getContext().getTypeSize(Ty); 10380 if (IsInt && Size > XLen) 10381 return false; 10382 // Can't be eligible if larger than the FP registers. Half precision isn't 10383 // currently supported on RISC-V and the ABI hasn't been confirmed, so 10384 // default to the integer ABI in that case. 10385 if (IsFloat && (Size > FLen || Size < 32)) 10386 return false; 10387 // Can't be eligible if an integer type was already found (int+int pairs 10388 // are not eligible). 10389 if (IsInt && Field1Ty && Field1Ty->isIntegerTy()) 10390 return false; 10391 if (!Field1Ty) { 10392 Field1Ty = CGT.ConvertType(Ty); 10393 Field1Off = CurOff; 10394 return true; 10395 } 10396 if (!Field2Ty) { 10397 Field2Ty = CGT.ConvertType(Ty); 10398 Field2Off = CurOff; 10399 return true; 10400 } 10401 return false; 10402 } 10403 10404 if (auto CTy = Ty->getAs<ComplexType>()) { 10405 if (Field1Ty) 10406 return false; 10407 QualType EltTy = CTy->getElementType(); 10408 if (getContext().getTypeSize(EltTy) > FLen) 10409 return false; 10410 Field1Ty = CGT.ConvertType(EltTy); 10411 Field1Off = CurOff; 10412 assert(CurOff.isZero() && "Unexpected offset for first field"); 10413 Field2Ty = Field1Ty; 10414 Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy); 10415 return true; 10416 } 10417 10418 if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) { 10419 uint64_t ArraySize = ATy->getSize().getZExtValue(); 10420 QualType EltTy = ATy->getElementType(); 10421 CharUnits EltSize = getContext().getTypeSizeInChars(EltTy); 10422 for (uint64_t i = 0; i < ArraySize; ++i) { 10423 bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty, 10424 Field1Off, Field2Ty, Field2Off); 10425 if (!Ret) 10426 return false; 10427 CurOff += EltSize; 10428 } 10429 return true; 10430 } 10431 10432 if (const auto *RTy = Ty->getAs<RecordType>()) { 10433 // Structures with either a non-trivial destructor or a non-trivial 10434 // copy constructor are not eligible for the FP calling convention. 10435 if (getRecordArgABI(Ty, CGT.getCXXABI())) 10436 return false; 10437 if (isEmptyRecord(getContext(), Ty, true)) 10438 return true; 10439 const RecordDecl *RD = RTy->getDecl(); 10440 // Unions aren't eligible unless they're empty (which is caught above). 10441 if (RD->isUnion()) 10442 return false; 10443 int ZeroWidthBitFieldCount = 0; 10444 for (const FieldDecl *FD : RD->fields()) { 10445 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 10446 uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex()); 10447 QualType QTy = FD->getType(); 10448 if (FD->isBitField()) { 10449 unsigned BitWidth = FD->getBitWidthValue(getContext()); 10450 // Allow a bitfield with a type greater than XLen as long as the 10451 // bitwidth is XLen or less. 10452 if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen) 10453 QTy = getContext().getIntTypeForBitwidth(XLen, false); 10454 if (BitWidth == 0) { 10455 ZeroWidthBitFieldCount++; 10456 continue; 10457 } 10458 } 10459 10460 bool Ret = detectFPCCEligibleStructHelper( 10461 QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits), 10462 Field1Ty, Field1Off, Field2Ty, Field2Off); 10463 if (!Ret) 10464 return false; 10465 10466 // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp 10467 // or int+fp structs, but are ignored for a struct with an fp field and 10468 // any number of zero-width bitfields. 10469 if (Field2Ty && ZeroWidthBitFieldCount > 0) 10470 return false; 10471 } 10472 return Field1Ty != nullptr; 10473 } 10474 10475 return false; 10476 } 10477 10478 // Determine if a struct is eligible for passing according to the floating 10479 // point calling convention (i.e., when flattened it contains a single fp 10480 // value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and 10481 // NeededArgGPRs are incremented appropriately. 10482 bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty, 10483 CharUnits &Field1Off, 10484 llvm::Type *&Field2Ty, 10485 CharUnits &Field2Off, 10486 int &NeededArgGPRs, 10487 int &NeededArgFPRs) const { 10488 Field1Ty = nullptr; 10489 Field2Ty = nullptr; 10490 NeededArgGPRs = 0; 10491 NeededArgFPRs = 0; 10492 bool IsCandidate = detectFPCCEligibleStructHelper( 10493 Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off); 10494 // Not really a candidate if we have a single int but no float. 10495 if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy()) 10496 return false; 10497 if (!IsCandidate) 10498 return false; 10499 if (Field1Ty && Field1Ty->isFloatingPointTy()) 10500 NeededArgFPRs++; 10501 else if (Field1Ty) 10502 NeededArgGPRs++; 10503 if (Field2Ty && Field2Ty->isFloatingPointTy()) 10504 NeededArgFPRs++; 10505 else if (Field2Ty) 10506 NeededArgGPRs++; 10507 return IsCandidate; 10508 } 10509 10510 // Call getCoerceAndExpand for the two-element flattened struct described by 10511 // Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an 10512 // appropriate coerceToType and unpaddedCoerceToType. 10513 ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct( 10514 llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty, 10515 CharUnits Field2Off) const { 10516 SmallVector<llvm::Type *, 3> CoerceElts; 10517 SmallVector<llvm::Type *, 2> UnpaddedCoerceElts; 10518 if (!Field1Off.isZero()) 10519 CoerceElts.push_back(llvm::ArrayType::get( 10520 llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity())); 10521 10522 CoerceElts.push_back(Field1Ty); 10523 UnpaddedCoerceElts.push_back(Field1Ty); 10524 10525 if (!Field2Ty) { 10526 return ABIArgInfo::getCoerceAndExpand( 10527 llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()), 10528 UnpaddedCoerceElts[0]); 10529 } 10530 10531 CharUnits Field2Align = 10532 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty)); 10533 CharUnits Field1Size = 10534 CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty)); 10535 CharUnits Field2OffNoPadNoPack = Field1Size.alignTo(Field2Align); 10536 10537 CharUnits Padding = CharUnits::Zero(); 10538 if (Field2Off > Field2OffNoPadNoPack) 10539 Padding = Field2Off - Field2OffNoPadNoPack; 10540 else if (Field2Off != Field2Align && Field2Off > Field1Size) 10541 Padding = Field2Off - Field1Size; 10542 10543 bool IsPacked = !Field2Off.isMultipleOf(Field2Align); 10544 10545 if (!Padding.isZero()) 10546 CoerceElts.push_back(llvm::ArrayType::get( 10547 llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity())); 10548 10549 CoerceElts.push_back(Field2Ty); 10550 UnpaddedCoerceElts.push_back(Field2Ty); 10551 10552 auto CoerceToType = 10553 llvm::StructType::get(getVMContext(), CoerceElts, IsPacked); 10554 auto UnpaddedCoerceToType = 10555 llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked); 10556 10557 return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType); 10558 } 10559 10560 ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed, 10561 int &ArgGPRsLeft, 10562 int &ArgFPRsLeft) const { 10563 assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow"); 10564 Ty = useFirstFieldIfTransparentUnion(Ty); 10565 10566 // Structures with either a non-trivial destructor or a non-trivial 10567 // copy constructor are always passed indirectly. 10568 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 10569 if (ArgGPRsLeft) 10570 ArgGPRsLeft -= 1; 10571 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA == 10572 CGCXXABI::RAA_DirectInMemory); 10573 } 10574 10575 // Ignore empty structs/unions. 10576 if (isEmptyRecord(getContext(), Ty, true)) 10577 return ABIArgInfo::getIgnore(); 10578 10579 uint64_t Size = getContext().getTypeSize(Ty); 10580 10581 // Pass floating point values via FPRs if possible. 10582 if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() && 10583 FLen >= Size && ArgFPRsLeft) { 10584 ArgFPRsLeft--; 10585 return ABIArgInfo::getDirect(); 10586 } 10587 10588 // Complex types for the hard float ABI must be passed direct rather than 10589 // using CoerceAndExpand. 10590 if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) { 10591 QualType EltTy = Ty->castAs<ComplexType>()->getElementType(); 10592 if (getContext().getTypeSize(EltTy) <= FLen) { 10593 ArgFPRsLeft -= 2; 10594 return ABIArgInfo::getDirect(); 10595 } 10596 } 10597 10598 if (IsFixed && FLen && Ty->isStructureOrClassType()) { 10599 llvm::Type *Field1Ty = nullptr; 10600 llvm::Type *Field2Ty = nullptr; 10601 CharUnits Field1Off = CharUnits::Zero(); 10602 CharUnits Field2Off = CharUnits::Zero(); 10603 int NeededArgGPRs; 10604 int NeededArgFPRs; 10605 bool IsCandidate = 10606 detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off, 10607 NeededArgGPRs, NeededArgFPRs); 10608 if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft && 10609 NeededArgFPRs <= ArgFPRsLeft) { 10610 ArgGPRsLeft -= NeededArgGPRs; 10611 ArgFPRsLeft -= NeededArgFPRs; 10612 return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty, 10613 Field2Off); 10614 } 10615 } 10616 10617 uint64_t NeededAlign = getContext().getTypeAlign(Ty); 10618 bool MustUseStack = false; 10619 // Determine the number of GPRs needed to pass the current argument 10620 // according to the ABI. 2*XLen-aligned varargs are passed in "aligned" 10621 // register pairs, so may consume 3 registers. 10622 int NeededArgGPRs = 1; 10623 if (!IsFixed && NeededAlign == 2 * XLen) 10624 NeededArgGPRs = 2 + (ArgGPRsLeft % 2); 10625 else if (Size > XLen && Size <= 2 * XLen) 10626 NeededArgGPRs = 2; 10627 10628 if (NeededArgGPRs > ArgGPRsLeft) { 10629 MustUseStack = true; 10630 NeededArgGPRs = ArgGPRsLeft; 10631 } 10632 10633 ArgGPRsLeft -= NeededArgGPRs; 10634 10635 if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) { 10636 // Treat an enum type as its underlying type. 10637 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 10638 Ty = EnumTy->getDecl()->getIntegerType(); 10639 10640 // All integral types are promoted to XLen width, unless passed on the 10641 // stack. 10642 if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) { 10643 return extendType(Ty); 10644 } 10645 10646 if (const auto *EIT = Ty->getAs<ExtIntType>()) { 10647 if (EIT->getNumBits() < XLen && !MustUseStack) 10648 return extendType(Ty); 10649 if (EIT->getNumBits() > 128 || 10650 (!getContext().getTargetInfo().hasInt128Type() && 10651 EIT->getNumBits() > 64)) 10652 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 10653 } 10654 10655 return ABIArgInfo::getDirect(); 10656 } 10657 10658 // Aggregates which are <= 2*XLen will be passed in registers if possible, 10659 // so coerce to integers. 10660 if (Size <= 2 * XLen) { 10661 unsigned Alignment = getContext().getTypeAlign(Ty); 10662 10663 // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is 10664 // required, and a 2-element XLen array if only XLen alignment is required. 10665 if (Size <= XLen) { 10666 return ABIArgInfo::getDirect( 10667 llvm::IntegerType::get(getVMContext(), XLen)); 10668 } else if (Alignment == 2 * XLen) { 10669 return ABIArgInfo::getDirect( 10670 llvm::IntegerType::get(getVMContext(), 2 * XLen)); 10671 } else { 10672 return ABIArgInfo::getDirect(llvm::ArrayType::get( 10673 llvm::IntegerType::get(getVMContext(), XLen), 2)); 10674 } 10675 } 10676 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 10677 } 10678 10679 ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const { 10680 if (RetTy->isVoidType()) 10681 return ABIArgInfo::getIgnore(); 10682 10683 int ArgGPRsLeft = 2; 10684 int ArgFPRsLeft = FLen ? 2 : 0; 10685 10686 // The rules for return and argument types are the same, so defer to 10687 // classifyArgumentType. 10688 return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft, 10689 ArgFPRsLeft); 10690 } 10691 10692 Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 10693 QualType Ty) const { 10694 CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8); 10695 10696 // Empty records are ignored for parameter passing purposes. 10697 if (isEmptyRecord(getContext(), Ty, true)) { 10698 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize); 10699 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); 10700 return Addr; 10701 } 10702 10703 std::pair<CharUnits, CharUnits> SizeAndAlign = 10704 getContext().getTypeInfoInChars(Ty); 10705 10706 // Arguments bigger than 2*Xlen bytes are passed indirectly. 10707 bool IsIndirect = SizeAndAlign.first > 2 * SlotSize; 10708 10709 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, SizeAndAlign, 10710 SlotSize, /*AllowHigherAlign=*/true); 10711 } 10712 10713 ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const { 10714 int TySize = getContext().getTypeSize(Ty); 10715 // RV64 ABI requires unsigned 32 bit integers to be sign extended. 10716 if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32) 10717 return ABIArgInfo::getSignExtend(Ty); 10718 return ABIArgInfo::getExtend(Ty); 10719 } 10720 10721 namespace { 10722 class RISCVTargetCodeGenInfo : public TargetCodeGenInfo { 10723 public: 10724 RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, 10725 unsigned FLen) 10726 : TargetCodeGenInfo(std::make_unique<RISCVABIInfo>(CGT, XLen, FLen)) {} 10727 10728 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 10729 CodeGen::CodeGenModule &CGM) const override { 10730 const auto *FD = dyn_cast_or_null<FunctionDecl>(D); 10731 if (!FD) return; 10732 10733 const auto *Attr = FD->getAttr<RISCVInterruptAttr>(); 10734 if (!Attr) 10735 return; 10736 10737 const char *Kind; 10738 switch (Attr->getInterrupt()) { 10739 case RISCVInterruptAttr::user: Kind = "user"; break; 10740 case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break; 10741 case RISCVInterruptAttr::machine: Kind = "machine"; break; 10742 } 10743 10744 auto *Fn = cast<llvm::Function>(GV); 10745 10746 Fn->addFnAttr("interrupt", Kind); 10747 } 10748 }; 10749 } // namespace 10750 10751 //===----------------------------------------------------------------------===// 10752 // VE ABI Implementation. 10753 // 10754 namespace { 10755 class VEABIInfo : public DefaultABIInfo { 10756 public: 10757 VEABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 10758 10759 private: 10760 ABIArgInfo classifyReturnType(QualType RetTy) const; 10761 ABIArgInfo classifyArgumentType(QualType RetTy) const; 10762 void computeInfo(CGFunctionInfo &FI) const override; 10763 }; 10764 } // end anonymous namespace 10765 10766 ABIArgInfo VEABIInfo::classifyReturnType(QualType Ty) const { 10767 if (Ty->isAnyComplexType()) 10768 return ABIArgInfo::getDirect(); 10769 uint64_t Size = getContext().getTypeSize(Ty); 10770 if (Size < 64 && Ty->isIntegerType()) 10771 return ABIArgInfo::getExtend(Ty); 10772 return DefaultABIInfo::classifyReturnType(Ty); 10773 } 10774 10775 ABIArgInfo VEABIInfo::classifyArgumentType(QualType Ty) const { 10776 if (Ty->isAnyComplexType()) 10777 return ABIArgInfo::getDirect(); 10778 uint64_t Size = getContext().getTypeSize(Ty); 10779 if (Size < 64 && Ty->isIntegerType()) 10780 return ABIArgInfo::getExtend(Ty); 10781 return DefaultABIInfo::classifyArgumentType(Ty); 10782 } 10783 10784 void VEABIInfo::computeInfo(CGFunctionInfo &FI) const { 10785 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 10786 for (auto &Arg : FI.arguments()) 10787 Arg.info = classifyArgumentType(Arg.type); 10788 } 10789 10790 namespace { 10791 class VETargetCodeGenInfo : public TargetCodeGenInfo { 10792 public: 10793 VETargetCodeGenInfo(CodeGenTypes &CGT) 10794 : TargetCodeGenInfo(std::make_unique<VEABIInfo>(CGT)) {} 10795 // VE ABI requires the arguments of variadic and prototype-less functions 10796 // are passed in both registers and memory. 10797 bool isNoProtoCallVariadic(const CallArgList &args, 10798 const FunctionNoProtoType *fnType) const override { 10799 return true; 10800 } 10801 }; 10802 } // end anonymous namespace 10803 10804 //===----------------------------------------------------------------------===// 10805 // Driver code 10806 //===----------------------------------------------------------------------===// 10807 10808 bool CodeGenModule::supportsCOMDAT() const { 10809 return getTriple().supportsCOMDAT(); 10810 } 10811 10812 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { 10813 if (TheTargetCodeGenInfo) 10814 return *TheTargetCodeGenInfo; 10815 10816 // Helper to set the unique_ptr while still keeping the return value. 10817 auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & { 10818 this->TheTargetCodeGenInfo.reset(P); 10819 return *P; 10820 }; 10821 10822 const llvm::Triple &Triple = getTarget().getTriple(); 10823 switch (Triple.getArch()) { 10824 default: 10825 return SetCGInfo(new DefaultTargetCodeGenInfo(Types)); 10826 10827 case llvm::Triple::le32: 10828 return SetCGInfo(new PNaClTargetCodeGenInfo(Types)); 10829 case llvm::Triple::mips: 10830 case llvm::Triple::mipsel: 10831 if (Triple.getOS() == llvm::Triple::NaCl) 10832 return SetCGInfo(new PNaClTargetCodeGenInfo(Types)); 10833 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true)); 10834 10835 case llvm::Triple::mips64: 10836 case llvm::Triple::mips64el: 10837 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false)); 10838 10839 case llvm::Triple::avr: 10840 return SetCGInfo(new AVRTargetCodeGenInfo(Types)); 10841 10842 case llvm::Triple::aarch64: 10843 case llvm::Triple::aarch64_32: 10844 case llvm::Triple::aarch64_be: { 10845 AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS; 10846 if (getTarget().getABI() == "darwinpcs") 10847 Kind = AArch64ABIInfo::DarwinPCS; 10848 else if (Triple.isOSWindows()) 10849 return SetCGInfo( 10850 new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64)); 10851 10852 return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind)); 10853 } 10854 10855 case llvm::Triple::wasm32: 10856 case llvm::Triple::wasm64: { 10857 WebAssemblyABIInfo::ABIKind Kind = WebAssemblyABIInfo::MVP; 10858 if (getTarget().getABI() == "experimental-mv") 10859 Kind = WebAssemblyABIInfo::ExperimentalMV; 10860 return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types, Kind)); 10861 } 10862 10863 case llvm::Triple::arm: 10864 case llvm::Triple::armeb: 10865 case llvm::Triple::thumb: 10866 case llvm::Triple::thumbeb: { 10867 if (Triple.getOS() == llvm::Triple::Win32) { 10868 return SetCGInfo( 10869 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP)); 10870 } 10871 10872 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; 10873 StringRef ABIStr = getTarget().getABI(); 10874 if (ABIStr == "apcs-gnu") 10875 Kind = ARMABIInfo::APCS; 10876 else if (ABIStr == "aapcs16") 10877 Kind = ARMABIInfo::AAPCS16_VFP; 10878 else if (CodeGenOpts.FloatABI == "hard" || 10879 (CodeGenOpts.FloatABI != "soft" && 10880 (Triple.getEnvironment() == llvm::Triple::GNUEABIHF || 10881 Triple.getEnvironment() == llvm::Triple::MuslEABIHF || 10882 Triple.getEnvironment() == llvm::Triple::EABIHF))) 10883 Kind = ARMABIInfo::AAPCS_VFP; 10884 10885 return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind)); 10886 } 10887 10888 case llvm::Triple::ppc: { 10889 if (Triple.isOSAIX()) 10890 return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ false)); 10891 10892 bool IsSoftFloat = 10893 CodeGenOpts.FloatABI == "soft" || getTarget().hasFeature("spe"); 10894 bool RetSmallStructInRegABI = 10895 PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts); 10896 return SetCGInfo( 10897 new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI)); 10898 } 10899 case llvm::Triple::ppc64: 10900 if (Triple.isOSAIX()) 10901 return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ true)); 10902 10903 if (Triple.isOSBinFormatELF()) { 10904 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1; 10905 if (getTarget().getABI() == "elfv2") 10906 Kind = PPC64_SVR4_ABIInfo::ELFv2; 10907 bool HasQPX = getTarget().getABI() == "elfv1-qpx"; 10908 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft"; 10909 10910 return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX, 10911 IsSoftFloat)); 10912 } 10913 return SetCGInfo(new PPC64TargetCodeGenInfo(Types)); 10914 case llvm::Triple::ppc64le: { 10915 assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!"); 10916 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2; 10917 if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx") 10918 Kind = PPC64_SVR4_ABIInfo::ELFv1; 10919 bool HasQPX = getTarget().getABI() == "elfv1-qpx"; 10920 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft"; 10921 10922 return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX, 10923 IsSoftFloat)); 10924 } 10925 10926 case llvm::Triple::nvptx: 10927 case llvm::Triple::nvptx64: 10928 return SetCGInfo(new NVPTXTargetCodeGenInfo(Types)); 10929 10930 case llvm::Triple::msp430: 10931 return SetCGInfo(new MSP430TargetCodeGenInfo(Types)); 10932 10933 case llvm::Triple::riscv32: 10934 case llvm::Triple::riscv64: { 10935 StringRef ABIStr = getTarget().getABI(); 10936 unsigned XLen = getTarget().getPointerWidth(0); 10937 unsigned ABIFLen = 0; 10938 if (ABIStr.endswith("f")) 10939 ABIFLen = 32; 10940 else if (ABIStr.endswith("d")) 10941 ABIFLen = 64; 10942 return SetCGInfo(new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen)); 10943 } 10944 10945 case llvm::Triple::systemz: { 10946 bool SoftFloat = CodeGenOpts.FloatABI == "soft"; 10947 bool HasVector = !SoftFloat && getTarget().getABI() == "vector"; 10948 return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector, SoftFloat)); 10949 } 10950 10951 case llvm::Triple::tce: 10952 case llvm::Triple::tcele: 10953 return SetCGInfo(new TCETargetCodeGenInfo(Types)); 10954 10955 case llvm::Triple::x86: { 10956 bool IsDarwinVectorABI = Triple.isOSDarwin(); 10957 bool RetSmallStructInRegABI = 10958 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts); 10959 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing(); 10960 10961 if (Triple.getOS() == llvm::Triple::Win32) { 10962 return SetCGInfo(new WinX86_32TargetCodeGenInfo( 10963 Types, IsDarwinVectorABI, RetSmallStructInRegABI, 10964 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters)); 10965 } else { 10966 return SetCGInfo(new X86_32TargetCodeGenInfo( 10967 Types, IsDarwinVectorABI, RetSmallStructInRegABI, 10968 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters, 10969 CodeGenOpts.FloatABI == "soft")); 10970 } 10971 } 10972 10973 case llvm::Triple::x86_64: { 10974 StringRef ABI = getTarget().getABI(); 10975 X86AVXABILevel AVXLevel = 10976 (ABI == "avx512" 10977 ? X86AVXABILevel::AVX512 10978 : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None); 10979 10980 switch (Triple.getOS()) { 10981 case llvm::Triple::Win32: 10982 return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel)); 10983 default: 10984 return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel)); 10985 } 10986 } 10987 case llvm::Triple::hexagon: 10988 return SetCGInfo(new HexagonTargetCodeGenInfo(Types)); 10989 case llvm::Triple::lanai: 10990 return SetCGInfo(new LanaiTargetCodeGenInfo(Types)); 10991 case llvm::Triple::r600: 10992 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types)); 10993 case llvm::Triple::amdgcn: 10994 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types)); 10995 case llvm::Triple::sparc: 10996 return SetCGInfo(new SparcV8TargetCodeGenInfo(Types)); 10997 case llvm::Triple::sparcv9: 10998 return SetCGInfo(new SparcV9TargetCodeGenInfo(Types)); 10999 case llvm::Triple::xcore: 11000 return SetCGInfo(new XCoreTargetCodeGenInfo(Types)); 11001 case llvm::Triple::arc: 11002 return SetCGInfo(new ARCTargetCodeGenInfo(Types)); 11003 case llvm::Triple::spir: 11004 case llvm::Triple::spir64: 11005 return SetCGInfo(new SPIRTargetCodeGenInfo(Types)); 11006 case llvm::Triple::ve: 11007 return SetCGInfo(new VETargetCodeGenInfo(Types)); 11008 } 11009 } 11010 11011 /// Create an OpenCL kernel for an enqueued block. 11012 /// 11013 /// The kernel has the same function type as the block invoke function. Its 11014 /// name is the name of the block invoke function postfixed with "_kernel". 11015 /// It simply calls the block invoke function then returns. 11016 llvm::Function * 11017 TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF, 11018 llvm::Function *Invoke, 11019 llvm::Value *BlockLiteral) const { 11020 auto *InvokeFT = Invoke->getFunctionType(); 11021 llvm::SmallVector<llvm::Type *, 2> ArgTys; 11022 for (auto &P : InvokeFT->params()) 11023 ArgTys.push_back(P); 11024 auto &C = CGF.getLLVMContext(); 11025 std::string Name = Invoke->getName().str() + "_kernel"; 11026 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false); 11027 auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name, 11028 &CGF.CGM.getModule()); 11029 auto IP = CGF.Builder.saveIP(); 11030 auto *BB = llvm::BasicBlock::Create(C, "entry", F); 11031 auto &Builder = CGF.Builder; 11032 Builder.SetInsertPoint(BB); 11033 llvm::SmallVector<llvm::Value *, 2> Args; 11034 for (auto &A : F->args()) 11035 Args.push_back(&A); 11036 Builder.CreateCall(Invoke, Args); 11037 Builder.CreateRetVoid(); 11038 Builder.restoreIP(IP); 11039 return F; 11040 } 11041 11042 /// Create an OpenCL kernel for an enqueued block. 11043 /// 11044 /// The type of the first argument (the block literal) is the struct type 11045 /// of the block literal instead of a pointer type. The first argument 11046 /// (block literal) is passed directly by value to the kernel. The kernel 11047 /// allocates the same type of struct on stack and stores the block literal 11048 /// to it and passes its pointer to the block invoke function. The kernel 11049 /// has "enqueued-block" function attribute and kernel argument metadata. 11050 llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel( 11051 CodeGenFunction &CGF, llvm::Function *Invoke, 11052 llvm::Value *BlockLiteral) const { 11053 auto &Builder = CGF.Builder; 11054 auto &C = CGF.getLLVMContext(); 11055 11056 auto *BlockTy = BlockLiteral->getType()->getPointerElementType(); 11057 auto *InvokeFT = Invoke->getFunctionType(); 11058 llvm::SmallVector<llvm::Type *, 2> ArgTys; 11059 llvm::SmallVector<llvm::Metadata *, 8> AddressQuals; 11060 llvm::SmallVector<llvm::Metadata *, 8> AccessQuals; 11061 llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames; 11062 llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames; 11063 llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals; 11064 llvm::SmallVector<llvm::Metadata *, 8> ArgNames; 11065 11066 ArgTys.push_back(BlockTy); 11067 ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal")); 11068 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0))); 11069 ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal")); 11070 ArgTypeQuals.push_back(llvm::MDString::get(C, "")); 11071 AccessQuals.push_back(llvm::MDString::get(C, "none")); 11072 ArgNames.push_back(llvm::MDString::get(C, "block_literal")); 11073 for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) { 11074 ArgTys.push_back(InvokeFT->getParamType(I)); 11075 ArgTypeNames.push_back(llvm::MDString::get(C, "void*")); 11076 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3))); 11077 AccessQuals.push_back(llvm::MDString::get(C, "none")); 11078 ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*")); 11079 ArgTypeQuals.push_back(llvm::MDString::get(C, "")); 11080 ArgNames.push_back( 11081 llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str())); 11082 } 11083 std::string Name = Invoke->getName().str() + "_kernel"; 11084 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false); 11085 auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name, 11086 &CGF.CGM.getModule()); 11087 F->addFnAttr("enqueued-block"); 11088 auto IP = CGF.Builder.saveIP(); 11089 auto *BB = llvm::BasicBlock::Create(C, "entry", F); 11090 Builder.SetInsertPoint(BB); 11091 const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy); 11092 auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr); 11093 BlockPtr->setAlignment(BlockAlign); 11094 Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign); 11095 auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0)); 11096 llvm::SmallVector<llvm::Value *, 2> Args; 11097 Args.push_back(Cast); 11098 for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I) 11099 Args.push_back(I); 11100 Builder.CreateCall(Invoke, Args); 11101 Builder.CreateRetVoid(); 11102 Builder.restoreIP(IP); 11103 11104 F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals)); 11105 F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals)); 11106 F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames)); 11107 F->setMetadata("kernel_arg_base_type", 11108 llvm::MDNode::get(C, ArgBaseTypeNames)); 11109 F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals)); 11110 if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata) 11111 F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames)); 11112 11113 return F; 11114 } 11115