//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/Attr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm> // std::sort

using namespace clang;
using namespace CodeGen;

// Helper for coercing an aggregate argument or return value into an integer
// array of the same size (including padding) and alignment. This alternate
// coercion happens only for the RenderScript ABI and can be removed after
// runtimes that rely on it are no longer supported.
//
// RenderScript assumes that the size of the argument / return value in the IR
// is the same as the size of the corresponding qualified type. This helper
// coerces the aggregate type into an array of the same size (including
// padding). This coercion is used in lieu of expansion of struct members or
// other canonical coercions that return a coerced-type of larger size.
//
// Ty          - The argument / return value type
// Context     - The associated ASTContext
// LLVMContext - The associated LLVMContext
static ABIArgInfo coerceToIntArray(QualType Ty, ASTContext &Context,
                                   llvm::LLVMContext &LLVMContext) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}
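
// For illustration: a 12-byte aggregate such as
//
//   struct RGB { int r; int g; int b; };
//
// has Size == 96 and Alignment == 32 here, so the coercion above yields
// ABIArgInfo::getDirect([3 x i32]) -- an integer array exactly covering the
// aggregate, padding included.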

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array, llvm::Value *Value,
                               unsigned FirstIndex, unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal,
                                            bool Realign,
                                            llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal,
                                 Realign, Padding);
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
                                      /*ByVal=*/false, Realign);
}

Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty) const {
  return Address::invalid();
}

bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
  if (Ty->isPromotableIntegerType())
    return true;

  if (const auto *EIT = Ty->getAs<ExtIntType>())
    if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
      return true;

  return false;
}

ABIInfo::~ABIInfo() {}

/// Does the given lowering require more than the given number of
/// registers when expanded?
///
/// This is intended to be the basis of a reasonable basic implementation
/// of should{Pass,Return}IndirectlyForSwift.
///
/// For most targets, a limit of four total registers is reasonable; this
/// limits the amount of code required in order to move around the value
/// in case it wasn't produced immediately prior to the call by the caller
/// (or wasn't produced in exactly the right registers) or isn't used
/// immediately within the callee. But some targets may need to further
/// limit the register count due to an inability to support that many
/// return registers.
static bool occupiesMoreThan(CodeGenTypes &cgt,
                             ArrayRef<llvm::Type *> scalarTypes,
                             unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;
  for (llvm::Type *type : scalarTypes) {
    if (type->isPointerTy()) {
      intCount++;
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      auto ptrWidth = cgt.getTarget().getPointerWidth(0);
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
      fpCount++;
    }
  }

  return (intCount + fpCount > maxAllRegisters);
}
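
// For illustration: on a target with 64-bit pointers, the scalar sequence
// { i8*, i128, float } counts as 1 + 2 + 1 = 4 registers above, so it does
// not occupy more than a four-register limit, but it does occupy more than
// a three-register one.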

bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                             llvm::Type *eltTy,
                                             unsigned numElts) const {
  // The default implementation of this assumes that the target guarantees
  // 128-bit SIMD support but nothing more.
  return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD) {
    if (!RT->getDecl()->canPassInRegisters())
      return CGCXXABI::RAA_Indirect;
    return CGCXXABI::RAA_Default;
  }
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T, CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
                               const ABIInfo &Info) {
  QualType Ty = FI.getReturnType();

  if (const auto *RT = Ty->getAs<RecordType>())
    if (!isa<CXXRecordDecl>(RT->getDecl()) &&
        !RT->getDecl()->canPassInRegisters()) {
      FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
      return true;
    }

  return CXXABI.classifyReturnType(FI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}

CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
  return CGT.getCodeGenOpts();
}

bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case IndirectAliased:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " AddrSpace=" << getIndirectAddrSpace()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  case CoerceAndExpand:
    OS << "CoerceAndExpand Type=";
    getCoerceAndExpandType()->print(OS);
    break;
  }
  OS << ")\n";
}

// Dynamically round a pointer up to a multiple of the given alignment.
static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                  llvm::Value *Ptr,
                                                  CharUnits Align) {
  llvm::Value *PtrAsInt = Ptr;
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
  PtrAsInt = CGF.Builder.CreateAdd(
      PtrAsInt, llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
  PtrAsInt = CGF.Builder.CreateAnd(
      PtrAsInt, llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt, Ptr->getType(),
                                        Ptr->getName() + ".aligned");
  return PtrAsInt;
}
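
// For illustration: rounding the pointer value 0x1004 up to 8-byte alignment
// computes (0x1004 + 7) & -8 == 0x1008.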

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// This version implements the core direct-value passing rules.
///
/// \param SlotSize - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding. If this
///   is false, the returned address might be less-aligned than
///   DirectAlign.
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
                                      Address VAListAddr, llvm::Type *DirectTy,
                                      CharUnits DirectSize,
                                      CharUnits DirectAlign,
                                      CharUnits SlotSize,
                                      bool AllowHigherAlign) {
  // Cast the element type to i8* if necessary. Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   DirectAlign);
  } else {
    Addr = Address(Ptr, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
  Address NextPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
  CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
      !DirectTy->isStructTy()) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
  return Addr;
}
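
// For illustration: loading a 4-byte int from an 8-byte big-endian slot, the
// code above advances the address by SlotSize - DirectSize == 4 bytes so that
// the load reads the low-order (right-adjusted) half of the slot.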

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// \param IsIndirect - Values of this type are passed indirectly.
/// \param ValueInfo - The size and alignment of this type, generally
///   computed with getContext().getTypeInfoInChars(ValueTy).
/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType ValueTy, bool IsIndirect,
                                std::pair<CharUnits, CharUnits> ValueInfo,
                                CharUnits SlotSizeAndAlign,
                                bool AllowHigherAlign) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.first;
    DirectAlign = ValueInfo.second;
  }

  // Cast the address we've calculated to the right type.
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
  if (IsIndirect)
    DirectTy = DirectTy->getPointerTo(0);

  Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy, DirectSize,
                                        DirectAlign, SlotSizeAndAlign,
                                        AllowHigherAlign);

  if (IsIndirect) {
    Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
  }

  return Addr;
}

static Address emitMergePHI(CodeGenFunction &CGF,
                            Address Addr1, llvm::BasicBlock *Block1,
                            Address Addr2, llvm::BasicBlock *Block2,
                            const llvm::Twine &Name = "") {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.getPointer(), Block1);
  PHI->addIncoming(Addr2.getPointer(), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Align);
}

TargetCodeGenInfo::~TargetCodeGenInfo() = default;

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64 FreeBSD, Linux, Darwin
  //   x86-32 FreeBSD, Linux, Darwin
  //   PowerPC Linux, Darwin
  //   ARM Darwin (*not* EABI)
  //   AArch64 Linux
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(
    const CallArgList &args, const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void TargetCodeGenInfo::getDependentLibraryOption(
    llvm::StringRef Lib, llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static
  // or dynamic.
  Opt = "-l";
  Opt += Lib;
}
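
// For illustration: Lib == "rt" produces Opt == "-lrt".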

unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  // OpenCL kernels are called via an explicit runtime API with arguments
  // set with clSetKernelArg(), not as normal sub-functions.
  // Return SPIR_KERNEL by default as the kernel calling convention to
  // ensure the fingerprint is fixed in such a way that each OpenCL argument
  // gets one matching argument in the produced kernel function argument
  // list. This makes a feasible implementation of clSetKernelArg() with
  // aggregates etc. possible. If we used the default C calling convention
  // here, clSetKernelArg() might break depending on the target-specific
  // conventions; different targets might split structs passed as values
  // into multiple function arguments etc.
  return llvm::CallingConv::SPIR_KERNEL;
}

llvm::Constant *TargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *T,
    QualType QT) const {
  return llvm::ConstantPointerNull::get(T);
}

LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                   const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  return D ? D->getType().getAddressSpace() : LangAS::Default;
}

llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr,
    LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const {
  // Since the target may map different address spaces in the AST to the same
  // address space, an address space conversion may end up as a bitcast.
  if (auto *C = dyn_cast<llvm::Constant>(Src))
    return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
  // Try to preserve the source's name to make IR more readable.
  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");
}

llvm::Constant *
TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
                                        LangAS SrcAddr, LangAS DestAddr,
                                        llvm::Type *DestTy) const {
  // Since the target may map different address spaces in the AST to the same
  // address space, an address space conversion may end up as a bitcast.
  return llvm::ConstantExpr::getPointerCast(Src, DestTy);
}

llvm::SyncScope::ID
TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                      SyncScope Scope,
                                      llvm::AtomicOrdering Ordering,
                                      llvm::LLVMContext &Ctx) const {
  return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it is an
/// unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  bool WasArray = false;
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
      // The [[no_unique_address]] special case below does not apply to
      // arrays of C++ empty records, so we need to remember this fact.
      WasArray = true;
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  //
  // The exceptions to the above rule are fields marked with the
  // [[no_unique_address]] attribute (since C++20). Those do count as empty
  // according to the Itanium ABI. The exception applies only to records,
  // not arrays of records, so we must also check whether we stripped off an
  // array type above.
  if (isa<CXXRecordDecl>(RT->getDecl()) &&
      (WasArray || !FD->hasAttr<NoUniqueAddressAttr>()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}
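
// For illustration: an unnamed bit-field such as "int : 0" is always an
// empty field, and (since C++20) so is a C++ empty-record field declared
// [[no_unique_address]], e.g.
//
//   struct E {};
//   struct S { [[no_unique_address]] E e; int x; };
//
// where only "x" is a non-empty field of S.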

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}
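
// For illustration: both of the following are single-element structs whose
// element type is 'double', because empty fields are ignored and
// single-element arrays are looked through:
//
//   struct A { double d; };
//   struct B { double d[1]; };
//
// whereas "struct C { double d; int i; };" is not.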

namespace {
Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                       const ABIArgInfo &AI) {
  // This default implementation defers to the llvm backend's va_arg
  // instruction. It can handle only passing arguments directly
  // (typically only handled in the backend for primitive types), or
  // aggregates passed indirectly by pointer (NOTE: if the "byval"
  // flag has ABI impact in the callee, this implementation cannot
  // work.)

  // Only a few cases are covered here at the moment -- those needed
  // by the default ABI.
  llvm::Value *Val;

  if (AI.isIndirect()) {
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(
        !AI.getIndirectRealign() &&
        "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

    auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
    CharUnits TyAlignForABI = TyInfo.second;

    llvm::Type *BaseTy =
        llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
    llvm::Value *Addr =
        CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
    return Address(Addr, TyAlignForABI);
  } else {
    assert((AI.isDirect() || AI.isExtend()) &&
           "Unexpected ArgInfo Kind in generic VAArg emitter!");

    assert(!AI.getInReg() &&
           "Unexpected InReg seen in arginfo in generic VAArg emitter!");
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(!AI.getDirectOffset() &&
           "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
    assert(!AI.getCoerceToType() &&
           "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");

    Address Temp = CGF.CreateMemTemp(Ty, "varet");
    Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
    CGF.Builder.CreateStore(Val, Temp);
    return Temp;
  }
}
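
// For illustration: for a plain 'int' argument classified getDirect(), the
// direct path above emits roughly
//
//   %varet = alloca i32
//   %v = va_arg i8** %ap, i32
//   store i32 %v, i32* %varet
//
// and returns the temporary's address.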

/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
  }
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
};

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    return getNaturalAlignIndirect(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  ASTContext &Context = getContext();
  if (const auto *EIT = Ty->getAs<ExtIntType>())
    if (EIT->getNumBits() >
        Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
                                ? Context.Int128Ty
                                : Context.LongLongTy))
      return getNaturalAlignIndirect(Ty);

  return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                            : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  if (const auto *EIT = RetTy->getAs<ExtIntType>())
    if (EIT->getNumBits() >
        getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
                                     ? getContext().Int128Ty
                                     : getContext().LongLongTy))
      return getNaturalAlignIndirect(RetTy);

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}
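
// For illustration: under this default classification, 'short' is widened
// with getExtend(), 'double' stays getDirect(), and an _ExtInt wider than the
// largest supported integer type (128 bits when the target has int128) is
// passed and returned indirectly.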

//===----------------------------------------------------------------------===//
// WebAssembly ABI Implementation
//
// This is a very simple ABI that relies a lot on DefaultABIInfo.
//===----------------------------------------------------------------------===//

class WebAssemblyABIInfo final : public SwiftABIInfo {
public:
  enum ABIKind {
    MVP = 0,
    ExperimentalMV = 1,
  };

private:
  DefaultABIInfo defaultInfo;
  ABIKind Kind;

public:
  explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
      : SwiftABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo and EmitVAArg are virtual, so we
  // override them.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &Arg : FI.arguments())
      Arg.info = classifyArgumentType(Arg.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total=*/4);
  }

  bool isSwiftErrorInRegister() const override {
    return false;
  }
};

class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
  explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                                        WebAssemblyABIInfo::ABIKind K)
      : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B;
        B.addAttribute("wasm-import-module", Attr->getImportModule());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
      }
      if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B;
        B.addAttribute("wasm-import-name", Attr->getImportName());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
      }
      if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B;
        B.addAttribute("wasm-export-name", Attr->getExportName());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
      }
    }

    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
        Fn->addFnAttr("no-prototype");
    }
  }
};
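
// For illustration: a declaration such as
//
//   __attribute__((import_module("env"), import_name("foo"))) void foo();
//
// receives the IR function attributes "wasm-import-module"="env" and
// "wasm-import-name"="foo" via setTargetAttributes() above.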

/// Classify argument of given type \p Ty.
ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();
    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using
    // getExpand(), though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
    // For the experimental multivalue ABI, fully expand all other aggregates.
    if (Kind == ABIKind::ExperimentalMV) {
      const RecordType *RT = Ty->getAs<RecordType>();
      assert(RT);
      bool HasBitField = false;
      for (auto *Field : RT->getDecl()->fields()) {
        if (Field->isBitField()) {
          HasBitField = true;
          break;
        }
      }
      if (!HasBitField)
        return ABIArgInfo::getExpand();
    }
  }

  // Otherwise just do the default thing.
  return defaultInfo.classifyArgumentType(Ty);
}

ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();
      // Lower single-element structs to just return a regular value. TODO: We
      // could do reasonable-size multiple-element structs too, using
      // ABIArgInfo::getDirect().
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
      // For the experimental multivalue ABI, return all other aggregates
      // directly.
      if (Kind == ABIKind::ExperimentalMV)
        return ABIArgInfo::getDirect();
    }
  }

  // Otherwise just do the default thing.
  return defaultInfo.classifyReturnType(RetTy);
}

Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  bool IsIndirect = isAggregateTypeForABI(Ty) &&
                    !isEmptyRecord(getContext(), Ty, true) &&
                    !isSingleElementStruct(Ty, getContext());
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/true);
}
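
// For illustration: "struct S { float f; };" is passed and returned directly
// as a float; "struct P { int a, b; };" is passed indirectly under the MVP
// ABI but expanded into two i32 arguments under the experimental multivalue
// ABI (it has no bit-fields).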

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF,
                    Address VAListAddr, QualType Ty) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  // The PNaCl ABI is a bit odd, in that varargs don't use normal
  // function classification. Structs get passed directly for varargs
  // functions, through a rewriting transform in
  // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
  // this target to actually support a va_arg instruction with an
  // aggregate type, unlike other targets.
  return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
}

/// Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    return getNaturalAlignIndirect(Ty);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  } else if (const auto *EIT = Ty->getAs<ExtIntType>()) {
    // Treat extended integers as integers if <= 64 bits, otherwise pass
    // indirectly.
    if (EIT->getNumBits() > 64)
      return getNaturalAlignIndirect(Ty);
    return ABIArgInfo::getDirect();
  }

  return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                            : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat extended integers as integers if <= 64 bits, otherwise return
  // indirectly.
  if (const auto *EIT = RetTy->getAs<ExtIntType>()) {
    if (EIT->getNumBits() > 64)
      return getNaturalAlignIndirect(RetTy);
    return ABIArgInfo::getDirect();
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
         cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
         IRType->getScalarSizeInBits() != 64;
}
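
// For illustration: <8 x i8>, <4 x i16>, and <2 x i32> are MMX types by this
// predicate; <1 x i64> is not (its scalar size is 64 bits), and <2 x float>
// is not (its elements are not integers).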

static llvm::Type *X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type *Ty) {
  bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
                       .Cases("y", "&y", "^Ym", true)
                       .Default(false);
  if (IsMMXCons && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() !=
        64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
      if (BT->getKind() == BuiltinType::LongDouble) {
        if (&Context.getTargetInfo().getLongDoubleFormat() ==
            &llvm::APFloat::x87DoubleExtended())
          return false;
      }
      return true;
    }
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE
/// registers in the X86_VectorCall calling convention. Shared between x86_32
/// and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}

/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
static ABIArgInfo getDirectX86Hva(llvm::Type *T = nullptr) {
  auto AI = ABIArgInfo::getDirect(T);
  AI.setInReg(true);
  AI.setCanBeFlattened(false);
  return AI;
}
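
// For illustration: float, double, and 128/256/512-bit vector types qualify
// for SSE registers under vectorcall; x87 80-bit long double and half do
// not, and an HVA may have at most four such members.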

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(CGFunctionInfo &FI)
      : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {}

  llvm::SmallBitVector IsPreassigned;
  unsigned CC = CallingConv::CC_C;
  unsigned FreeRegs = 0;
  unsigned FreeSSERegs = 0;
};

enum {
  // Vectorcall only allows the first 6 parameters to be passed in registers.
  VectorcallMaxParamNumAsReg = 6
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  /// Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;

  /// Updates the number of available free registers, returns
  /// true if any registers were allocated.
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  /// Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;
  void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;

public:
  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
      : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI),
        IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
        IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
        DefaultNumRegisterParameters(NumRegisterParameters) {}

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
                                    bool asReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three
    // integer registers and three fp registers. Oddly, it'll use up to
    // four vector registers for vectors, but those can overlap with the
    // scalar registers.
    return occupiesMoreThan(CGT, scalars, /*total=*/3);
  }

  bool isSwiftErrorInRegister() const override {
    // x86-32 lowering does not support passing swifterror in a register.
    return false;
  }
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin())
      return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type *Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) | // jmp rel8
                   (0x06 << 8) | // .+0x08
                   ('v' << 16) |
                   ('2' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
};

} // namespace

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///   mov $0, $1
///   mov eax, $1
/// The result will be:
///   mov $0, $2
///   mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      if (AsmString[DigitStart] == '{') {
        OS << '{';
        ++DigitStart;
      }
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(CGF),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}
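
// For illustration: for a 64-bit return value the code above appends the
// constraint "=A" (EAX:EDX) and records an i64 result register type; a
// 32-bit or smaller value gets "={eax}" and i32 instead.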

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin and MCU ABIs).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // For i386, the type must be register sized.
  // For the MCU ABI, it only needs to be <= 8 bytes.
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in a register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32-
  // and 64-bit integers and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
                          uint64_t &Size) {
  for (const auto *FD : RD->fields()) {
    // Scalar arguments on the stack get 4-byte alignment on x86. If the
    // argument is smaller than 32 bits, expanding the struct will create
    // alignment padding.
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems: we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }
  return true;
}

static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
                                 uint64_t &Size) {
  // Don't do this if there are any non-empty bases.
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
                              Size))
      return false;
  }
  if (!addFieldSizes(Context, RD, Size))
    return false;
  return true;
}

/// Test whether an argument type which is to be passed indirectly (on the
/// stack) would have the equivalent layout if it was expanded into separate
/// arguments. If so, we prefer to do the latter to avoid inhibiting
/// optimizations.
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  uint64_t Size = 0;
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // On non-Windows, we have to conservatively match our old bitcode
      // prototypes in order to be ABI-compatible at the bitcode level.
      if (!CXXRD->isCLike())
        return false;
    } else {
      // Don't do this for dynamic classes.
      if (CXXRD->isDynamicClass())
        return false;
    }
    if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
      return false;
  } else {
    if (!addFieldSizes(getContext(), RD, Size))
      return false;
  }

  // We can do this if there was no alignment padding.
  return Size == getContext().getTypeSize(Ty);
}
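
// For illustration: "struct P { int x; float y; };" (two 32-bit basic fields,
// no padding) can be expanded into separate arguments, while
// "struct Q { short s; int i; };" cannot: 'short' is not a 32- or 64-bit
// basic type, and expanding it would introduce alignment padding.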

ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming
  // one integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
            llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(RetTy, State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(RetTy, State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(RetTy, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), RetTy, true))
      return ABIArgInfo::getIgnore();

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType()) ||
            SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
    }

    return getIndirectReturnResult(RetTy, State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  if (const auto *EIT = RetTy->getAs<ExtIntType>())
    if (EIT->getNumBits() > 64)
      return getIndirectReturnResult(RetTy, State);

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}
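
// For illustration: when small structs are returned in registers (e.g. on
// Darwin), "struct F { float f; };" comes back directly as a float and the
// register-sized "struct Two { short a, b; };" as an i32; MSVC skips the
// floating-point special case and returns the float struct as an i32 too.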
1546 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 1547 } 1548 1549 return getIndirectReturnResult(RetTy, State); 1550 } 1551 1552 // Treat an enum type as its underlying type. 1553 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 1554 RetTy = EnumTy->getDecl()->getIntegerType(); 1555 1556 if (const auto *EIT = RetTy->getAs<ExtIntType>()) 1557 if (EIT->getNumBits() > 64) 1558 return getIndirectReturnResult(RetTy, State); 1559 1560 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 1561 : ABIArgInfo::getDirect()); 1562 } 1563 1564 static bool isSIMDVectorType(ASTContext &Context, QualType Ty) { 1565 return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128; 1566 } 1567 1568 static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) { 1569 const RecordType *RT = Ty->getAs<RecordType>(); 1570 if (!RT) 1571 return false; 1572 const RecordDecl *RD = RT->getDecl(); 1573 1574 // If this is a C++ record, check the bases first. 1575 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 1576 for (const auto &I : CXXRD->bases()) 1577 if (isRecordWithSIMDVectorType(Context, I.getType())) 1578 return true; 1579 1580 for (const auto *i : RD->fields()) { 1581 QualType FT = i->getType(); 1582 1583 if (isSIMDVectorType(Context, FT)) 1584 return true; 1585 1586 if (isRecordWithSIMDVectorType(Context, FT)) 1587 return true; 1588 } 1589 1590 return false; 1591 } 1592 1593 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty, 1594 unsigned Align) const { 1595 // If the alignment is less than or equal to the minimum ABI alignment, 1596 // just use the default; the backend will handle this. 1597 if (Align <= MinABIStackAlignInBytes) 1598 return 0; // Use default alignment. 1599 1600 // On non-Darwin, the stack type alignment is always 4. 1601 if (!IsDarwinVectorABI) { 1602 // Set explicit alignment, since we may need to realign the top. 1603 return MinABIStackAlignInBytes; 1604 } 1605 1606 // Otherwise, if the type contains an SSE vector type, the alignment is 16. 1607 if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) || 1608 isRecordWithSIMDVectorType(getContext(), Ty))) 1609 return 16; 1610 1611 return MinABIStackAlignInBytes; 1612 } 1613 1614 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal, 1615 CCState &State) const { 1616 if (!ByVal) { 1617 if (State.FreeRegs) { 1618 --State.FreeRegs; // Non-byval indirects just use one pointer. 1619 if (!IsMCUABI) 1620 return getNaturalAlignIndirectInReg(Ty); 1621 } 1622 return getNaturalAlignIndirect(Ty, false); 1623 } 1624 1625 // Compute the byval alignment. 1626 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 1627 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign); 1628 if (StackAlign == 0) 1629 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true); 1630 1631 // If the stack alignment is less than the type alignment, realign the 1632 // argument.
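// Hypothetical example: on Darwin, a record containing a __m128 field has
// TypeAlign 16 and gets StackAlign 16 (no realignment), while a record
// over-aligned beyond that still gets StackAlign 16 and must be realigned.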
1633 bool Realign = TypeAlign > StackAlign; 1634 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign), 1635 /*ByVal=*/true, Realign); 1636 } 1637 1638 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const { 1639 const Type *T = isSingleElementStruct(Ty, getContext()); 1640 if (!T) 1641 T = Ty.getTypePtr(); 1642 1643 if (const BuiltinType *BT = T->getAs<BuiltinType>()) { 1644 BuiltinType::Kind K = BT->getKind(); 1645 if (K == BuiltinType::Float || K == BuiltinType::Double) 1646 return Float; 1647 } 1648 return Integer; 1649 } 1650 1651 bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const { 1652 if (!IsSoftFloatABI) { 1653 Class C = classify(Ty); 1654 if (C == Float) 1655 return false; 1656 } 1657 1658 unsigned Size = getContext().getTypeSize(Ty); 1659 unsigned SizeInRegs = (Size + 31) / 32; 1660 1661 if (SizeInRegs == 0) 1662 return false; 1663 1664 if (!IsMCUABI) { 1665 if (SizeInRegs > State.FreeRegs) { 1666 State.FreeRegs = 0; 1667 return false; 1668 } 1669 } else { 1670 // The MCU psABI allows passing parameters in-reg even if there are 1671 // earlier parameters that are passed on the stack. Also, 1672 // it does not allow passing >8-byte structs in-register, 1673 // even if there are 3 free registers available. 1674 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2) 1675 return false; 1676 } 1677 1678 State.FreeRegs -= SizeInRegs; 1679 return true; 1680 } 1681 1682 bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State, 1683 bool &InReg, 1684 bool &NeedsPadding) const { 1685 // On Windows, aggregates other than HFAs are never passed in registers, and 1686 // they do not consume register slots. Homogeneous floating-point aggregates 1687 // (HFAs) have already been dealt with at this point. 1688 if (IsWin32StructABI && isAggregateTypeForABI(Ty)) 1689 return false; 1690 1691 NeedsPadding = false; 1692 InReg = !IsMCUABI; 1693 1694 if (!updateFreeRegs(Ty, State)) 1695 return false; 1696 1697 if (IsMCUABI) 1698 return true; 1699 1700 if (State.CC == llvm::CallingConv::X86_FastCall || 1701 State.CC == llvm::CallingConv::X86_VectorCall || 1702 State.CC == llvm::CallingConv::X86_RegCall) { 1703 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs) 1704 NeedsPadding = true; 1705 1706 return false; 1707 } 1708 1709 return true; 1710 } 1711 1712 bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const { 1713 if (!updateFreeRegs(Ty, State)) 1714 return false; 1715 1716 if (IsMCUABI) 1717 return false; 1718 1719 if (State.CC == llvm::CallingConv::X86_FastCall || 1720 State.CC == llvm::CallingConv::X86_VectorCall || 1721 State.CC == llvm::CallingConv::X86_RegCall) { 1722 if (getContext().getTypeSize(Ty) > 32) 1723 return false; 1724 1725 return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() || 1726 Ty->isReferenceType()); 1727 } 1728 1729 return true; 1730 } 1731 1732 void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const { 1733 // Vectorcall on x86 works subtly differently than on x64, so the format is 1734 // a bit different from the x64 version. First, all vector types (not HVAs) 1735 // are assigned, with the first 6 ending up in the [XYZ]MM0-5 registers. 1736 // This differs from the x64 implementation, where the first 6 arguments by 1737 // INDEX get registers. 1738 // In the second pass over the arguments, HVAs are passed in the remaining 1739 // vector registers if possible, or indirectly by address. The address will be 1740 // passed in ECX/EDX if available.
Any other arguments are passed according to 1741 // the usual fastcall rules. 1742 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments(); 1743 for (int I = 0, E = Args.size(); I < E; ++I) { 1744 const Type *Base = nullptr; 1745 uint64_t NumElts = 0; 1746 const QualType &Ty = Args[I].type; 1747 if ((Ty->isVectorType() || Ty->isBuiltinType()) && 1748 isHomogeneousAggregate(Ty, Base, NumElts)) { 1749 if (State.FreeSSERegs >= NumElts) { 1750 State.FreeSSERegs -= NumElts; 1751 Args[I].info = ABIArgInfo::getDirectInReg(); 1752 State.IsPreassigned.set(I); 1753 } 1754 } 1755 } 1756 } 1757 1758 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, 1759 CCState &State) const { 1760 // FIXME: Set alignment on indirect arguments. 1761 bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall; 1762 bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall; 1763 bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall; 1764 1765 Ty = useFirstFieldIfTransparentUnion(Ty); 1766 TypeInfo TI = getContext().getTypeInfo(Ty); 1767 1768 // Check with the C++ ABI first. 1769 const RecordType *RT = Ty->getAs<RecordType>(); 1770 if (RT) { 1771 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); 1772 if (RAA == CGCXXABI::RAA_Indirect) { 1773 return getIndirectResult(Ty, false, State); 1774 } else if (RAA == CGCXXABI::RAA_DirectInMemory) { 1775 // The field index doesn't matter; we'll fix it up later. 1776 return ABIArgInfo::getInAlloca(/*FieldIndex=*/0); 1777 } 1778 } 1779 1780 // Regcall uses the concept of a homogeneous vector aggregate, similar 1781 // to other targets. 1782 const Type *Base = nullptr; 1783 uint64_t NumElts = 0; 1784 if ((IsRegCall || IsVectorCall) && 1785 isHomogeneousAggregate(Ty, Base, NumElts)) { 1786 if (State.FreeSSERegs >= NumElts) { 1787 State.FreeSSERegs -= NumElts; 1788 1789 // Vectorcall passes HVAs directly and does not flatten them, but regcall 1790 // does. 1791 if (IsVectorCall) 1792 return getDirectX86Hva(); 1793 1794 if (Ty->isBuiltinType() || Ty->isVectorType()) 1795 return ABIArgInfo::getDirect(); 1796 return ABIArgInfo::getExpand(); 1797 } 1798 return getIndirectResult(Ty, /*ByVal=*/false, State); 1799 } 1800 1801 if (isAggregateTypeForABI(Ty)) { 1802 // Structures with flexible arrays are always indirect. 1803 // FIXME: This should not be byval! 1804 if (RT && RT->getDecl()->hasFlexibleArrayMember()) 1805 return getIndirectResult(Ty, true, State); 1806 1807 // Ignore empty structs/unions on non-Windows. 1808 if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true)) 1809 return ABIArgInfo::getIgnore(); 1810 1811 llvm::LLVMContext &LLVMContext = getVMContext(); 1812 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 1813 bool NeedsPadding = false; 1814 bool InReg; 1815 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) { 1816 unsigned SizeInRegs = (TI.Width + 31) / 32; 1817 SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32); 1818 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 1819 if (InReg) 1820 return ABIArgInfo::getDirectInReg(Result); 1821 else 1822 return ABIArgInfo::getDirect(Result); 1823 } 1824 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr; 1825 1826 // Pass over-aligned aggregates on Windows indirectly. This behavior was 1827 // added in MSVC 2015.
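// For example, struct __declspec(align(8)) S { int x; }; carries a required
// alignment of 64 bits, which exceeds 32, so the check below passes it
// indirectly.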
1828 if (IsWin32StructABI && TI.AlignIsRequired && TI.Align > 32) 1829 return getIndirectResult(Ty, /*ByVal=*/false, State); 1830 1831 // Expand small (<= 128-bit) record types when we know that the stack layout 1832 // of those arguments will match the struct. This is important because the 1833 // LLVM backend isn't smart enough to remove byval, which inhibits many 1834 // optimizations. 1835 // Don't do this for the MCU if there are still free integer registers 1836 // (see X86_64 ABI for full explanation). 1837 if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) && 1838 canExpandIndirectArgument(Ty)) 1839 return ABIArgInfo::getExpandWithPadding( 1840 IsFastCall || IsVectorCall || IsRegCall, PaddingType); 1841 1842 return getIndirectResult(Ty, true, State); 1843 } 1844 1845 if (const VectorType *VT = Ty->getAs<VectorType>()) { 1846 // On Windows, vectors are passed directly if registers are available, or 1847 // indirectly if not. This avoids the need to align argument memory. Pass 1848 // user-defined vector types larger than 512 bits indirectly for simplicity. 1849 if (IsWin32StructABI) { 1850 if (TI.Width <= 512 && State.FreeSSERegs > 0) { 1851 --State.FreeSSERegs; 1852 return ABIArgInfo::getDirectInReg(); 1853 } 1854 return getIndirectResult(Ty, /*ByVal=*/false, State); 1855 } 1856 1857 // On Darwin, some vectors are passed in memory; we handle this by passing 1858 // them as i8/i16/i32/i64. 1859 if (IsDarwinVectorABI) { 1860 if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) || 1861 (TI.Width == 64 && VT->getNumElements() == 1)) 1862 return ABIArgInfo::getDirect( 1863 llvm::IntegerType::get(getVMContext(), TI.Width)); 1864 } 1865 1866 if (IsX86_MMXType(CGT.ConvertType(Ty))) 1867 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64)); 1868 1869 return ABIArgInfo::getDirect(); 1870 } 1871 1872 1873 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1874 Ty = EnumTy->getDecl()->getIntegerType(); 1875 1876 bool InReg = shouldPrimitiveUseInReg(Ty, State); 1877 1878 if (isPromotableIntegerTypeForABI(Ty)) { 1879 if (InReg) 1880 return ABIArgInfo::getExtendInReg(Ty); 1881 return ABIArgInfo::getExtend(Ty); 1882 } 1883 1884 if (const auto *EIT = Ty->getAs<ExtIntType>()) { 1885 if (EIT->getNumBits() <= 64) { 1886 if (InReg) 1887 return ABIArgInfo::getDirectInReg(); 1888 return ABIArgInfo::getDirect(); 1889 } 1890 return getIndirectResult(Ty, /*ByVal=*/false, State); 1891 } 1892 1893 if (InReg) 1894 return ABIArgInfo::getDirectInReg(); 1895 return ABIArgInfo::getDirect(); 1896 } 1897 1898 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const { 1899 CCState State(FI); 1900 if (IsMCUABI) 1901 State.FreeRegs = 3; 1902 else if (State.CC == llvm::CallingConv::X86_FastCall) { 1903 State.FreeRegs = 2; 1904 State.FreeSSERegs = 3; 1905 } else if (State.CC == llvm::CallingConv::X86_VectorCall) { 1906 State.FreeRegs = 2; 1907 State.FreeSSERegs = 6; 1908 } else if (FI.getHasRegParm()) 1909 State.FreeRegs = FI.getRegParm(); 1910 else if (State.CC == llvm::CallingConv::X86_RegCall) { 1911 State.FreeRegs = 5; 1912 State.FreeSSERegs = 8; 1913 } else if (IsWin32StructABI) { 1914 // Since MSVC 2015, the first three SSE vectors have been passed in 1915 // registers. The rest are passed indirectly.
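// e.g. for f(__m128 a, __m128 b, __m128 c, __m128 d), 'a'..'c' land in
// XMM0-XMM2 and 'd' is passed indirectly once the three free SSE registers
// set below are exhausted.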
1916 State.FreeRegs = DefaultNumRegisterParameters; 1917 State.FreeSSERegs = 3; 1918 } else 1919 State.FreeRegs = DefaultNumRegisterParameters; 1920 1921 if (!::classifyReturnType(getCXXABI(), FI, *this)) { 1922 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State); 1923 } else if (FI.getReturnInfo().isIndirect()) { 1924 // The C++ ABI is not aware of register usage, so we have to check if the 1925 // return value was sret and put it in a register ourselves if appropriate. 1926 if (State.FreeRegs) { 1927 --State.FreeRegs; // The sret parameter consumes a register. 1928 if (!IsMCUABI) 1929 FI.getReturnInfo().setInReg(true); 1930 } 1931 } 1932 1933 // The chain argument effectively gives us another free register. 1934 if (FI.isChainCall()) 1935 ++State.FreeRegs; 1936 1937 // For vectorcall, do a first pass over the arguments, assigning FP and vector 1938 // arguments to XMM registers as available. 1939 if (State.CC == llvm::CallingConv::X86_VectorCall) 1940 runVectorCallFirstPass(FI, State); 1941 1942 bool UsedInAlloca = false; 1943 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments(); 1944 for (int I = 0, E = Args.size(); I < E; ++I) { 1945 // Skip arguments that have already been assigned. 1946 if (State.IsPreassigned.test(I)) 1947 continue; 1948 1949 Args[I].info = classifyArgumentType(Args[I].type, State); 1950 UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca); 1951 } 1952 1953 // If we needed to use inalloca for any argument, do a second pass and rewrite 1954 // all the memory arguments to use inalloca. 1955 if (UsedInAlloca) 1956 rewriteWithInAlloca(FI); 1957 } 1958 1959 void 1960 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, 1961 CharUnits &StackOffset, ABIArgInfo &Info, 1962 QualType Type) const { 1963 // Arguments are always 4-byte-aligned. 1964 CharUnits WordSize = CharUnits::fromQuantity(4); 1965 assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct"); 1966 1967 // sret pointers and indirect things will require an extra pointer 1968 // indirection, unless they are byval. Most things are byval, and will not 1969 // require this indirection. 1970 bool IsIndirect = false; 1971 if (Info.isIndirect() && !Info.getIndirectByVal()) 1972 IsIndirect = true; 1973 Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect); 1974 llvm::Type *LLTy = CGT.ConvertTypeForMem(Type); 1975 if (IsIndirect) 1976 LLTy = LLTy->getPointerTo(0); 1977 FrameFields.push_back(LLTy); 1978 StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type); 1979 1980 // Insert padding bytes to respect alignment. 1981 CharUnits FieldEnd = StackOffset; 1982 StackOffset = FieldEnd.alignTo(WordSize); 1983 if (StackOffset != FieldEnd) { 1984 CharUnits NumBytes = StackOffset - FieldEnd; 1985 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext()); 1986 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity()); 1987 FrameFields.push_back(Ty); 1988 } 1989 } 1990 1991 static bool isArgInAlloca(const ABIArgInfo &Info) { 1992 // Leave ignored and inreg arguments alone. 1993 switch (Info.getKind()) { 1994 case ABIArgInfo::InAlloca: 1995 return true; 1996 case ABIArgInfo::Ignore: 1997 case ABIArgInfo::IndirectAliased: 1998 return false; 1999 case ABIArgInfo::Indirect: 2000 case ABIArgInfo::Direct: 2001 case ABIArgInfo::Extend: 2002 return !Info.getInReg(); 2003 case ABIArgInfo::Expand: 2004 case ABIArgInfo::CoerceAndExpand: 2005 // These are aggregate types which are never passed in registers when 2006 // inalloca is involved. 
2007 return true; 2008 } 2009 llvm_unreachable("invalid enum"); 2010 } 2011 2012 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const { 2013 assert(IsWin32StructABI && "inalloca only supported on win32"); 2014 2015 // Build a packed struct type for all of the arguments in memory. 2016 SmallVector<llvm::Type *, 6> FrameFields; 2017 2018 // The stack alignment is always 4. 2019 CharUnits StackAlign = CharUnits::fromQuantity(4); 2020 2021 CharUnits StackOffset; 2022 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end(); 2023 2024 // Put 'this' into the struct before 'sret', if necessary. 2025 bool IsThisCall = 2026 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall; 2027 ABIArgInfo &Ret = FI.getReturnInfo(); 2028 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall && 2029 isArgInAlloca(I->info)) { 2030 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); 2031 ++I; 2032 } 2033 2034 // Put the sret parameter into the inalloca struct if it's in memory. 2035 if (Ret.isIndirect() && !Ret.getInReg()) { 2036 addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType()); 2037 // On Windows, the hidden sret parameter is always returned in eax. 2038 Ret.setInAllocaSRet(IsWin32StructABI); 2039 } 2040 2041 // Skip the 'this' parameter in ecx. 2042 if (IsThisCall) 2043 ++I; 2044 2045 // Put arguments passed in memory into the struct. 2046 for (; I != E; ++I) { 2047 if (isArgInAlloca(I->info)) 2048 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); 2049 } 2050 2051 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields, 2052 /*isPacked=*/true), 2053 StackAlign); 2054 } 2055 2056 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, 2057 Address VAListAddr, QualType Ty) const { 2058 2059 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 2060 2061 // x86-32 changes the alignment of certain arguments on the stack. 2062 // 2063 // Just messing with TypeInfo like this works because we never pass 2064 // anything indirectly. 
2065 TypeInfo.second = CharUnits::fromQuantity( 2066 getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity())); 2067 2068 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, 2069 TypeInfo, CharUnits::fromQuantity(4), 2070 /*AllowHigherAlign*/ true); 2071 } 2072 2073 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI( 2074 const llvm::Triple &Triple, const CodeGenOptions &Opts) { 2075 assert(Triple.getArch() == llvm::Triple::x86); 2076 2077 switch (Opts.getStructReturnConvention()) { 2078 case CodeGenOptions::SRCK_Default: 2079 break; 2080 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return 2081 return false; 2082 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return 2083 return true; 2084 } 2085 2086 if (Triple.isOSDarwin() || Triple.isOSIAMCU()) 2087 return true; 2088 2089 switch (Triple.getOS()) { 2090 case llvm::Triple::DragonFly: 2091 case llvm::Triple::FreeBSD: 2092 case llvm::Triple::OpenBSD: 2093 case llvm::Triple::Win32: 2094 return true; 2095 default: 2096 return false; 2097 } 2098 } 2099 2100 void X86_32TargetCodeGenInfo::setTargetAttributes( 2101 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 2102 if (GV->isDeclaration()) 2103 return; 2104 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 2105 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 2106 llvm::Function *Fn = cast<llvm::Function>(GV); 2107 Fn->addFnAttr("stackrealign"); 2108 } 2109 if (FD->hasAttr<AnyX86InterruptAttr>()) { 2110 llvm::Function *Fn = cast<llvm::Function>(GV); 2111 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 2112 } 2113 } 2114 } 2115 2116 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( 2117 CodeGen::CodeGenFunction &CGF, 2118 llvm::Value *Address) const { 2119 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2120 2121 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 2122 2123 // 0-7 are the eight integer registers; the order is different 2124 // on Darwin (for EH), but the range is the same. 2125 // 8 is %eip. 2126 AssignToArrayRange(Builder, Address, Four8, 0, 8); 2127 2128 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) { 2129 // 12-16 are st(0..4). Not sure why we stop at 4. 2130 // These have size 16, which is sizeof(long double) on 2131 // platforms with 8-byte alignment for that type. 2132 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16); 2133 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16); 2134 2135 } else { 2136 // 9 is %eflags, which doesn't get a size on Darwin for some 2137 // reason. 2138 Builder.CreateAlignedStore( 2139 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9), 2140 CharUnits::One()); 2141 2142 // 11-16 are st(0..5). Not sure why we stop at 5. 2143 // These have size 12, which is sizeof(long double) on 2144 // platforms with 4-byte alignment for that type. 2145 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12); 2146 AssignToArrayRange(Builder, Address, Twelve8, 11, 16); 2147 } 2148 2149 return false; 2150 } 2151 2152 //===----------------------------------------------------------------------===// 2153 // X86-64 ABI Implementation 2154 //===----------------------------------------------------------------------===// 2155 2156 2157 namespace { 2158 /// The AVX ABI level for X86 targets. 2159 enum class X86AVXABILevel { 2160 None, 2161 AVX, 2162 AVX512 2163 }; 2164 2165 /// \returns the size in bits of the largest (native) vector for \p AVXLevel.
2166 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) { 2167 switch (AVXLevel) { 2168 case X86AVXABILevel::AVX512: 2169 return 512; 2170 case X86AVXABILevel::AVX: 2171 return 256; 2172 case X86AVXABILevel::None: 2173 return 128; 2174 } 2175 llvm_unreachable("Unknown AVXLevel"); 2176 } 2177 2178 /// X86_64ABIInfo - The X86_64 ABI information. 2179 class X86_64ABIInfo : public SwiftABIInfo { 2180 enum Class { 2181 Integer = 0, 2182 SSE, 2183 SSEUp, 2184 X87, 2185 X87Up, 2186 ComplexX87, 2187 NoClass, 2188 Memory 2189 }; 2190 2191 /// merge - Implement the X86_64 ABI merging algorithm. 2192 /// 2193 /// Merge an accumulating classification \arg Accum with a field 2194 /// classification \arg Field. 2195 /// 2196 /// \param Accum - The accumulating classification. This should 2197 /// always be either NoClass or the result of a previous merge 2198 /// call. In addition, this should never be Memory (the caller 2199 /// should just return Memory for the aggregate). 2200 static Class merge(Class Accum, Class Field); 2201 2202 /// postMerge - Implement the X86_64 ABI post merging algorithm. 2203 /// 2204 /// Post-merger cleanup; reduces a malformed Hi and Lo pair to 2205 /// final MEMORY or SSE classes when necessary. 2206 /// 2207 /// \param AggregateSize - The size of the current aggregate in 2208 /// the classification process. 2209 /// 2210 /// \param Lo - The classification for the parts of the type 2211 /// residing in the low word of the containing object. 2212 /// 2213 /// \param Hi - The classification for the parts of the type 2214 /// residing in the higher words of the containing object. 2215 /// 2216 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const; 2217 2218 /// classify - Determine the x86_64 register classes in which the 2219 /// given type T should be passed. 2220 /// 2221 /// \param Lo - The classification for the parts of the type 2222 /// residing in the low word of the containing object. 2223 /// 2224 /// \param Hi - The classification for the parts of the type 2225 /// residing in the high word of the containing object. 2226 /// 2227 /// \param OffsetBase - The bit offset of this type in the 2228 /// containing object. Some parameters are classified differently 2229 /// depending on whether they straddle an eightbyte boundary. 2230 /// 2231 /// \param isNamedArg - Whether the argument in question is a "named" 2232 /// argument, as used in AMD64-ABI 3.5.7. 2233 /// 2234 /// If a word is unused its result will be NoClass; if a type should 2235 /// be passed in Memory then at least the classification of \arg Lo 2236 /// will be Memory. 2237 /// 2238 /// The \arg Lo class will be NoClass iff the argument is ignored. 2239 /// 2240 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will 2241 /// also be ComplexX87. 2242 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi, 2243 bool isNamedArg) const; 2244 2245 llvm::Type *GetByteVectorType(QualType Ty) const; 2246 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType, 2247 unsigned IROffset, QualType SourceTy, 2248 unsigned SourceOffset) const; 2249 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType, 2250 unsigned IROffset, QualType SourceTy, 2251 unsigned SourceOffset) const; 2252 2253 /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable 2254 /// result such that the argument will be returned in memory.
2255 ABIArgInfo getIndirectReturnResult(QualType Ty) const; 2256 2257 /// getIndirectResult - Given a source type \arg Ty, return a suitable result 2258 /// such that the argument will be passed in memory. 2259 /// 2260 /// \param freeIntRegs - The number of free integer registers remaining 2261 /// available. 2262 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const; 2263 2264 ABIArgInfo classifyReturnType(QualType RetTy) const; 2265 2266 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs, 2267 unsigned &neededInt, unsigned &neededSSE, 2268 bool isNamedArg) const; 2269 2270 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt, 2271 unsigned &NeededSSE) const; 2272 2273 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt, 2274 unsigned &NeededSSE) const; 2275 2276 bool IsIllegalVectorType(QualType Ty) const; 2277 2278 /// The 0.98 ABI revision clarified a lot of ambiguities, 2279 /// unfortunately in ways that were not always consistent with 2280 /// certain previous compilers. In particular, platforms which 2281 /// required strict binary compatibility with older versions of GCC 2282 /// may need to exempt themselves. 2283 bool honorsRevision0_98() const { 2284 return !getTarget().getTriple().isOSDarwin(); 2285 } 2286 2287 /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to 2288 /// classify it as INTEGER (for compatibility with older clang compilers). 2289 bool classifyIntegerMMXAsSSE() const { 2290 // Clang <= 3.8 did not do this. 2291 if (getContext().getLangOpts().getClangABICompat() <= 2292 LangOptions::ClangABI::Ver3_8) 2293 return false; 2294 2295 const llvm::Triple &Triple = getTarget().getTriple(); 2296 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4) 2297 return false; 2298 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10) 2299 return false; 2300 return true; 2301 } 2302 2303 // GCC classifies vectors of __int128 as memory. 2304 bool passInt128VectorsInMem() const { 2305 // Clang <= 9.0 did not do this. 2306 if (getContext().getLangOpts().getClangABICompat() <= 2307 LangOptions::ClangABI::Ver9) 2308 return false; 2309 2310 const llvm::Triple &T = getTarget().getTriple(); 2311 return T.isOSLinux() || T.isOSNetBSD(); 2312 } 2313 2314 X86AVXABILevel AVXLevel; 2315 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32-bit pointers on 2316 // 64-bit hardware. 2317 bool Has64BitPointers; 2318 2319 public: 2320 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) : 2321 SwiftABIInfo(CGT), AVXLevel(AVXLevel), 2322 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) { 2323 } 2324 2325 bool isPassedUsingAVXType(QualType type) const { 2326 unsigned neededInt, neededSSE; 2327 // The freeIntRegs argument doesn't matter here.
2328 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE, 2329 /*isNamedArg*/true); 2330 if (info.isDirect()) { 2331 llvm::Type *ty = info.getCoerceToType(); 2332 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty)) 2333 return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128; 2334 } 2335 return false; 2336 } 2337 2338 void computeInfo(CGFunctionInfo &FI) const override; 2339 2340 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 2341 QualType Ty) const override; 2342 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 2343 QualType Ty) const override; 2344 2345 bool has64BitPointers() const { 2346 return Has64BitPointers; 2347 } 2348 2349 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, 2350 bool asReturnValue) const override { 2351 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 2352 } 2353 bool isSwiftErrorInRegister() const override { 2354 return true; 2355 } 2356 }; 2357 2358 /// WinX86_64ABIInfo - The Windows X86_64 ABI information. 2359 class WinX86_64ABIInfo : public SwiftABIInfo { 2360 public: 2361 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) 2362 : SwiftABIInfo(CGT), AVXLevel(AVXLevel), 2363 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {} 2364 2365 void computeInfo(CGFunctionInfo &FI) const override; 2366 2367 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 2368 QualType Ty) const override; 2369 2370 bool isHomogeneousAggregateBaseType(QualType Ty) const override { 2371 // FIXME: Assumes vectorcall is in use. 2372 return isX86VectorTypeForVectorCall(getContext(), Ty); 2373 } 2374 2375 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 2376 uint64_t NumMembers) const override { 2377 // FIXME: Assumes vectorcall is in use. 2378 return isX86VectorCallAggregateSmallEnough(NumMembers); 2379 } 2380 2381 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars, 2382 bool asReturnValue) const override { 2383 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 2384 } 2385 2386 bool isSwiftErrorInRegister() const override { 2387 return true; 2388 } 2389 2390 private: 2391 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType, 2392 bool IsVectorCall, bool IsRegCall) const; 2393 ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs, 2394 const ABIArgInfo &current) const; 2395 void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs, 2396 bool IsVectorCall, bool IsRegCall) const; 2397 2398 X86AVXABILevel AVXLevel; 2399 2400 bool IsMingw64; 2401 }; 2402 2403 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo { 2404 public: 2405 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) 2406 : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {} 2407 2408 const X86_64ABIInfo &getABIInfo() const { 2409 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo()); 2410 } 2411 2412 /// Disable tail call on x86-64. The epilogue code before the tail jump blocks 2413 /// autoreleaseRV/retainRV and autoreleaseRV/unsafeClaimRV optimizations. 2414 bool markARCOptimizedReturnCallsAsNoTail() const override { return true; } 2415 2416 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 2417 return 7; 2418 } 2419 2420 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2421 llvm::Value *Address) const override { 2422 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 2423 2424 // 0-15 are the 16 integer registers. 2425 // 16 is %rip.
2426 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 2427 return false; 2428 } 2429 2430 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 2431 StringRef Constraint, 2432 llvm::Type* Ty) const override { 2433 return X86AdjustInlineAsmType(CGF, Constraint, Ty); 2434 } 2435 2436 bool isNoProtoCallVariadic(const CallArgList &args, 2437 const FunctionNoProtoType *fnType) const override { 2438 // The default CC on x86-64 sets %al to the number of SSE 2439 // registers used, and GCC sets this when calling an unprototyped 2440 // function, so we override the default behavior. However, don't do 2441 // that when AVX types are involved: the ABI explicitly states it is 2442 // undefined, and it doesn't work in practice because of how the ABI 2443 // defines varargs anyway. 2444 if (fnType->getCallConv() == CC_C) { 2445 bool HasAVXType = false; 2446 for (CallArgList::const_iterator 2447 it = args.begin(), ie = args.end(); it != ie; ++it) { 2448 if (getABIInfo().isPassedUsingAVXType(it->Ty)) { 2449 HasAVXType = true; 2450 break; 2451 } 2452 } 2453 2454 if (!HasAVXType) 2455 return true; 2456 } 2457 2458 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType); 2459 } 2460 2461 llvm::Constant * 2462 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { 2463 unsigned Sig = (0xeb << 0) | // jmp rel8 2464 (0x06 << 8) | // .+0x08 2465 ('v' << 16) | 2466 ('2' << 24); 2467 return llvm::ConstantInt::get(CGM.Int32Ty, Sig); 2468 } 2469 2470 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2471 CodeGen::CodeGenModule &CGM) const override { 2472 if (GV->isDeclaration()) 2473 return; 2474 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 2475 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 2476 llvm::Function *Fn = cast<llvm::Function>(GV); 2477 Fn->addFnAttr("stackrealign"); 2478 } 2479 if (FD->hasAttr<AnyX86InterruptAttr>()) { 2480 llvm::Function *Fn = cast<llvm::Function>(GV); 2481 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 2482 } 2483 } 2484 } 2485 2486 void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, 2487 const FunctionDecl *Caller, 2488 const FunctionDecl *Callee, 2489 const CallArgList &Args) const override; 2490 }; 2491 2492 static void initFeatureMaps(const ASTContext &Ctx, 2493 llvm::StringMap<bool> &CallerMap, 2494 const FunctionDecl *Caller, 2495 llvm::StringMap<bool> &CalleeMap, 2496 const FunctionDecl *Callee) { 2497 if (CalleeMap.empty() && CallerMap.empty()) { 2498 // The caller is potentially nullptr in the case where the call isn't in a 2499 // function. In this case, getFunctionFeatureMap ensures we just get the 2500 // TU-level setting (since it cannot be modified by 'target'). 2501 Ctx.getFunctionFeatureMap(CallerMap, Caller); 2502 Ctx.getFunctionFeatureMap(CalleeMap, Callee); 2503 } 2504 } 2505 2506 static bool checkAVXParamFeature(DiagnosticsEngine &Diag, 2507 SourceLocation CallLoc, 2508 const llvm::StringMap<bool> &CallerMap, 2509 const llvm::StringMap<bool> &CalleeMap, 2510 QualType Ty, StringRef Feature, 2511 bool IsArgument) { 2512 bool CallerHasFeat = CallerMap.lookup(Feature); 2513 bool CalleeHasFeat = CalleeMap.lookup(Feature); 2514 if (!CallerHasFeat && !CalleeHasFeat) 2515 return Diag.Report(CallLoc, diag::warn_avx_calling_convention) 2516 << IsArgument << Ty << Feature; 2517 2518 // Mixing calling conventions here is very clearly an error.
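// e.g. a caller built with -mavx passing a __m256 argument to a callee
// built without AVX: exactly one side has the feature, so this is an error
// rather than the warning above.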
2519 if (!CallerHasFeat || !CalleeHasFeat) 2520 return Diag.Report(CallLoc, diag::err_avx_calling_convention) 2521 << IsArgument << Ty << Feature; 2522 2523 // Otherwise, both caller and callee have the required feature, so there is 2524 // no need to diagnose. 2525 return false; 2526 } 2527 2528 static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx, 2529 SourceLocation CallLoc, 2530 const llvm::StringMap<bool> &CallerMap, 2531 const llvm::StringMap<bool> &CalleeMap, QualType Ty, 2532 bool IsArgument) { 2533 uint64_t Size = Ctx.getTypeSize(Ty); 2534 if (Size > 256) 2535 return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, 2536 "avx512f", IsArgument); 2537 2538 if (Size > 128) 2539 return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx", 2540 IsArgument); 2541 2542 return false; 2543 } 2544 2545 void X86_64TargetCodeGenInfo::checkFunctionCallABI( 2546 CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, 2547 const FunctionDecl *Callee, const CallArgList &Args) const { 2548 llvm::StringMap<bool> CallerMap; 2549 llvm::StringMap<bool> CalleeMap; 2550 unsigned ArgIndex = 0; 2551 2552 // We need to loop through the actual call arguments rather than the 2553 // function's parameters, in case this is variadic. 2554 for (const CallArg &Arg : Args) { 2555 // The "avx" feature changes how vectors >128 bits in size are passed. "avx512f" 2556 // additionally changes how vectors >256 bits in size are passed. Like GCC, we 2557 // warn when a function is called with an argument where this will change. 2558 // Unlike GCC, we also error when it is an obvious ABI mismatch, that is, 2559 // the caller and callee features are mismatched. 2560 // Unfortunately, we cannot do this diagnostic in SEMA, since the callee can 2561 // change its ABI with attribute-target after this call. 2562 if (Arg.getType()->isVectorType() && 2563 CGM.getContext().getTypeSize(Arg.getType()) > 128) { 2564 initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee); 2565 QualType Ty = Arg.getType(); 2566 // The CallArg seems to have desugared the type already, so for clearer 2567 // diagnostics, replace it with the type in the FunctionDecl if possible. 2568 if (ArgIndex < Callee->getNumParams()) 2569 Ty = Callee->getParamDecl(ArgIndex)->getType(); 2570 2571 if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap, 2572 CalleeMap, Ty, /*IsArgument*/ true)) 2573 return; 2574 } 2575 ++ArgIndex; 2576 } 2577 2578 // Check return always, as we don't have a good way of knowing in codegen 2579 // whether this value is used, tail-called, etc. 2580 if (Callee->getReturnType()->isVectorType() && 2581 CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) { 2582 initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee); 2583 checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap, 2584 CalleeMap, Callee->getReturnType(), 2585 /*IsArgument*/ false); 2586 } 2587 } 2588 2589 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) { 2590 // If the argument does not end in .lib, automatically add the suffix. 2591 // If the argument contains a space, enclose it in quotes. 2592 // This matches the behavior of MSVC. 2593 bool Quote = (Lib.find(" ") != StringRef::npos); 2594 std::string ArgStr = Quote ? "\"" : ""; 2595 ArgStr += Lib; 2596 if (!Lib.endswith_lower(".lib") && !Lib.endswith_lower(".a")) 2597 ArgStr += ".lib"; 2598 ArgStr += Quote ?
"\"" : ""; 2599 return ArgStr; 2600 } 2601 2602 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo { 2603 public: 2604 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 2605 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI, 2606 unsigned NumRegisterParameters) 2607 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI, 2608 Win32StructABI, NumRegisterParameters, false) {} 2609 2610 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2611 CodeGen::CodeGenModule &CGM) const override; 2612 2613 void getDependentLibraryOption(llvm::StringRef Lib, 2614 llvm::SmallString<24> &Opt) const override { 2615 Opt = "/DEFAULTLIB:"; 2616 Opt += qualifyWindowsLibrary(Lib); 2617 } 2618 2619 void getDetectMismatchOption(llvm::StringRef Name, 2620 llvm::StringRef Value, 2621 llvm::SmallString<32> &Opt) const override { 2622 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 2623 } 2624 }; 2625 2626 static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2627 CodeGen::CodeGenModule &CGM) { 2628 if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) { 2629 2630 if (CGM.getCodeGenOpts().StackProbeSize != 4096) 2631 Fn->addFnAttr("stack-probe-size", 2632 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize)); 2633 if (CGM.getCodeGenOpts().NoStackArgProbe) 2634 Fn->addFnAttr("no-stack-arg-probe"); 2635 } 2636 } 2637 2638 void WinX86_32TargetCodeGenInfo::setTargetAttributes( 2639 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 2640 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 2641 if (GV->isDeclaration()) 2642 return; 2643 addStackProbeTargetAttributes(D, GV, CGM); 2644 } 2645 2646 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 2647 public: 2648 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 2649 X86AVXABILevel AVXLevel) 2650 : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {} 2651 2652 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2653 CodeGen::CodeGenModule &CGM) const override; 2654 2655 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 2656 return 7; 2657 } 2658 2659 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2660 llvm::Value *Address) const override { 2661 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 2662 2663 // 0-15 are the 16 integer registers. 2664 // 16 is %rip. 
2665 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 2666 return false; 2667 } 2668 2669 void getDependentLibraryOption(llvm::StringRef Lib, 2670 llvm::SmallString<24> &Opt) const override { 2671 Opt = "/DEFAULTLIB:"; 2672 Opt += qualifyWindowsLibrary(Lib); 2673 } 2674 2675 void getDetectMismatchOption(llvm::StringRef Name, 2676 llvm::StringRef Value, 2677 llvm::SmallString<32> &Opt) const override { 2678 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 2679 } 2680 }; 2681 2682 void WinX86_64TargetCodeGenInfo::setTargetAttributes( 2683 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 2684 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 2685 if (GV->isDeclaration()) 2686 return; 2687 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 2688 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 2689 llvm::Function *Fn = cast<llvm::Function>(GV); 2690 Fn->addFnAttr("stackrealign"); 2691 } 2692 if (FD->hasAttr<AnyX86InterruptAttr>()) { 2693 llvm::Function *Fn = cast<llvm::Function>(GV); 2694 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 2695 } 2696 } 2697 2698 addStackProbeTargetAttributes(D, GV, CGM); 2699 } 2700 } 2701 2702 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, 2703 Class &Hi) const { 2704 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: 2705 // 2706 // (a) If one of the classes is Memory, the whole argument is passed in 2707 // memory. 2708 // 2709 // (b) If X87UP is not preceded by X87, the whole argument is passed in 2710 // memory. 2711 // 2712 // (c) If the size of the aggregate exceeds two eightbytes and the first 2713 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole 2714 // argument is passed in memory. NOTE: This is necessary to keep the 2715 // ABI working for processors that don't support the __m256 type. 2716 // 2717 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. 2718 // 2719 // Some of these are enforced by the merging logic. Others can arise 2720 // only with unions; for example: 2721 // union { _Complex double; unsigned; } 2722 // 2723 // Note that clauses (b) and (c) were added in 0.98. 2724 // 2725 if (Hi == Memory) 2726 Lo = Memory; 2727 if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) 2728 Lo = Memory; 2729 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) 2730 Lo = Memory; 2731 if (Hi == SSEUp && Lo != SSE) 2732 Hi = SSE; 2733 } 2734 2735 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { 2736 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 2737 // classified recursively so that always two fields are 2738 // considered. The resulting class is calculated according to 2739 // the classes of the fields in the eightbyte: 2740 // 2741 // (a) If both classes are equal, this is the resulting class. 2742 // 2743 // (b) If one of the classes is NO_CLASS, the resulting class is 2744 // the other class. 2745 // 2746 // (c) If one of the classes is MEMORY, the result is the MEMORY 2747 // class. 2748 // 2749 // (d) If one of the classes is INTEGER, the result is the 2750 // INTEGER. 2751 // 2752 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 2753 // MEMORY is used as class. 2754 // 2755 // (f) Otherwise class SSE is used. 2756 2757 // Accum should never be memory (we should have returned) or 2758 // ComplexX87 (because this cannot be passed in a structure). 
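// For illustration: struct { int i; float f; } occupies a single eightbyte
// whose fields classify as INTEGER and SSE; merging them yields INTEGER by
// rule (d).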
2759 assert((Accum != Memory && Accum != ComplexX87) && 2760 "Invalid accumulated classification during merge."); 2761 if (Accum == Field || Field == NoClass) 2762 return Accum; 2763 if (Field == Memory) 2764 return Memory; 2765 if (Accum == NoClass) 2766 return Field; 2767 if (Accum == Integer || Field == Integer) 2768 return Integer; 2769 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 2770 Accum == X87 || Accum == X87Up) 2771 return Memory; 2772 return SSE; 2773 } 2774 2775 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, 2776 Class &Lo, Class &Hi, bool isNamedArg) const { 2777 // FIXME: This code can be simplified by introducing a simple value class for 2778 // Class pairs with appropriate constructor methods for the various 2779 // situations. 2780 2781 // FIXME: Some of the split computations are wrong; unaligned vectors 2782 // shouldn't be passed in registers for example, so there is no chance they 2783 // can straddle an eightbyte. Verify & simplify. 2784 2785 Lo = Hi = NoClass; 2786 2787 Class &Current = OffsetBase < 64 ? Lo : Hi; 2788 Current = Memory; 2789 2790 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 2791 BuiltinType::Kind k = BT->getKind(); 2792 2793 if (k == BuiltinType::Void) { 2794 Current = NoClass; 2795 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 2796 Lo = Integer; 2797 Hi = Integer; 2798 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 2799 Current = Integer; 2800 } else if (k == BuiltinType::Float || k == BuiltinType::Double) { 2801 Current = SSE; 2802 } else if (k == BuiltinType::LongDouble) { 2803 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 2804 if (LDF == &llvm::APFloat::IEEEquad()) { 2805 Lo = SSE; 2806 Hi = SSEUp; 2807 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) { 2808 Lo = X87; 2809 Hi = X87Up; 2810 } else if (LDF == &llvm::APFloat::IEEEdouble()) { 2811 Current = SSE; 2812 } else 2813 llvm_unreachable("unexpected long double representation!"); 2814 } 2815 // FIXME: _Decimal32 and _Decimal64 are SSE. 2816 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 2817 return; 2818 } 2819 2820 if (const EnumType *ET = Ty->getAs<EnumType>()) { 2821 // Classify the underlying integer type. 2822 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg); 2823 return; 2824 } 2825 2826 if (Ty->hasPointerRepresentation()) { 2827 Current = Integer; 2828 return; 2829 } 2830 2831 if (Ty->isMemberPointerType()) { 2832 if (Ty->isMemberFunctionPointerType()) { 2833 if (Has64BitPointers) { 2834 // If Has64BitPointers, this is an {i64, i64}, so classify both 2835 // Lo and Hi now. 2836 Lo = Hi = Integer; 2837 } else { 2838 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that 2839 // straddles an eightbyte boundary, Hi should be classified as well. 
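// e.g. with 32-bit pointers, a member function pointer at bit offset 32 of
// its enclosing object spans bits [32, 96), straddling the first eightbyte
// boundary, so both halves classify as Integer.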
2840 uint64_t EB_FuncPtr = (OffsetBase) / 64; 2841 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64; 2842 if (EB_FuncPtr != EB_ThisAdj) { 2843 Lo = Hi = Integer; 2844 } else { 2845 Current = Integer; 2846 } 2847 } 2848 } else { 2849 Current = Integer; 2850 } 2851 return; 2852 } 2853 2854 if (const VectorType *VT = Ty->getAs<VectorType>()) { 2855 uint64_t Size = getContext().getTypeSize(VT); 2856 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) { 2857 // gcc passes the following as integer: 2858 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float> 2859 // 2 bytes - <2 x char>, <1 x short> 2860 // 1 byte - <1 x char> 2861 Current = Integer; 2862 2863 // If this type crosses an eightbyte boundary, it should be 2864 // split. 2865 uint64_t EB_Lo = (OffsetBase) / 64; 2866 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64; 2867 if (EB_Lo != EB_Hi) 2868 Hi = Lo; 2869 } else if (Size == 64) { 2870 QualType ElementType = VT->getElementType(); 2871 2872 // gcc passes <1 x double> in memory. :( 2873 if (ElementType->isSpecificBuiltinType(BuiltinType::Double)) 2874 return; 2875 2876 // gcc passes <1 x long long> as SSE but clang used to unconditionally 2877 // pass them as integer. For platforms where clang is the de facto 2878 // platform compiler, we must continue to use integer. 2879 if (!classifyIntegerMMXAsSSE() && 2880 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) || 2881 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) || 2882 ElementType->isSpecificBuiltinType(BuiltinType::Long) || 2883 ElementType->isSpecificBuiltinType(BuiltinType::ULong))) 2884 Current = Integer; 2885 else 2886 Current = SSE; 2887 2888 // If this type crosses an eightbyte boundary, it should be 2889 // split. 2890 if (OffsetBase && OffsetBase != 64) 2891 Hi = Lo; 2892 } else if (Size == 128 || 2893 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) { 2894 QualType ElementType = VT->getElementType(); 2895 2896 // gcc passes 256 and 512 bit <X x __int128> vectors in memory. :( 2897 if (passInt128VectorsInMem() && Size != 128 && 2898 (ElementType->isSpecificBuiltinType(BuiltinType::Int128) || 2899 ElementType->isSpecificBuiltinType(BuiltinType::UInt128))) 2900 return; 2901 2902 // Arguments of 256-bits are split into four eightbyte chunks. The 2903 // least significant one belongs to class SSE and all the others to class 2904 // SSEUP. The original Lo and Hi design considers that types can't be 2905 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. 2906 // This design isn't correct for 256-bits, but since there're no cases 2907 // where the upper parts would need to be inspected, avoid adding 2908 // complexity and just consider Hi to match the 64-256 part. 2909 // 2910 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in 2911 // registers if they are "named", i.e. not part of the "..." of a 2912 // variadic function. 2913 // 2914 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are 2915 // split into eight eightbyte chunks, one SSE and seven SSEUP. 
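// e.g. a named __m256 argument classifies as (SSE, SSEUp) and is passed in
// a single YMM register when the AVX level permits it.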
2916 Lo = SSE; 2917 Hi = SSEUp; 2918 } 2919 return; 2920 } 2921 2922 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 2923 QualType ET = getContext().getCanonicalType(CT->getElementType()); 2924 2925 uint64_t Size = getContext().getTypeSize(Ty); 2926 if (ET->isIntegralOrEnumerationType()) { 2927 if (Size <= 64) 2928 Current = Integer; 2929 else if (Size <= 128) 2930 Lo = Hi = Integer; 2931 } else if (ET == getContext().FloatTy) { 2932 Current = SSE; 2933 } else if (ET == getContext().DoubleTy) { 2934 Lo = Hi = SSE; 2935 } else if (ET == getContext().LongDoubleTy) { 2936 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 2937 if (LDF == &llvm::APFloat::IEEEquad()) 2938 Current = Memory; 2939 else if (LDF == &llvm::APFloat::x87DoubleExtended()) 2940 Current = ComplexX87; 2941 else if (LDF == &llvm::APFloat::IEEEdouble()) 2942 Lo = Hi = SSE; 2943 else 2944 llvm_unreachable("unexpected long double representation!"); 2945 } 2946 2947 // If this complex type crosses an eightbyte boundary then it 2948 // should be split. 2949 uint64_t EB_Real = (OffsetBase) / 64; 2950 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; 2951 if (Hi == NoClass && EB_Real != EB_Imag) 2952 Hi = Lo; 2953 2954 return; 2955 } 2956 2957 if (const auto *EITy = Ty->getAs<ExtIntType>()) { 2958 if (EITy->getNumBits() <= 64) 2959 Current = Integer; 2960 else if (EITy->getNumBits() <= 128) 2961 Lo = Hi = Integer; 2962 // Larger values need to get passed in memory. 2963 return; 2964 } 2965 2966 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 2967 // Arrays are treated like structures. 2968 2969 uint64_t Size = getContext().getTypeSize(Ty); 2970 2971 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 2972 // than eight eightbytes, ..., it has class MEMORY. 2973 if (Size > 512) 2974 return; 2975 2976 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 2977 // fields, it has class MEMORY. 2978 // 2979 // Only need to check alignment of array base. 2980 if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) 2981 return; 2982 2983 // Otherwise implement simplified merge. We could be smarter about 2984 // this, but it isn't worth it and would be harder to verify. 2985 Current = NoClass; 2986 uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); 2987 uint64_t ArraySize = AT->getSize().getZExtValue(); 2988 2989 // The only case a 256-bit wide vector could be used is when the array 2990 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 2991 // to work for sizes wider than 128, early check and fallback to memory. 2992 // 2993 if (Size > 128 && 2994 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel))) 2995 return; 2996 2997 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 2998 Class FieldLo, FieldHi; 2999 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg); 3000 Lo = merge(Lo, FieldLo); 3001 Hi = merge(Hi, FieldHi); 3002 if (Lo == Memory || Hi == Memory) 3003 break; 3004 } 3005 3006 postMerge(Size, Lo, Hi); 3007 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 3008 return; 3009 } 3010 3011 if (const RecordType *RT = Ty->getAs<RecordType>()) { 3012 uint64_t Size = getContext().getTypeSize(Ty); 3013 3014 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 3015 // than eight eightbytes, ..., it has class MEMORY. 3016 if (Size > 512) 3017 return; 3018 3019 // AMD64-ABI 3.2.3p2: Rule 2. 
If a C++ object has either a non-trivial 3020 // copy constructor or a non-trivial destructor, it is passed by invisible 3021 // reference. 3022 if (getRecordArgABI(RT, getCXXABI())) 3023 return; 3024 3025 const RecordDecl *RD = RT->getDecl(); 3026 3027 // Assume variable-sized types are passed in memory. 3028 if (RD->hasFlexibleArrayMember()) 3029 return; 3030 3031 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 3032 3033 // Reset Lo class; this will be recomputed. 3034 Current = NoClass; 3035 3036 // If this is a C++ record, classify the bases first. 3037 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 3038 for (const auto &I : CXXRD->bases()) { 3039 assert(!I.isVirtual() && !I.getType()->isDependentType() && 3040 "Unexpected base class!"); 3041 const auto *Base = 3042 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); 3043 3044 // Classify this field. 3045 // 3046 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 3047 // single eightbyte, each is classified separately. Each eightbyte gets 3048 // initialized to class NO_CLASS. 3049 Class FieldLo, FieldHi; 3050 uint64_t Offset = 3051 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); 3052 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg); 3053 Lo = merge(Lo, FieldLo); 3054 Hi = merge(Hi, FieldHi); 3055 if (Lo == Memory || Hi == Memory) { 3056 postMerge(Size, Lo, Hi); 3057 return; 3058 } 3059 } 3060 } 3061 3062 // Classify the fields one at a time, merging the results. 3063 unsigned idx = 0; 3064 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3065 i != e; ++i, ++idx) { 3066 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 3067 bool BitField = i->isBitField(); 3068 3069 // Ignore padding bit-fields. 3070 if (BitField && i->isUnnamedBitfield()) 3071 continue; 3072 3073 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than 3074 // four eightbytes, or it contains unaligned fields, it has class MEMORY. 3075 // 3076 // The only case a 256-bit wide vector could be used is when the struct 3077 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 3078 // to work for sizes wider than 128, check early and fall back to memory. 3079 // 3080 if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) || 3081 Size > getNativeVectorSizeForAVXABI(AVXLevel))) { 3082 Lo = Memory; 3083 postMerge(Size, Lo, Hi); 3084 return; 3085 } 3086 // Note: skip this test for bit-fields; see below. 3087 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { 3088 Lo = Memory; 3089 postMerge(Size, Lo, Hi); 3090 return; 3091 } 3092 3093 // Classify this field. 3094 // 3095 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate 3096 // exceeds a single eightbyte, each is classified 3097 // separately. Each eightbyte gets initialized to class 3098 // NO_CLASS. 3099 Class FieldLo, FieldHi; 3100 3101 // Bit-fields require special handling: they do not force the 3102 // structure to be passed in memory even if unaligned, and 3103 // therefore they can straddle an eightbyte.
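// e.g. struct { unsigned long long a : 60, b : 8; } places 'b' at bits
// [60, 68), straddling the first eightbyte boundary; 'b' then contributes
// Integer to both Lo and Hi.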
3104 if (BitField) { 3105 assert(!i->isUnnamedBitfield()); 3106 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 3107 uint64_t Size = i->getBitWidthValue(getContext()); 3108 3109 uint64_t EB_Lo = Offset / 64; 3110 uint64_t EB_Hi = (Offset + Size - 1) / 64; 3111 3112 if (EB_Lo) { 3113 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 3114 FieldLo = NoClass; 3115 FieldHi = Integer; 3116 } else { 3117 FieldLo = Integer; 3118 FieldHi = EB_Hi ? Integer : NoClass; 3119 } 3120 } else 3121 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg); 3122 Lo = merge(Lo, FieldLo); 3123 Hi = merge(Hi, FieldHi); 3124 if (Lo == Memory || Hi == Memory) 3125 break; 3126 } 3127 3128 postMerge(Size, Lo, Hi); 3129 } 3130 } 3131 3132 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { 3133 // If this is a scalar LLVM value then assume LLVM will pass it in the right 3134 // place naturally. 3135 if (!isAggregateTypeForABI(Ty)) { 3136 // Treat an enum type as its underlying type. 3137 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3138 Ty = EnumTy->getDecl()->getIntegerType(); 3139 3140 if (Ty->isExtIntType()) 3141 return getNaturalAlignIndirect(Ty); 3142 3143 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) 3144 : ABIArgInfo::getDirect()); 3145 } 3146 3147 return getNaturalAlignIndirect(Ty); 3148 } 3149 3150 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { 3151 if (const VectorType *VecTy = Ty->getAs<VectorType>()) { 3152 uint64_t Size = getContext().getTypeSize(VecTy); 3153 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel); 3154 if (Size <= 64 || Size > LargestVector) 3155 return true; 3156 QualType EltTy = VecTy->getElementType(); 3157 if (passInt128VectorsInMem() && 3158 (EltTy->isSpecificBuiltinType(BuiltinType::Int128) || 3159 EltTy->isSpecificBuiltinType(BuiltinType::UInt128))) 3160 return true; 3161 } 3162 3163 return false; 3164 } 3165 3166 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, 3167 unsigned freeIntRegs) const { 3168 // If this is a scalar LLVM value then assume LLVM will pass it in the right 3169 // place naturally. 3170 // 3171 // This assumption is optimistic, as there could be free registers available 3172 // when we need to pass this argument in memory, and LLVM could try to pass 3173 // the argument in the free register. This does not seem to happen currently, 3174 // but this code would be much safer if we could mark the argument with 3175 // 'onstack'. See PR12193. 3176 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) && 3177 !Ty->isExtIntType()) { 3178 // Treat an enum type as its underlying type. 3179 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3180 Ty = EnumTy->getDecl()->getIntegerType(); 3181 3182 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) 3183 : ABIArgInfo::getDirect()); 3184 } 3185 3186 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 3187 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 3188 3189 // Compute the byval alignment. We specify the alignment of the byval in all 3190 // cases so that the mid-level optimizer knows the alignment of the byval. 3191 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); 3192 3193 // Attempt to avoid passing indirect results using byval when possible. This 3194 // is important for good codegen. 
3195 //
3196 // We do this by coercing the value into a scalar type which the backend can
3197 // handle naturally (i.e., without using byval).
3198 //
3199 // For simplicity, we currently only do this when we have exhausted all of the
3200 // free integer registers. Doing this when there are free integer registers
3201 // would require more care, as we would have to ensure that the coerced value
3202 // did not claim the unused register. That would require either reordering the
3203 // arguments to the function (so that any subsequent inreg values came first),
3204 // or only doing this optimization when there were no following arguments that
3205 // might be inreg.
3206 //
3207 // We currently expect it to be rare (particularly in well written code) for
3208 // arguments to be passed on the stack when there are still free integer
3209 // registers available (this would typically imply large structs being passed
3210 // by value), so this seems like a fair tradeoff for now.
3211 //
3212 // We can revisit this if the backend grows support for 'onstack' parameter
3213 // attributes. See PR12193.
3214 if (freeIntRegs == 0) {
3215 uint64_t Size = getContext().getTypeSize(Ty);
3216
3217 // If this type fits in an eightbyte, coerce it into the matching integral
3218 // type, which will end up on the stack (with alignment 8).
3219 if (Align == 8 && Size <= 64)
3220 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
3221 Size));
3222 }
3223
3224 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
3225 }
3226
3227 /// The ABI specifies that a value should be passed in a full vector XMM/YMM
3228 /// register. Pick an LLVM IR type that will be passed as a vector register.
3229 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
3230 // Wrapper structs/arrays that only contain vectors are passed just like
3231 // vectors; strip them off if present.
3232 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
3233 Ty = QualType(InnerTy, 0);
3234
3235 llvm::Type *IRType = CGT.ConvertType(Ty);
3236 if (isa<llvm::VectorType>(IRType)) {
3237 // Don't pass vXi128 vectors in their native type; the backend can't
3238 // legalize them.
3239 if (passInt128VectorsInMem() &&
3240 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
3241 // Use a vXi64 vector.
3242 uint64_t Size = getContext().getTypeSize(Ty);
3243 return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
3244 Size / 64);
3245 }
3246
3247 return IRType;
3248 }
3249
3250 if (IRType->getTypeID() == llvm::Type::FP128TyID)
3251 return IRType;
3252
3253 // We couldn't find the preferred IR vector type for 'Ty'.
3254 uint64_t Size = getContext().getTypeSize(Ty);
3255 assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
3256
3257
3258 // Return an LLVM IR vector type based on the size of 'Ty'.
3259 return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
3260 Size / 64);
3261 }
3262
3263 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
3264 /// is known to either be off the end of the specified type or be in
3265 /// alignment padding. The user type specified is known to be at most 128 bits
3266 /// in size, and to have passed through X86_64ABIInfo::classify with a
3267 /// successful classification that put one of the two halves in the INTEGER class.
3268 ///
3269 /// It is conservatively correct to return false.
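///
/// For example (an illustrative note, not part of the original comment): for
/// struct { float f; int i; }, querying bits [64, 128) returns true because
/// the 64-bit type ends where the range starts, while querying [32, 96)
/// returns false because the int occupies bits [32, 64).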
3270 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, 3271 unsigned EndBit, ASTContext &Context) { 3272 // If the bytes being queried are off the end of the type, there is no user 3273 // data hiding here. This handles analysis of builtins, vectors and other 3274 // types that don't contain interesting padding. 3275 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 3276 if (TySize <= StartBit) 3277 return true; 3278 3279 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 3280 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 3281 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 3282 3283 // Check each element to see if the element overlaps with the queried range. 3284 for (unsigned i = 0; i != NumElts; ++i) { 3285 // If the element is after the span we care about, then we're done.. 3286 unsigned EltOffset = i*EltSize; 3287 if (EltOffset >= EndBit) break; 3288 3289 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0; 3290 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 3291 EndBit-EltOffset, Context)) 3292 return false; 3293 } 3294 // If it overlaps no elements, then it is safe to process as padding. 3295 return true; 3296 } 3297 3298 if (const RecordType *RT = Ty->getAs<RecordType>()) { 3299 const RecordDecl *RD = RT->getDecl(); 3300 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 3301 3302 // If this is a C++ record, check the bases first. 3303 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 3304 for (const auto &I : CXXRD->bases()) { 3305 assert(!I.isVirtual() && !I.getType()->isDependentType() && 3306 "Unexpected base class!"); 3307 const auto *Base = 3308 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); 3309 3310 // If the base is after the span we care about, ignore it. 3311 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 3312 if (BaseOffset >= EndBit) continue; 3313 3314 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 3315 if (!BitsContainNoUserData(I.getType(), BaseStart, 3316 EndBit-BaseOffset, Context)) 3317 return false; 3318 } 3319 } 3320 3321 // Verify that no field has data that overlaps the region of interest. Yes 3322 // this could be sped up a lot by being smarter about queried fields, 3323 // however we're only looking at structs up to 16 bytes, so we don't care 3324 // much. 3325 unsigned idx = 0; 3326 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3327 i != e; ++i, ++idx) { 3328 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 3329 3330 // If we found a field after the region we care about, then we're done. 3331 if (FieldOffset >= EndBit) break; 3332 3333 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 3334 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 3335 Context)) 3336 return false; 3337 } 3338 3339 // If nothing in this record overlapped the area of interest, then we're 3340 // clean. 3341 return true; 3342 } 3343 3344 return false; 3345 } 3346 3347 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 3348 /// float member at the specified offset. For example, {int,{float}} has a 3349 /// float at offset 4. It is conservatively correct for this routine to return 3350 /// false. 3351 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 3352 const llvm::DataLayout &TD) { 3353 // Base case if we find a float. 
3354 if (IROffset == 0 && IRType->isFloatTy()) 3355 return true; 3356 3357 // If this is a struct, recurse into the field at the specified offset. 3358 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 3359 const llvm::StructLayout *SL = TD.getStructLayout(STy); 3360 unsigned Elt = SL->getElementContainingOffset(IROffset); 3361 IROffset -= SL->getElementOffset(Elt); 3362 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 3363 } 3364 3365 // If this is an array, recurse into the field at the specified offset. 3366 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 3367 llvm::Type *EltTy = ATy->getElementType(); 3368 unsigned EltSize = TD.getTypeAllocSize(EltTy); 3369 IROffset -= IROffset/EltSize*EltSize; 3370 return ContainsFloatAtOffset(EltTy, IROffset, TD); 3371 } 3372 3373 return false; 3374 } 3375 3376 3377 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 3378 /// low 8 bytes of an XMM register, corresponding to the SSE class. 3379 llvm::Type *X86_64ABIInfo:: 3380 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 3381 QualType SourceTy, unsigned SourceOffset) const { 3382 // The only three choices we have are either double, <2 x float>, or float. We 3383 // pass as float if the last 4 bytes is just padding. This happens for 3384 // structs that contain 3 floats. 3385 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 3386 SourceOffset*8+64, getContext())) 3387 return llvm::Type::getFloatTy(getVMContext()); 3388 3389 // We want to pass as <2 x float> if the LLVM IR type contains a float at 3390 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 3391 // case. 3392 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && 3393 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) 3394 return llvm::FixedVectorType::get(llvm::Type::getFloatTy(getVMContext()), 3395 2); 3396 3397 return llvm::Type::getDoubleTy(getVMContext()); 3398 } 3399 3400 3401 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 3402 /// an 8-byte GPR. This means that we either have a scalar or we are talking 3403 /// about the high or low part of an up-to-16-byte struct. This routine picks 3404 /// the best LLVM IR type to represent this, which may be i64 or may be anything 3405 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 3406 /// etc). 3407 /// 3408 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 3409 /// the source type. IROffset is an offset in bytes into the LLVM IR type that 3410 /// the 8-byte value references. PrefType may be null. 3411 /// 3412 /// SourceTy is the source-level type for the entire argument. SourceOffset is 3413 /// an offset into this that we're processing (which is always either 0 or 8). 3414 /// 3415 llvm::Type *X86_64ABIInfo:: 3416 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 3417 QualType SourceTy, unsigned SourceOffset) const { 3418 // If we're dealing with an un-offset LLVM IR type, then it means that we're 3419 // returning an 8-byte unit starting with it. See if we can safely use it. 3420 if (IROffset == 0) { 3421 // Pointers and int64's always fill the 8-byte unit. 3422 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || 3423 IRType->isIntegerTy(64)) 3424 return IRType; 3425 3426 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 3427 // goodness in the source type is just tail padding. 
This is allowed to
3428 // kick in for struct {double,int} on the int, but not on
3429 // struct {double,int,int} because we wouldn't return the second int. We
3430 // have to do this analysis on the source type because we can't depend on
3431 // unions being lowered in a specific way, etc.
3432 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3433 IRType->isIntegerTy(32) ||
3434 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3435 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3436 cast<llvm::IntegerType>(IRType)->getBitWidth();
3437
3438 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
3439 SourceOffset*8+64, getContext()))
3440 return IRType;
3441 }
3442 }
3443
3444 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3445 // If this is a struct, recurse into the field at the specified offset.
3446 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
3447 if (IROffset < SL->getSizeInBytes()) {
3448 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3449 IROffset -= SL->getElementOffset(FieldIdx);
3450
3451 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3452 SourceTy, SourceOffset);
3453 }
3454 }
3455
3456 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3457 llvm::Type *EltTy = ATy->getElementType();
3458 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
3459 unsigned EltOffset = IROffset/EltSize*EltSize;
3460 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3461 SourceOffset);
3462 }
3463
3464 // Okay, we don't have any better idea of what to pass, so we pass this in an
3465 // integer register that isn't too big to fit the rest of the struct.
3466 unsigned TySizeInBytes =
3467 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
3468
3469 assert(TySizeInBytes != SourceOffset && "Empty field?");
3470
3471 // It is always safe to classify this as an integer type up to i64 that
3472 // isn't larger than the structure.
3473 return llvm::IntegerType::get(getVMContext(),
3474 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3475 }
3476
3477
3478 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
3479 /// be used as elements of a two register pair to pass or return, return a
3480 /// first class aggregate to represent them. For example, if the low part of
3481 /// a by-value argument should be passed as i32* and the high part as float,
3482 /// return {i32*, float}.
3483 static llvm::Type *
3484 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
3485 const llvm::DataLayout &TD) {
3486 // In order to correctly satisfy the ABI, we need the high part to start
3487 // at offset 8. If the high and low parts we inferred are both 4-byte types
3488 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
3489 // the second element at offset 8. Check for this:
3490 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3491 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3492 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3493 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
3494
3495 // To handle this, we have to increase the size of the low part so that the
3496 // second element will start at an 8-byte offset. We can't increase the size
3497 // of the second element because it might make us access off the end of the
3498 // struct.
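// For example (an illustrative case, not from the original comment): if Lo
// was inferred as float and Hi as i32, the naive pair {float, i32} would
// place the i32 at offset 4; promoting Lo to double below yields
// {double, i32}, which starts the high part at the required offset 8.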
3499 if (HiStart != 8) { 3500 // There are usually two sorts of types the ABI generation code can produce 3501 // for the low part of a pair that aren't 8 bytes in size: float or 3502 // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and 3503 // NaCl). 3504 // Promote these to a larger type. 3505 if (Lo->isFloatTy()) 3506 Lo = llvm::Type::getDoubleTy(Lo->getContext()); 3507 else { 3508 assert((Lo->isIntegerTy() || Lo->isPointerTy()) 3509 && "Invalid/unknown lo type"); 3510 Lo = llvm::Type::getInt64Ty(Lo->getContext()); 3511 } 3512 } 3513 3514 llvm::StructType *Result = llvm::StructType::get(Lo, Hi); 3515 3516 // Verify that the second element is at an 8-byte offset. 3517 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 && 3518 "Invalid x86-64 argument pair!"); 3519 return Result; 3520 } 3521 3522 ABIArgInfo X86_64ABIInfo:: 3523 classifyReturnType(QualType RetTy) const { 3524 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the 3525 // classification algorithm. 3526 X86_64ABIInfo::Class Lo, Hi; 3527 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true); 3528 3529 // Check some invariants. 3530 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 3531 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 3532 3533 llvm::Type *ResType = nullptr; 3534 switch (Lo) { 3535 case NoClass: 3536 if (Hi == NoClass) 3537 return ABIArgInfo::getIgnore(); 3538 // If the low part is just padding, it takes no register, leave ResType 3539 // null. 3540 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 3541 "Unknown missing lo part"); 3542 break; 3543 3544 case SSEUp: 3545 case X87Up: 3546 llvm_unreachable("Invalid classification for lo word."); 3547 3548 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via 3549 // hidden argument. 3550 case Memory: 3551 return getIndirectReturnResult(RetTy); 3552 3553 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next 3554 // available register of the sequence %rax, %rdx is used. 3555 case Integer: 3556 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 3557 3558 // If we have a sign or zero extended integer, make sure to return Extend 3559 // so that the parameter gets the right LLVM IR attributes. 3560 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 3561 // Treat an enum type as its underlying type. 3562 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3563 RetTy = EnumTy->getDecl()->getIntegerType(); 3564 3565 if (RetTy->isIntegralOrEnumerationType() && 3566 isPromotableIntegerTypeForABI(RetTy)) 3567 return ABIArgInfo::getExtend(RetTy); 3568 } 3569 break; 3570 3571 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 3572 // available SSE register of the sequence %xmm0, %xmm1 is used. 3573 case SSE: 3574 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 3575 break; 3576 3577 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 3578 // returned on the X87 stack in %st0 as 80-bit x87 number. 3579 case X87: 3580 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 3581 break; 3582 3583 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 3584 // part of the value is returned in %st0 and the imaginary part in 3585 // %st1. 
3586 case ComplexX87:
3587 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
3588 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
3589 llvm::Type::getX86_FP80Ty(getVMContext()));
3590 break;
3591 }
3592
3593 llvm::Type *HighPart = nullptr;
3594 switch (Hi) {
3595 // Memory was handled previously and X87 should
3596 // never occur as a hi class.
3597 case Memory:
3598 case X87:
3599 llvm_unreachable("Invalid classification for hi word.");
3600
3601 case ComplexX87: // Previously handled.
3602 case NoClass:
3603 break;
3604
3605 case Integer:
3606 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3607 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3608 return ABIArgInfo::getDirect(HighPart, 8);
3609 break;
3610 case SSE:
3611 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3612 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3613 return ABIArgInfo::getDirect(HighPart, 8);
3614 break;
3615
3616 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
3617 // is passed in the next available eightbyte chunk of the last used
3618 // vector register.
3619 //
3620 // SSEUP should always be preceded by SSE, just widen.
3621 case SSEUp:
3622 assert(Lo == SSE && "Unexpected SSEUp classification.");
3623 ResType = GetByteVectorType(RetTy);
3624 break;
3625
3626 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
3627 // returned together with the previous X87 value in %st0.
3628 case X87Up:
3629 // If X87Up is preceded by X87, we don't need to do
3630 // anything. However, in some cases with unions it may not be
3631 // preceded by X87. In such situations we follow gcc and pass the
3632 // extra bits in an SSE reg.
3633 if (Lo != X87) {
3634 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3635 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3636 return ABIArgInfo::getDirect(HighPart, 8);
3637 }
3638 break;
3639 }
3640
3641 // If a high part was specified, merge it together with the low part. It is
3642 // known to pass in the high eightbyte of the result. We do this by forming a
3643 // first class struct aggregate with the high and low part: {low, high}
3644 if (HighPart)
3645 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3646
3647 return ABIArgInfo::getDirect(ResType);
3648 }
3649
3650 ABIArgInfo X86_64ABIInfo::classifyArgumentType(
3651 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
3652 bool isNamedArg)
3653 const
3654 {
3655 Ty = useFirstFieldIfTransparentUnion(Ty);
3656
3657 X86_64ABIInfo::Class Lo, Hi;
3658 classify(Ty, 0, Lo, Hi, isNamedArg);
3659
3660 // Check some invariants.
3661 // FIXME: Enforce these by construction.
3662 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3663 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3664
3665 neededInt = 0;
3666 neededSSE = 0;
3667 llvm::Type *ResType = nullptr;
3668 switch (Lo) {
3669 case NoClass:
3670 if (Hi == NoClass)
3671 return ABIArgInfo::getIgnore();
3672 // If the low part is just padding, it takes no register, leave ResType
3673 // null.
3674 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3675 "Unknown missing lo part");
3676 break;
3677
3678 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
3679 // on the stack.
3680 case Memory:
3681
3682 // AMD64-ABI 3.2.3p3: Rule 5.
If the class is X87, X87UP or 3683 // COMPLEX_X87, it is passed in memory. 3684 case X87: 3685 case ComplexX87: 3686 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect) 3687 ++neededInt; 3688 return getIndirectResult(Ty, freeIntRegs); 3689 3690 case SSEUp: 3691 case X87Up: 3692 llvm_unreachable("Invalid classification for lo word."); 3693 3694 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 3695 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 3696 // and %r9 is used. 3697 case Integer: 3698 ++neededInt; 3699 3700 // Pick an 8-byte type based on the preferred type. 3701 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 3702 3703 // If we have a sign or zero extended integer, make sure to return Extend 3704 // so that the parameter gets the right LLVM IR attributes. 3705 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 3706 // Treat an enum type as its underlying type. 3707 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3708 Ty = EnumTy->getDecl()->getIntegerType(); 3709 3710 if (Ty->isIntegralOrEnumerationType() && 3711 isPromotableIntegerTypeForABI(Ty)) 3712 return ABIArgInfo::getExtend(Ty); 3713 } 3714 3715 break; 3716 3717 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 3718 // available SSE register is used, the registers are taken in the 3719 // order from %xmm0 to %xmm7. 3720 case SSE: { 3721 llvm::Type *IRType = CGT.ConvertType(Ty); 3722 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 3723 ++neededSSE; 3724 break; 3725 } 3726 } 3727 3728 llvm::Type *HighPart = nullptr; 3729 switch (Hi) { 3730 // Memory was handled previously, ComplexX87 and X87 should 3731 // never occur as hi classes, and X87Up must be preceded by X87, 3732 // which is passed in memory. 3733 case Memory: 3734 case X87: 3735 case ComplexX87: 3736 llvm_unreachable("Invalid classification for hi word."); 3737 3738 case NoClass: break; 3739 3740 case Integer: 3741 ++neededInt; 3742 // Pick an 8-byte type based on the preferred type. 3743 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 3744 3745 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 3746 return ABIArgInfo::getDirect(HighPart, 8); 3747 break; 3748 3749 // X87Up generally doesn't occur here (long double is passed in 3750 // memory), except in situations involving unions. 3751 case X87Up: 3752 case SSE: 3753 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 3754 3755 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 3756 return ABIArgInfo::getDirect(HighPart, 8); 3757 3758 ++neededSSE; 3759 break; 3760 3761 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 3762 // eightbyte is passed in the upper half of the last used SSE 3763 // register. This only happens when 128-bit vectors are passed. 3764 case SSEUp: 3765 assert(Lo == SSE && "Unexpected SSEUp classification"); 3766 ResType = GetByteVectorType(Ty); 3767 break; 3768 } 3769 3770 // If a high part was specified, merge it together with the low part. It is 3771 // known to pass in the high eightbyte of the result. 
We do this by forming a 3772 // first class struct aggregate with the high and low part: {low, high} 3773 if (HighPart) 3774 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 3775 3776 return ABIArgInfo::getDirect(ResType); 3777 } 3778 3779 ABIArgInfo 3780 X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt, 3781 unsigned &NeededSSE) const { 3782 auto RT = Ty->getAs<RecordType>(); 3783 assert(RT && "classifyRegCallStructType only valid with struct types"); 3784 3785 if (RT->getDecl()->hasFlexibleArrayMember()) 3786 return getIndirectReturnResult(Ty); 3787 3788 // Sum up bases 3789 if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) { 3790 if (CXXRD->isDynamicClass()) { 3791 NeededInt = NeededSSE = 0; 3792 return getIndirectReturnResult(Ty); 3793 } 3794 3795 for (const auto &I : CXXRD->bases()) 3796 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE) 3797 .isIndirect()) { 3798 NeededInt = NeededSSE = 0; 3799 return getIndirectReturnResult(Ty); 3800 } 3801 } 3802 3803 // Sum up members 3804 for (const auto *FD : RT->getDecl()->fields()) { 3805 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) { 3806 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE) 3807 .isIndirect()) { 3808 NeededInt = NeededSSE = 0; 3809 return getIndirectReturnResult(Ty); 3810 } 3811 } else { 3812 unsigned LocalNeededInt, LocalNeededSSE; 3813 if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt, 3814 LocalNeededSSE, true) 3815 .isIndirect()) { 3816 NeededInt = NeededSSE = 0; 3817 return getIndirectReturnResult(Ty); 3818 } 3819 NeededInt += LocalNeededInt; 3820 NeededSSE += LocalNeededSSE; 3821 } 3822 } 3823 3824 return ABIArgInfo::getDirect(); 3825 } 3826 3827 ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty, 3828 unsigned &NeededInt, 3829 unsigned &NeededSSE) const { 3830 3831 NeededInt = 0; 3832 NeededSSE = 0; 3833 3834 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE); 3835 } 3836 3837 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 3838 3839 const unsigned CallingConv = FI.getCallingConvention(); 3840 // It is possible to force Win64 calling convention on any x86_64 target by 3841 // using __attribute__((ms_abi)). In such case to correctly emit Win64 3842 // compatible code delegate this call to WinX86_64ABIInfo::computeInfo. 3843 if (CallingConv == llvm::CallingConv::Win64) { 3844 WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel); 3845 Win64ABIInfo.computeInfo(FI); 3846 return; 3847 } 3848 3849 bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall; 3850 3851 // Keep track of the number of assigned registers. 3852 unsigned FreeIntRegs = IsRegCall ? 11 : 6; 3853 unsigned FreeSSERegs = IsRegCall ? 
16 : 8; 3854 unsigned NeededInt, NeededSSE; 3855 3856 if (!::classifyReturnType(getCXXABI(), FI, *this)) { 3857 if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() && 3858 !FI.getReturnType()->getTypePtr()->isUnionType()) { 3859 FI.getReturnInfo() = 3860 classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE); 3861 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { 3862 FreeIntRegs -= NeededInt; 3863 FreeSSERegs -= NeededSSE; 3864 } else { 3865 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType()); 3866 } 3867 } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() && 3868 getContext().getCanonicalType(FI.getReturnType() 3869 ->getAs<ComplexType>() 3870 ->getElementType()) == 3871 getContext().LongDoubleTy) 3872 // Complex Long Double Type is passed in Memory when Regcall 3873 // calling convention is used. 3874 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType()); 3875 else 3876 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3877 } 3878 3879 // If the return value is indirect, then the hidden argument is consuming one 3880 // integer register. 3881 if (FI.getReturnInfo().isIndirect()) 3882 --FreeIntRegs; 3883 3884 // The chain argument effectively gives us another free register. 3885 if (FI.isChainCall()) 3886 ++FreeIntRegs; 3887 3888 unsigned NumRequiredArgs = FI.getNumRequiredArgs(); 3889 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 3890 // get assigned (in left-to-right order) for passing as follows... 3891 unsigned ArgNo = 0; 3892 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3893 it != ie; ++it, ++ArgNo) { 3894 bool IsNamedArg = ArgNo < NumRequiredArgs; 3895 3896 if (IsRegCall && it->type->isStructureOrClassType()) 3897 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE); 3898 else 3899 it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt, 3900 NeededSSE, IsNamedArg); 3901 3902 // AMD64-ABI 3.2.3p3: If there are no registers available for any 3903 // eightbyte of an argument, the whole argument is passed on the 3904 // stack. If registers have already been assigned for some 3905 // eightbytes of such an argument, the assignments get reverted. 3906 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { 3907 FreeIntRegs -= NeededInt; 3908 FreeSSERegs -= NeededSSE; 3909 } else { 3910 it->info = getIndirectResult(it->type, FreeIntRegs); 3911 } 3912 } 3913 } 3914 3915 static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, 3916 Address VAListAddr, QualType Ty) { 3917 Address overflow_arg_area_p = 3918 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 3919 llvm::Value *overflow_arg_area = 3920 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 3921 3922 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 3923 // byte boundary if alignment needed by type exceeds 8 byte boundary. 3924 // It isn't stated explicitly in the standard, but in practice we use 3925 // alignment greater than 16 where necessary. 3926 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty); 3927 if (Align > CharUnits::fromQuantity(8)) { 3928 overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area, 3929 Align); 3930 } 3931 3932 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 
3933 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3934 llvm::Value *Res =
3935 CGF.Builder.CreateBitCast(overflow_arg_area,
3936 llvm::PointerType::getUnqual(LTy));
3937
3938 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
3939 // l->overflow_arg_area + sizeof(type).
3940 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
3941 // an 8 byte boundary.
3942
3943 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
3944 llvm::Value *Offset =
3945 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
3946 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
3947 "overflow_arg_area.next");
3948 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
3949
3950 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
3951 return Address(Res, Align);
3952 }
3953
3954 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3955 QualType Ty) const {
3956 // Assume that va_list type is correct; should be pointer to LLVM type:
3957 // struct {
3958 // i32 gp_offset;
3959 // i32 fp_offset;
3960 // i8* overflow_arg_area;
3961 // i8* reg_save_area;
3962 // };
3963 unsigned neededInt, neededSSE;
3964
3965 Ty = getContext().getCanonicalType(Ty);
3966 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
3967 /*isNamedArg*/false);
3968
3969 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
3970 // in the registers. If not, go to step 7.
3971 if (!neededInt && !neededSSE)
3972 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3973
3974 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3975 // general purpose registers needed to pass type and num_fp to hold
3976 // the number of floating point registers needed.
3977
3978 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
3979 // registers. In the case l->gp_offset > 48 - num_gp * 8 or
3980 // l->fp_offset > 304 - num_fp * 16, go to step 7.
3981 //
3982 // NOTE: 304 is a typo in the ABI document; there are
3983 // (6 * 8 + 8 * 16) = 176 bytes of register save space.
3984
3985 llvm::Value *InRegs = nullptr;
3986 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
3987 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
3988 if (neededInt) {
3989 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
3990 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
3991 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
3992 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
3993 }
3994
3995 if (neededSSE) {
3996 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
3997 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
3998 llvm::Value *FitsInFP =
3999 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
4000 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
4001 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
4002 }
4003
4004 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
4005 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
4006 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
4007 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
4008
4009 // Emit code to load the value if it was passed in registers.
4010
4011 CGF.EmitBlock(InRegBlock);
4012
4013 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
4014 // an offset of l->gp_offset and/or l->fp_offset.
This may require 4015 // copying to a temporary location in case the parameter is passed 4016 // in different register classes or requires an alignment greater 4017 // than 8 for general purpose registers and 16 for XMM registers. 4018 // 4019 // FIXME: This really results in shameful code when we end up needing to 4020 // collect arguments from different places; often what should result in a 4021 // simple assembling of a structure from scattered addresses has many more 4022 // loads than necessary. Can we clean this up? 4023 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 4024 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad( 4025 CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area"); 4026 4027 Address RegAddr = Address::invalid(); 4028 if (neededInt && neededSSE) { 4029 // FIXME: Cleanup. 4030 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 4031 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 4032 Address Tmp = CGF.CreateMemTemp(Ty); 4033 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); 4034 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 4035 llvm::Type *TyLo = ST->getElementType(0); 4036 llvm::Type *TyHi = ST->getElementType(1); 4037 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 4038 "Unexpected ABI info for mixed regs"); 4039 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 4040 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 4041 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset); 4042 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset); 4043 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr; 4044 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr; 4045 4046 // Copy the first element. 4047 // FIXME: Our choice of alignment here and below is probably pessimistic. 4048 llvm::Value *V = CGF.Builder.CreateAlignedLoad( 4049 TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo), 4050 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo))); 4051 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 4052 4053 // Copy the second element. 4054 V = CGF.Builder.CreateAlignedLoad( 4055 TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi), 4056 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi))); 4057 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 4058 4059 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); 4060 } else if (neededInt) { 4061 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset), 4062 CharUnits::fromQuantity(8)); 4063 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); 4064 4065 // Copy to a temporary if necessary to ensure the appropriate alignment. 4066 std::pair<CharUnits, CharUnits> SizeAlign = 4067 getContext().getTypeInfoInChars(Ty); 4068 uint64_t TySize = SizeAlign.first.getQuantity(); 4069 CharUnits TyAlign = SizeAlign.second; 4070 4071 // Copy into a temporary if the type is more aligned than the 4072 // register save area. 
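// (Illustrative note, not from the original source: the general-purpose
// register save slots are only 8-byte aligned, which is why anything with
// stricter alignment is copied out to a suitably aligned temporary.)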
4073 if (TyAlign.getQuantity() > 8) { 4074 Address Tmp = CGF.CreateMemTemp(Ty); 4075 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false); 4076 RegAddr = Tmp; 4077 } 4078 4079 } else if (neededSSE == 1) { 4080 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset), 4081 CharUnits::fromQuantity(16)); 4082 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); 4083 } else { 4084 assert(neededSSE == 2 && "Invalid number of needed registers!"); 4085 // SSE registers are spaced 16 bytes apart in the register save 4086 // area, we need to collect the two eightbytes together. 4087 // The ABI isn't explicit about this, but it seems reasonable 4088 // to assume that the slots are 16-byte aligned, since the stack is 4089 // naturally 16-byte aligned and the prologue is expected to store 4090 // all the SSE registers to the RSA. 4091 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset), 4092 CharUnits::fromQuantity(16)); 4093 Address RegAddrHi = 4094 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo, 4095 CharUnits::fromQuantity(16)); 4096 llvm::Type *ST = AI.canHaveCoerceToType() 4097 ? AI.getCoerceToType() 4098 : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy); 4099 llvm::Value *V; 4100 Address Tmp = CGF.CreateMemTemp(Ty); 4101 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); 4102 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast( 4103 RegAddrLo, ST->getStructElementType(0))); 4104 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 4105 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast( 4106 RegAddrHi, ST->getStructElementType(1))); 4107 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 4108 4109 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); 4110 } 4111 4112 // AMD64-ABI 3.5.7p5: Step 5. Set: 4113 // l->gp_offset = l->gp_offset + num_gp * 8 4114 // l->fp_offset = l->fp_offset + num_fp * 16. 4115 if (neededInt) { 4116 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 4117 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 4118 gp_offset_p); 4119 } 4120 if (neededSSE) { 4121 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 4122 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 4123 fp_offset_p); 4124 } 4125 CGF.EmitBranch(ContBlock); 4126 4127 // Emit code to load the value if it was passed in memory. 4128 4129 CGF.EmitBlock(InMemBlock); 4130 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty); 4131 4132 // Return the appropriate result. 4133 4134 CGF.EmitBlock(ContBlock); 4135 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock, 4136 "vaarg.addr"); 4137 return ResAddr; 4138 } 4139 4140 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 4141 QualType Ty) const { 4142 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 4143 CGF.getContext().getTypeInfoInChars(Ty), 4144 CharUnits::fromQuantity(8), 4145 /*allowHigherAlign*/ false); 4146 } 4147 4148 ABIArgInfo 4149 WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs, 4150 const ABIArgInfo ¤t) const { 4151 // Assumes vectorCall calling convention. 
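// (Illustrative example, not from the original source: a struct of two
// __m128 fields is a homogeneous vector aggregate with NumElts == 2; when
// at least two XMM registers remain free it is reclassified here to be
// passed directly, otherwise the original classification is kept.)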
4152 const Type *Base = nullptr; 4153 uint64_t NumElts = 0; 4154 4155 if (!Ty->isBuiltinType() && !Ty->isVectorType() && 4156 isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) { 4157 FreeSSERegs -= NumElts; 4158 return getDirectX86Hva(); 4159 } 4160 return current; 4161 } 4162 4163 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs, 4164 bool IsReturnType, bool IsVectorCall, 4165 bool IsRegCall) const { 4166 4167 if (Ty->isVoidType()) 4168 return ABIArgInfo::getIgnore(); 4169 4170 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4171 Ty = EnumTy->getDecl()->getIntegerType(); 4172 4173 TypeInfo Info = getContext().getTypeInfo(Ty); 4174 uint64_t Width = Info.Width; 4175 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align); 4176 4177 const RecordType *RT = Ty->getAs<RecordType>(); 4178 if (RT) { 4179 if (!IsReturnType) { 4180 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI())) 4181 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 4182 } 4183 4184 if (RT->getDecl()->hasFlexibleArrayMember()) 4185 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 4186 4187 } 4188 4189 const Type *Base = nullptr; 4190 uint64_t NumElts = 0; 4191 // vectorcall adds the concept of a homogenous vector aggregate, similar to 4192 // other targets. 4193 if ((IsVectorCall || IsRegCall) && 4194 isHomogeneousAggregate(Ty, Base, NumElts)) { 4195 if (IsRegCall) { 4196 if (FreeSSERegs >= NumElts) { 4197 FreeSSERegs -= NumElts; 4198 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType()) 4199 return ABIArgInfo::getDirect(); 4200 return ABIArgInfo::getExpand(); 4201 } 4202 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 4203 } else if (IsVectorCall) { 4204 if (FreeSSERegs >= NumElts && 4205 (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) { 4206 FreeSSERegs -= NumElts; 4207 return ABIArgInfo::getDirect(); 4208 } else if (IsReturnType) { 4209 return ABIArgInfo::getExpand(); 4210 } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) { 4211 // HVAs are delayed and reclassified in the 2nd step. 4212 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 4213 } 4214 } 4215 } 4216 4217 if (Ty->isMemberPointerType()) { 4218 // If the member pointer is represented by an LLVM int or ptr, pass it 4219 // directly. 4220 llvm::Type *LLTy = CGT.ConvertType(Ty); 4221 if (LLTy->isPointerTy() || LLTy->isIntegerTy()) 4222 return ABIArgInfo::getDirect(); 4223 } 4224 4225 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) { 4226 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 4227 // not 1, 2, 4, or 8 bytes, must be passed by reference." 4228 if (Width > 64 || !llvm::isPowerOf2_64(Width)) 4229 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 4230 4231 // Otherwise, coerce it to a small integer. 4232 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width)); 4233 } 4234 4235 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 4236 switch (BT->getKind()) { 4237 case BuiltinType::Bool: 4238 // Bool type is always extended to the ABI, other builtin types are not 4239 // extended. 4240 return ABIArgInfo::getExtend(Ty); 4241 4242 case BuiltinType::LongDouble: 4243 // Mingw64 GCC uses the old 80 bit extended precision floating point 4244 // unit. It passes them indirectly through memory. 
4245 if (IsMingw64) {
4246 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
4247 if (LDF == &llvm::APFloat::x87DoubleExtended())
4248 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4249 }
4250 break;
4251
4252 case BuiltinType::Int128:
4253 case BuiltinType::UInt128:
4254 // If it's a parameter type, the normal ABI rule is that arguments larger
4255 // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
4256 // even though it isn't particularly efficient.
4257 if (!IsReturnType)
4258 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4259
4260 // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
4261 // Clang matches them for compatibility.
4262 return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
4263 llvm::Type::getInt64Ty(getVMContext()), 2));
4264
4265 default:
4266 break;
4267 }
4268 }
4269
4270 if (Ty->isExtIntType()) {
4271 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4272 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4273 // However, non-power-of-two _ExtInts will be passed as 1, 2, 4, or 8 bytes
4274 // anyway as long as they fit in them, so we don't have to check for a
4275 // power of 2.
4276 if (Width <= 64)
4277 return ABIArgInfo::getDirect();
4278 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4279 }
4280
4281 return ABIArgInfo::getDirect();
4282 }
4283
4284 void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI,
4285 unsigned FreeSSERegs,
4286 bool IsVectorCall,
4287 bool IsRegCall) const {
4288 unsigned Count = 0;
4289 for (auto &I : FI.arguments()) {
4290 // Vectorcall in x64 only permits the first 6 arguments to be passed
4291 // in XMM/YMM registers.
4292 if (Count < VectorcallMaxParamNumAsReg)
4293 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
4294 else {
4295 // Since these cannot be passed in registers, pretend no registers
4296 // are left.
4297 unsigned ZeroSSERegsAvail = 0;
4298 I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false,
4299 IsVectorCall, IsRegCall);
4300 }
4301 ++Count;
4302 }
4303
4304 for (auto &I : FI.arguments()) {
4305 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
4306 }
4307 }
4308
4309 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
4310 const unsigned CC = FI.getCallingConvention();
4311 bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
4312 bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
4313
4314 // If __attribute__((sysv_abi)) is in use, use the SysV argument
4315 // classification rules.
4316 if (CC == llvm::CallingConv::X86_64_SysV) {
4317 X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
4318 SysVABIInfo.computeInfo(FI);
4319 return;
4320 }
4321
4322 unsigned FreeSSERegs = 0;
4323 if (IsVectorCall) {
4324 // We can use up to 4 SSE return registers with vectorcall.
4325 FreeSSERegs = 4;
4326 } else if (IsRegCall) {
4327 // RegCall gives us 16 SSE registers.
4328 FreeSSERegs = 16;
4329 }
4330
4331 if (!getCXXABI().classifyReturnType(FI))
4332 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
4333 IsVectorCall, IsRegCall);
4334
4335 if (IsVectorCall) {
4336 // We can use up to 6 SSE register parameters with vectorcall.
4337 FreeSSERegs = 6;
4338 } else if (IsRegCall) {
4339 // RegCall gives us 16 SSE registers; we can reuse the return registers.
4340 FreeSSERegs = 16; 4341 } 4342 4343 if (IsVectorCall) { 4344 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall); 4345 } else { 4346 for (auto &I : FI.arguments()) 4347 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall); 4348 } 4349 4350 } 4351 4352 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4353 QualType Ty) const { 4354 4355 bool IsIndirect = false; 4356 4357 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 4358 // not 1, 2, 4, or 8 bytes, must be passed by reference." 4359 if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) { 4360 uint64_t Width = getContext().getTypeSize(Ty); 4361 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width); 4362 } 4363 4364 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, 4365 CGF.getContext().getTypeInfoInChars(Ty), 4366 CharUnits::fromQuantity(8), 4367 /*allowHigherAlign*/ false); 4368 } 4369 4370 static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4371 llvm::Value *Address, bool Is64Bit, 4372 bool IsAIX) { 4373 // This is calculated from the LLVM and GCC tables and verified 4374 // against gcc output. AFAIK all PPC ABIs use the same encoding. 4375 4376 CodeGen::CGBuilderTy &Builder = CGF.Builder; 4377 4378 llvm::IntegerType *i8 = CGF.Int8Ty; 4379 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 4380 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 4381 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 4382 4383 // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers 4384 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31); 4385 4386 // 32-63: fp0-31, the 8-byte floating-point registers 4387 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 4388 4389 // 64-67 are various 4-byte or 8-byte special-purpose registers: 4390 // 64: mq 4391 // 65: lr 4392 // 66: ctr 4393 // 67: ap 4394 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67); 4395 4396 // 68-76 are various 4-byte special-purpose registers: 4397 // 68-75 cr0-7 4398 // 76: xer 4399 AssignToArrayRange(Builder, Address, Four8, 68, 76); 4400 4401 // 77-108: v0-31, the 16-byte vector registers 4402 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 4403 4404 // 109: vrsave 4405 // 110: vscr 4406 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110); 4407 4408 // AIX does not utilize the rest of the registers. 4409 if (IsAIX) 4410 return false; 4411 4412 // 111: spe_acc 4413 // 112: spefscr 4414 // 113: sfp 4415 AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113); 4416 4417 if (!Is64Bit) 4418 return false; 4419 4420 // TODO: Need to verify if these registers are used on 64 bit AIX with Power8 4421 // or above CPU. 4422 // 64-bit only registers: 4423 // 114: tfhar 4424 // 115: tfiar 4425 // 116: texasr 4426 AssignToArrayRange(Builder, Address, Eight8, 114, 116); 4427 4428 return false; 4429 } 4430 4431 // AIX 4432 namespace { 4433 /// AIXABIInfo - The AIX XCOFF ABI information. 4434 class AIXABIInfo : public ABIInfo { 4435 const bool Is64Bit; 4436 const unsigned PtrByteSize; 4437 CharUnits getParamTypeAlignment(QualType Ty) const; 4438 4439 public: 4440 AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit) 4441 : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 
8 : 4) {} 4442 4443 bool isPromotableTypeForABI(QualType Ty) const; 4444 4445 ABIArgInfo classifyReturnType(QualType RetTy) const; 4446 ABIArgInfo classifyArgumentType(QualType Ty) const; 4447 4448 void computeInfo(CGFunctionInfo &FI) const override { 4449 if (!getCXXABI().classifyReturnType(FI)) 4450 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4451 4452 for (auto &I : FI.arguments()) 4453 I.info = classifyArgumentType(I.type); 4454 } 4455 4456 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4457 QualType Ty) const override; 4458 }; 4459 4460 class AIXTargetCodeGenInfo : public TargetCodeGenInfo { 4461 const bool Is64Bit; 4462 4463 public: 4464 AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit) 4465 : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)), 4466 Is64Bit(Is64Bit) {} 4467 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4468 return 1; // r1 is the dedicated stack pointer 4469 } 4470 4471 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4472 llvm::Value *Address) const override; 4473 }; 4474 } // namespace 4475 4476 // Return true if the ABI requires Ty to be passed sign- or zero- 4477 // extended to 32/64 bits. 4478 bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const { 4479 // Treat an enum type as its underlying type. 4480 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4481 Ty = EnumTy->getDecl()->getIntegerType(); 4482 4483 // Promotable integer types are required to be promoted by the ABI. 4484 if (Ty->isPromotableIntegerType()) 4485 return true; 4486 4487 if (!Is64Bit) 4488 return false; 4489 4490 // For 64 bit mode, in addition to the usual promotable integer types, we also 4491 // need to extend all 32-bit types, since the ABI requires promotion to 64 4492 // bits. 4493 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 4494 switch (BT->getKind()) { 4495 case BuiltinType::Int: 4496 case BuiltinType::UInt: 4497 return true; 4498 default: 4499 break; 4500 } 4501 4502 return false; 4503 } 4504 4505 ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const { 4506 if (RetTy->isAnyComplexType()) 4507 return ABIArgInfo::getDirect(); 4508 4509 if (RetTy->isVectorType()) 4510 llvm::report_fatal_error("vector type is not supported on AIX yet"); 4511 4512 if (RetTy->isVoidType()) 4513 return ABIArgInfo::getIgnore(); 4514 4515 if (isAggregateTypeForABI(RetTy)) 4516 return getNaturalAlignIndirect(RetTy); 4517 4518 return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 4519 : ABIArgInfo::getDirect()); 4520 } 4521 4522 ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const { 4523 Ty = useFirstFieldIfTransparentUnion(Ty); 4524 4525 if (Ty->isAnyComplexType()) 4526 return ABIArgInfo::getDirect(); 4527 4528 if (Ty->isVectorType()) 4529 llvm::report_fatal_error("vector type is not supported on AIX yet"); 4530 4531 if (isAggregateTypeForABI(Ty)) { 4532 // Records with non-trivial destructors/copy-constructors should not be 4533 // passed by value. 4534 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 4535 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 4536 4537 CharUnits CCAlign = getParamTypeAlignment(Ty); 4538 CharUnits TyAlign = getContext().getTypeAlignInChars(Ty); 4539 4540 return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true, 4541 /*Realign*/ TyAlign > CCAlign); 4542 } 4543 4544 return (isPromotableTypeForABI(Ty) ? 
ABIArgInfo::getExtend(Ty) 4545 : ABIArgInfo::getDirect()); 4546 } 4547 4548 CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const { 4549 // Complex types are passed just like their elements. 4550 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 4551 Ty = CTy->getElementType(); 4552 4553 if (Ty->isVectorType()) 4554 llvm::report_fatal_error("vector type is not supported on AIX yet"); 4555 4556 // If the structure contains a vector type, the alignment is 16. 4557 if (isRecordWithSIMDVectorType(getContext(), Ty)) 4558 return CharUnits::fromQuantity(16); 4559 4560 return CharUnits::fromQuantity(PtrByteSize); 4561 } 4562 4563 Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4564 QualType Ty) const { 4565 if (Ty->isAnyComplexType()) 4566 llvm::report_fatal_error("complex type is not supported on AIX yet"); 4567 4568 if (Ty->isVectorType()) 4569 llvm::report_fatal_error("vector type is not supported on AIX yet"); 4570 4571 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 4572 TypeInfo.second = getParamTypeAlignment(Ty); 4573 4574 CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize); 4575 4576 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo, 4577 SlotSize, /*AllowHigher*/ true); 4578 } 4579 4580 bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable( 4581 CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const { 4582 return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true); 4583 } 4584 4585 // PowerPC-32 4586 namespace { 4587 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information. 4588 class PPC32_SVR4_ABIInfo : public DefaultABIInfo { 4589 bool IsSoftFloatABI; 4590 bool IsRetSmallStructInRegABI; 4591 4592 CharUnits getParamTypeAlignment(QualType Ty) const; 4593 4594 public: 4595 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI, 4596 bool RetSmallStructInRegABI) 4597 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI), 4598 IsRetSmallStructInRegABI(RetSmallStructInRegABI) {} 4599 4600 ABIArgInfo classifyReturnType(QualType RetTy) const; 4601 4602 void computeInfo(CGFunctionInfo &FI) const override { 4603 if (!getCXXABI().classifyReturnType(FI)) 4604 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4605 for (auto &I : FI.arguments()) 4606 I.info = classifyArgumentType(I.type); 4607 } 4608 4609 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4610 QualType Ty) const override; 4611 }; 4612 4613 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo { 4614 public: 4615 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI, 4616 bool RetSmallStructInRegABI) 4617 : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>( 4618 CGT, SoftFloatABI, RetSmallStructInRegABI)) {} 4619 4620 static bool isStructReturnInRegABI(const llvm::Triple &Triple, 4621 const CodeGenOptions &Opts); 4622 4623 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4624 // This is recovered from gcc output. 4625 return 1; // r1 is the dedicated stack pointer 4626 } 4627 4628 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4629 llvm::Value *Address) const override; 4630 }; 4631 } 4632 4633 CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const { 4634 // Complex types are passed just like their elements. 4635 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 4636 Ty = CTy->getElementType(); 4637 4638 if (Ty->isVectorType()) 4639 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 
                                                                       : 4);

  // For single-element float/vector structs, we consider the whole type
  // to have the same alignment requirements as its single element.
  const Type *AlignTy = nullptr;
  if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
    const BuiltinType *BT = EltType->getAs<BuiltinType>();
    if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
        (BT && BT->isFloatingPoint()))
      AlignTy = EltType;
  }

  if (AlignTy)
    return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
  return CharUnits::fromQuantity(4);
}

ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  uint64_t Size;

  // -msvr4-struct-return puts small aggregates in GPR3 and GPR4.
  if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI &&
      (Size = getContext().getTypeSize(RetTy)) <= 64) {
    // System V ABI (1995), page 3-22, specified:
    // > A structure or union whose size is less than or equal to 8 bytes
    // > shall be returned in r3 and r4, as if it were first stored in the
    // > 8-byte aligned memory area and then the low addressed word were
    // > loaded into r3 and the high-addressed word into r4. Bits beyond
    // > the last member of the structure or union are not defined.
    //
    // GCC for big-endian PPC32 inserts the pad before the first member,
    // not "beyond the last member" of the struct. To stay compatible
    // with GCC, we coerce the struct to an integer of the same size.
    // LLVM will extend it and return i32 in r3, or i64 in r3:r4.
    if (Size == 0)
      return ABIArgInfo::getIgnore();
    else {
      llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  return DefaultABIInfo::classifyReturnType(RetTy);
}

// TODO: this implementation is now likely redundant with
// DefaultABIInfo::EmitVAArg.
Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
                                      QualType Ty) const {
  if (getTarget().getTriple().isOSDarwin()) {
    auto TI = getContext().getTypeInfoInChars(Ty);
    TI.second = getParamTypeAlignment(Ty);

    CharUnits SlotSize = CharUnits::fromQuantity(4);
    return emitVoidPtrVAArg(CGF, VAList, Ty,
                            classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
                            /*AllowHigherAlign=*/true);
  }

  const unsigned OverflowLimit = 8;
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    // TODO: Implement this. For now ignore.
    (void)CTy;
    return Address::invalid(); // FIXME?
  }

  // struct __va_list_tag {
  //   unsigned char gpr;
  //   unsigned char fpr;
  //   unsigned short reserved;
  //   void *overflow_arg_area;
  //   void *reg_save_area;
  // };

  bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
  bool isInt =
      Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
  bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;

  // All aggregates are passed indirectly? That doesn't seem consistent
  // with the argument-lowering code.
  bool isIndirect = Ty->isAggregateType();

  CGBuilderTy &Builder = CGF.Builder;

  // The calling convention either uses 1-2 GPRs or 1 FPR.
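  // As a rough illustration (hypothetical caller, hard-float SVR4): given
  //   void f(int a, ...);   f(1, 2LL, 3.0);
  // the i64 vararg first rounds the gpr count up to an even value so that it
  // occupies an aligned GPR pair, while the double consumes a single FPR.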
  Address NumRegsAddr = Address::invalid();
  if (isInt || IsSoftFloatABI) {
    NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
  } else {
    NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
  }

  llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");

  // "Align" the register count when TY is i64.
  if (isI64 || (isF64 && IsSoftFloatABI)) {
    NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
    NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
  }

  llvm::Value *CC =
      Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");

  llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
  llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
  llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");

  Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);

  llvm::Type *DirectTy = CGF.ConvertType(Ty);
  if (isIndirect) DirectTy = DirectTy->getPointerTo(0);

  // Case 1: consume registers.
  Address RegAddr = Address::invalid();
  {
    CGF.EmitBlock(UsingRegs);

    Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
    RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
                      CharUnits::fromQuantity(8));
    assert(RegAddr.getElementType() == CGF.Int8Ty);

    // Floating-point registers start after the general-purpose registers.
    if (!(isInt || IsSoftFloatABI)) {
      RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
                                                   CharUnits::fromQuantity(32));
    }

    // Get the address of the saved value by scaling the number of
    // registers we've used by the size of each register.
    CharUnits RegSize =
        CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
    llvm::Value *RegOffset =
        Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
    RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
                                                RegAddr.getPointer(), RegOffset),
                      RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
    RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);

    // Increase the used-register count.
    NumRegs = Builder.CreateAdd(
        NumRegs,
        Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
    Builder.CreateStore(NumRegs, NumRegsAddr);

    CGF.EmitBranch(Cont);
  }

  // Case 2: consume space in the overflow area.
  Address MemAddr = Address::invalid();
  {
    CGF.EmitBlock(UsingOverflow);

    Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);

    // Everything in the overflow area is rounded up to a size of at least 4.
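    // For example, va_arg(ap, short) still advances overflow_arg_area by 4
    // bytes, and va_arg(ap, long long) taken from the overflow area
    // advances it by 8.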
    CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);

    CharUnits Size;
    if (!isIndirect) {
      auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
      Size = TypeInfo.first.alignTo(OverflowAreaAlign);
    } else {
      Size = CGF.getPointerSize();
    }

    Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
    Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
                         OverflowAreaAlign);
    // Round up the address of the argument to its alignment.
    CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
    if (Align > OverflowAreaAlign) {
      llvm::Value *Ptr = OverflowArea.getPointer();
      OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
                             Align);
    }

    MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);

    // Increase the overflow area.
    OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
    Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
    CGF.EmitBranch(Cont);
  }

  CGF.EmitBlock(Cont);

  // Merge the cases with a phi.
  Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
                                "vaarg.addr");

  // Load the pointer if the argument was passed indirectly.
  if (isIndirect) {
    Result = Address(Builder.CreateLoad(Result, "aggr"),
                     getContext().getTypeAlignInChars(Ty));
  }

  return Result;
}

bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.getArch() == llvm::Triple::ppc);

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack: // -maix-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return
    return true;
  }

  if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
    return true;

  return false;
}

bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
                                     /*IsAIX*/ false);
}

// PowerPC-64

namespace {
/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public SwiftABIInfo {
public:
  enum ABIKind {
    ELFv1 = 0,
    ELFv2
  };

private:
  static const unsigned GPRBits = 64;
  ABIKind Kind;
  bool HasQPX;
  bool IsSoftFloatABI;

  // A vector of float or double will be promoted to <4 x f32> or <4 x f64>
  // and will be passed in a QPX register.
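  // For instance, on a QPX-enabled target a 4-element "vector double"
  // (256 bits) or a 4-element "vector float" (128 bits) satisfies the test
  // below, while any single-element vector never does.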
  bool IsQPXVectorTy(const Type *Ty) const {
    if (!HasQPX)
      return false;

    if (const VectorType *VT = Ty->getAs<VectorType>()) {
      unsigned NumElements = VT->getNumElements();
      if (NumElements == 1)
        return false;

      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
        if (getContext().getTypeSize(Ty) <= 256)
          return true;
      } else if (VT->getElementType()->
                     isSpecificBuiltinType(BuiltinType::Float)) {
        if (getContext().getTypeSize(Ty) <= 128)
          return true;
      }
    }

    return false;
  }

  bool IsQPXVectorTy(QualType Ty) const {
    return IsQPXVectorTy(Ty.getTypePtr());
  }

public:
  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX,
                     bool SoftFloatABI)
      : SwiftABIInfo(CGT), Kind(Kind), HasQPX(HasQPX),
        IsSoftFloatABI(SoftFloatABI) {}

  bool isPromotableTypeForABI(QualType Ty) const;
  CharUnits getParamTypeAlignment(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  // TODO: We can add more logic to computeInfo to improve performance.
  // Example: For aggregate arguments that fit in a register, we could
  // use getDirectInReg (as is done below for structs containing a single
  // floating-point value) to avoid pushing them to memory on function
  // entry. This would require changing the logic in PPCISelLowering
  // when lowering the parameters in the caller and args in the callee.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments()) {
      // We rely on the default argument classification for the most part.
      // One exception: An aggregate containing a single floating-point
      // or vector item must be passed in a register if one is available.
      const Type *T = isSingleElementStruct(I.type, getContext());
      if (T) {
        const BuiltinType *BT = T->getAs<BuiltinType>();
        if (IsQPXVectorTy(T) ||
            (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
            (BT && BT->isFloatingPoint())) {
          QualType QT(T, 0);
          I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
          continue;
        }
      }
      I.info = classifyArgumentType(I.type);
    }
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }

  bool isSwiftErrorInRegister() const override {
    return false;
  }
};

class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {

public:
  PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
                               PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,
                               bool SoftFloatABI)
      : TargetCodeGenInfo(std::make_unique<PPC64_SVR4_ABIInfo>(
            CGT, Kind, HasQPX, SoftFloatABI)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};

class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};

} // namespace

// Return true if the ABI requires Ty to be passed sign- or zero-
// extended to 64 bits.
bool
PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (isPromotableIntegerTypeForABI(Ty))
    return true;

  // In addition to the usual promotable integer types, we also need to
  // extend all 32-bit types, since the ABI requires promotion to 64 bits.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      break;
    }

  if (const auto *EIT = Ty->getAs<ExtIntType>())
    if (EIT->getNumBits() < 64)
      return true;

  return false;
}

/// getParamTypeAlignment - Determine the alignment a type requires in the
/// parameter area; 16 bytes or higher is significant. Always returns at
/// least 8.
CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
  // Complex types are passed just like their elements.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Only vector types of size 16 bytes need alignment (larger types are
  // passed via reference, smaller types are not aligned).
  if (IsQPXVectorTy(Ty)) {
    if (getContext().getTypeSize(Ty) > 128)
      return CharUnits::fromQuantity(32);

    return CharUnits::fromQuantity(16);
  } else if (Ty->isVectorType()) {
    return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
                                                                       : 8);
  }

  // For single-element float/vector structs, we consider the whole type
  // to have the same alignment requirements as its single element.
  const Type *AlignAsType = nullptr;
  const Type *EltType = isSingleElementStruct(Ty, getContext());
  if (EltType) {
    const BuiltinType *BT = EltType->getAs<BuiltinType>();
    if (IsQPXVectorTy(EltType) ||
        (EltType->isVectorType() &&
         getContext().getTypeSize(EltType) == 128) ||
        (BT && BT->isFloatingPoint()))
      AlignAsType = EltType;
  }

  // Likewise for ELFv2 homogeneous aggregates.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (!AlignAsType && Kind == ELFv2 &&
      isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
    AlignAsType = Base;

  // With special-case aggregates, only vector base types need alignment.
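  // e.g. a single-element struct wrapping a 128-bit "vector int" picks up
  // the 16-byte vector alignment below, while one wrapping a double keeps
  // the default 8-byte doubleword alignment.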
  if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
    if (getContext().getTypeSize(AlignAsType) > 128)
      return CharUnits::fromQuantity(32);

    return CharUnits::fromQuantity(16);
  } else if (AlignAsType) {
    return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
  }

  // Otherwise, we only need alignment for any aggregate type that
  // has an alignment requirement of >= 16 bytes.
  if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
    if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
      return CharUnits::fromQuantity(32);
    return CharUnits::fromQuantity(16);
  }

  return CharUnits::fromQuantity(8);
}

/// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
/// aggregate. Base is set to the base element type, and Members is set
/// to the number of base elements.
bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
                                     uint64_t &Members) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getSize().getZExtValue();
    if (NElements == 0)
      return false;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
      return false;
    Members *= NElements;
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return false;

    Members = 0;

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        // Ignore empty records.
        if (isEmptyRecord(getContext(), I.getType(), true))
          continue;

        uint64_t FldMembers;
        if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
          return false;

        Members += FldMembers;
      }
    }

    for (const auto *FD : RD->fields()) {
      // Ignore (non-zero arrays of) empty records.
      QualType FT = FD->getType();
      while (const ConstantArrayType *AT =
                 getContext().getAsConstantArrayType(FT)) {
        if (AT->getSize().getZExtValue() == 0)
          return false;
        FT = AT->getElementType();
      }
      if (isEmptyRecord(getContext(), FT, true))
        continue;

      // For compatibility with GCC, ignore empty bitfields in C++ mode.
      if (getContext().getLangOpts().CPlusPlus &&
          FD->isZeroLengthBitField(getContext()))
        continue;

      uint64_t FldMembers;
      if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
        return false;

      Members = (RD->isUnion() ?
                 std::max(Members, FldMembers) : Members + FldMembers);
    }

    if (!Base)
      return false;

    // Ensure there is no padding.
    if (getContext().getTypeSize(Base) * Members !=
        getContext().getTypeSize(Ty))
      return false;
  } else {
    Members = 1;
    if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
      Members = 2;
      Ty = CT->getElementType();
    }

    // Most ABIs only support float, double, and some vector type widths.
    if (!isHomogeneousAggregateBaseType(Ty))
      return false;

    // The base type must be the same for all members. Types that
    // agree in both total size and mode (float vs. vector) are
    // treated as being equivalent here.
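    // For instance, a struct holding both a "vector int" field and a
    // "vector float" field still qualifies: both are 128-bit vectors, so
    // they agree in size and mode even though their element types differ.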
    const Type *TyPtr = Ty.getTypePtr();
    if (!Base) {
      Base = TyPtr;
      // The ABI size of a non-power-of-2 vector is already rounded up to a
      // power of 2, so widen the base type explicitly to match that padded
      // size.
      if (const VectorType *VT = Base->getAs<VectorType>()) {
        QualType EltTy = VT->getElementType();
        unsigned NumElements =
            getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
        Base = getContext()
                   .getVectorType(EltTy, NumElements, VT->getVectorKind())
                   .getTypePtr();
      }
    }

    if (Base->isVectorType() != TyPtr->isVectorType() ||
        getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
      return false;
  }
  return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
}

bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for ELFv2 must have base types of float,
  // double, long double, or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble ||
        (getContext().getTargetInfo().hasFloat128Type() &&
         (BT->getKind() == BuiltinType::Float128))) {
      if (IsSoftFloatABI)
        return false;
      return true;
    }
  }
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
      return true;
  }
  return false;
}

bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  // Vector and fp128 types require one register, other floating point types
  // require one or two registers depending on their size.
  uint32_t NumRegs =
      ((getContext().getTargetInfo().hasFloat128Type() &&
        Base->isFloat128Type()) ||
       Base->isVectorType()) ? 1
                             : (getContext().getTypeSize(Base) + 63) / 64;

  // Homogeneous Aggregates may occupy at most 8 registers.
  return Members * NumRegs <= 8;
}

ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size > 128)
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (const auto *EIT = Ty->getAs<ExtIntType>())
    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
    uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();

    // ELFv2 homogeneous aggregates are passed as array types.
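    // e.g. struct { float x, y, z; } is coerced to [3 x float] here, so the
    // backend can place its elements in consecutive FPRs (a sketch of the
    // usual ELFv2 lowering; the exact register assignment is made by the
    // backend).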
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
        isHomogeneousAggregate(Ty, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // If an aggregate may end up fully in registers, we do not
    // use the ByVal method, but pass the aggregate as an array.
    // This is usually beneficial since we avoid forcing the
    // back-end to store the argument to memory.
    uint64_t Bits = getContext().getTypeSize(Ty);
    if (Bits > 0 && Bits <= 8 * GPRBits) {
      llvm::Type *CoerceTy;

      // Types up to 8 bytes are passed as integer type (which will be
      // properly aligned in the argument save area doubleword).
      if (Bits <= GPRBits)
        CoerceTy =
            llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
      // Larger types are passed as arrays, with the base type selected
      // according to the required alignment in the save area.
      else {
        uint64_t RegBits = ABIAlign * 8;
        uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
        llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
        CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
      }

      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are passed ByVal.
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                     : ABIArgInfo::getDirect());
}

ABIArgInfo
PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
    uint64_t Size = getContext().getTypeSize(RetTy);
    if (Size > 128)
      return getNaturalAlignIndirect(RetTy);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (const auto *EIT = RetTy->getAs<ExtIntType>())
    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);

  if (isAggregateTypeForABI(RetTy)) {
    // ELFv2 homogeneous aggregates are returned as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
        isHomogeneousAggregate(RetTy, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // ELFv2 small aggregates are returned in up to two registers.
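    // e.g. a 12-byte struct { int a, b, c; } comes back as { i64, i64 } in
    // r3:r4, while an 8-byte aggregate is returned as a single i64 in r3.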
    uint64_t Bits = getContext().getTypeSize(RetTy);
    if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
      if (Bits == 0)
        return ABIArgInfo::getIgnore();

      llvm::Type *CoerceTy;
      if (Bits > GPRBits) {
        CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
        CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
      } else
        CoerceTy =
            llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are returned indirectly.
    return getNaturalAlignIndirect(RetTy);
  }

  return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                        : ABIArgInfo::getDirect());
}

// Based on ARMABIInfo::EmitVAArg, adjusted for a 64-bit machine.
Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
  TypeInfo.second = getParamTypeAlignment(Ty);

  CharUnits SlotSize = CharUnits::fromQuantity(8);

  // If we have a complex type and the base type is smaller than 8 bytes,
  // the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate doublewords. However, Clang expects us to produce a
  // pointer to a structure with the two parts packed tightly. So generate
  // loads of the real and imaginary parts relative to the va_list pointer,
  // and store them to a temporary structure.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    CharUnits EltSize = TypeInfo.first / 2;
    if (EltSize < SlotSize) {
      Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
                                            SlotSize * 2, SlotSize,
                                            SlotSize, /*AllowHigher*/ true);

      Address RealAddr = Addr;
      Address ImagAddr = RealAddr;
      if (CGF.CGM.getDataLayout().isBigEndian()) {
        RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
                                                          SlotSize - EltSize);
        ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(
            ImagAddr, 2 * SlotSize - EltSize);
      } else {
        ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
      }

      llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
      RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
      ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
      llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
      llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");

      Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
      CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
                             /*init*/ true);
      return Temp;
    }
  }

  // Otherwise, just use the general rule.
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
                          TypeInfo, SlotSize, /*AllowHigher*/ true);
}

bool
PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF,
    llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
                                     /*IsAIX*/ false);
}

bool
PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
                                     /*IsAIX*/ false);
}

//===----------------------------------------------------------------------===//
// AArch64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AArch64ABIInfo : public SwiftABIInfo {
public:
  enum ABIKind {
    AAPCS = 0,
    DarwinPCS,
    Win64
  };

private:
  ABIKind Kind;

public:
  AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
      : SwiftABIInfo(CGT), Kind(Kind) {}

private:
  ABIKind getABIKind() const { return Kind; }
  bool isDarwinPCS() const { return Kind == DarwinPCS; }

  ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  bool isIllegalVectorType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!::classifyReturnType(getCXXABI(), FI, *this))
      FI.getReturnInfo() =
          classifyReturnType(FI.getReturnType(), FI.isVariadic());

    for (auto &it : FI.arguments())
      it.info = classifyArgumentType(it.type);
  }

  Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                          CodeGenFunction &CGF) const;

  Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
           : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
                           : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
  }

  Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                      QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
  bool isSwiftErrorInRegister() const override {
    return true;
  }

  bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
                                 unsigned elts) const override;

  bool allowBFloatArgsAndRet() const override {
    return getTarget().hasBFloat16Type();
  }
};

class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
      : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {}

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 31;
  }

  bool doesReturnSlotInterfereWithArgs() const override { return false; }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    const auto *TA = FD->getAttr<TargetAttr>();
    if (TA == nullptr)
      return;

    ParsedTargetAttr Attr = TA->parse();
    if (Attr.BranchProtection.empty())
      return;

    TargetInfo::BranchProtectionInfo BPI;
    StringRef Error;
    (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                   BPI, Error);
    assert(Error.empty());

    auto *Fn = cast<llvm::Function>(GV);
    static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
    Fn->addFnAttr("sign-return-address",
                  SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);

    if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
      Fn->addFnAttr("sign-return-address-key",
                    BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
                        ? "a_key"
                        : "b_key");
    }

    Fn->addFnAttr("branch-target-enforcement",
"true" : "false"); 5548 } 5549 }; 5550 5551 class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo { 5552 public: 5553 WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K) 5554 : AArch64TargetCodeGenInfo(CGT, K) {} 5555 5556 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5557 CodeGen::CodeGenModule &CGM) const override; 5558 5559 void getDependentLibraryOption(llvm::StringRef Lib, 5560 llvm::SmallString<24> &Opt) const override { 5561 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib); 5562 } 5563 5564 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, 5565 llvm::SmallString<32> &Opt) const override { 5566 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 5567 } 5568 }; 5569 5570 void WindowsAArch64TargetCodeGenInfo::setTargetAttributes( 5571 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 5572 AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 5573 if (GV->isDeclaration()) 5574 return; 5575 addStackProbeTargetAttributes(D, GV, CGM); 5576 } 5577 } 5578 5579 ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const { 5580 assert(Ty->isVectorType() && "expected vector type!"); 5581 5582 const auto *VT = Ty->castAs<VectorType>(); 5583 if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) { 5584 assert(VT->getElementType()->isBuiltinType() && "expected builtin type!"); 5585 assert(VT->getElementType()->castAs<BuiltinType>()->getKind() == 5586 BuiltinType::UChar && 5587 "unexpected builtin type for SVE predicate!"); 5588 return ABIArgInfo::getDirect(llvm::ScalableVectorType::get( 5589 llvm::Type::getInt1Ty(getVMContext()), 16)); 5590 } 5591 5592 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) { 5593 assert(VT->getElementType()->isBuiltinType() && "expected builtin type!"); 5594 5595 const auto *BT = VT->getElementType()->castAs<BuiltinType>(); 5596 llvm::ScalableVectorType *ResType = nullptr; 5597 switch (BT->getKind()) { 5598 default: 5599 llvm_unreachable("unexpected builtin type for SVE vector!"); 5600 case BuiltinType::SChar: 5601 case BuiltinType::UChar: 5602 ResType = llvm::ScalableVectorType::get( 5603 llvm::Type::getInt8Ty(getVMContext()), 16); 5604 break; 5605 case BuiltinType::Short: 5606 case BuiltinType::UShort: 5607 ResType = llvm::ScalableVectorType::get( 5608 llvm::Type::getInt16Ty(getVMContext()), 8); 5609 break; 5610 case BuiltinType::Int: 5611 case BuiltinType::UInt: 5612 ResType = llvm::ScalableVectorType::get( 5613 llvm::Type::getInt32Ty(getVMContext()), 4); 5614 break; 5615 case BuiltinType::Long: 5616 case BuiltinType::ULong: 5617 ResType = llvm::ScalableVectorType::get( 5618 llvm::Type::getInt64Ty(getVMContext()), 2); 5619 break; 5620 case BuiltinType::Half: 5621 ResType = llvm::ScalableVectorType::get( 5622 llvm::Type::getHalfTy(getVMContext()), 8); 5623 break; 5624 case BuiltinType::Float: 5625 ResType = llvm::ScalableVectorType::get( 5626 llvm::Type::getFloatTy(getVMContext()), 4); 5627 break; 5628 case BuiltinType::Double: 5629 ResType = llvm::ScalableVectorType::get( 5630 llvm::Type::getDoubleTy(getVMContext()), 2); 5631 break; 5632 case BuiltinType::BFloat16: 5633 ResType = llvm::ScalableVectorType::get( 5634 llvm::Type::getBFloatTy(getVMContext()), 8); 5635 break; 5636 } 5637 return ABIArgInfo::getDirect(ResType); 5638 } 5639 5640 uint64_t Size = getContext().getTypeSize(Ty); 5641 // Android promotes <2 x i8> to i16, not i32 5642 if (isAndroid() && (Size <= 16)) { 5643 llvm::Type *ResType = 
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64) {
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 128) {
    auto *ResType =
        llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
    return ABIArgInfo::getDirect(ResType);
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = Ty->getAs<ExtIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(Ty);

    return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
                ? ABIArgInfo::getExtend(Ty)
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Empty records are always ignored on Darwin, but actually passed in C++
  // mode elsewhere for GNU compatibility.
  uint64_t Size = getContext().getTypeSize(Ty);
  bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
  if (IsEmpty || Size == 0) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();

    // GNU C mode. The only argument that gets ignored is an empty one with
    // size 0.
    if (IsEmpty && Size == 0)
      return ABIArgInfo::getIgnore();
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(Ty, Base, Members)) {
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce aggregates <= 16 bytes to an integer array of
    // the same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(Ty, getContext(), getVMContext());
    }
    unsigned Alignment;
    if (Kind == AArch64ABIInfo::AAPCS) {
      Alignment = getContext().getTypeUnadjustedAlign(Ty);
      Alignment = Alignment < 128 ? 64 : 128;
    } else {
      Alignment = std::max(getContext().getTypeAlign(Ty),
                           (unsigned)getTarget().getPointerWidth(0));
    }
    Size = llvm::alignTo(Size, Alignment);

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
    return ABIArgInfo::getDirect(
        Size == Alignment ? BaseTy
                          : llvm::ArrayType::get(BaseTy, Size / Alignment));
  }

  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
                                              bool IsVariadic) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const auto *VT = RetTy->getAs<VectorType>()) {
    if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
        VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
      return coerceIllegalVector(RetTy);
  }

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = RetTy->getAs<ExtIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(RetTy);

    return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members) &&
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
        IsVariadic))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce aggregates <= 16 bytes to an integer array of
    // the same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }
    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return getNaturalAlignIndirect(RetTy);
}

/// isIllegalVectorType - check whether the vector type is legal for AArch64.
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is a fixed-length SVE vector. These types are
    // represented as scalable vectors in function args/return and must be
    // coerced from fixed vectors.
    if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
        VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
      return true;

    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be a power of 2.
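    // e.g. a 3-element vector such as
    //   typedef float float3 __attribute__((ext_vector_type(3)));
    // fails this check and is coerced by coerceIllegalVector; its ABI size
    // is already padded to 128 bits, so it travels as a 128-bit value.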
    if (!llvm::isPowerOf2_32(NumElements))
      return true;

    // arm64_32 has to be compatible with the ARM logic here, which allows huge
    // vectors for some reason.
    llvm::Triple Triple = getTarget().getTriple();
    if (Triple.getArch() == llvm::Triple::aarch64_32 &&
        Triple.isOSBinFormatMachO())
      return Size <= 32;

    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}

bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize,
                                               llvm::Type *eltTy,
                                               unsigned elts) const {
  if (!llvm::isPowerOf2_32(elts))
    return false;
  if (totalSize.getQuantity() != 8 &&
      (totalSize.getQuantity() != 16 || elts == 1))
    return false;
  return true;
}

bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS64 must have base types of a floating
  // point type or a short-vector type. This is the same as the 32-bit ABI,
  // but with the difference that any floating-point type is allowed,
  // including __fp16.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
}

Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
                                       QualType Ty,
                                       CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool IsIndirect = AI.isIndirect();

  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // 3 is the field number of __gr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
    RegSize = 16 * NumRegs;
  }

  //=======================================
  // Find out where argument was passed
  //=======================================

  // If reg_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers, the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to be realigned to an even register (for
  // example, a "struct { __int128 a; };" is passed in an even/odd pair
  // x_2N, x_{2N+1}). In this case we align __gr_offs before calculating the
  // potential address.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the gr_offs/vr_offs pointer for the next call to va_arg on this
  // va_list. The fact that this is done unconditionally reflects the fact
  // that allocating an argument to the stack also uses up all the remaining
  // registers of the appropriate kind.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================

  // Now we emit the code for if the argument was originally passed in
  // registers. First start the appropriate block:
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top = nullptr;
  Address reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
                   CharUnits::fromQuantity(IsFPR ? 16 : 8));
  Address RegAddr = Address::invalid();
  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);

  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find
    // from stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers will have their elements
    // split and stored 16 bytes apart regardless of size (they're notionally
    // in qN, qN+1, ...). We reload and store into a temporary local variable
    // contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp = CGF.CreateTempAlloca(HFATy,
                                       std::max(TyAlign, BaseTyInfo.second));

    // On big-endian platforms, the value will be right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.first.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.first.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
      Address LoadAddr =
          CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
      LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);

      Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
  } else {
    // Otherwise the object is contiguous in memory.

    // It might be right-aligned in its slot.
    CharUnits SlotSize = BaseAddr.getAlignment();
    if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
        (IsHFA || !isAggregateTypeForABI(Ty)) &&
        TySize < SlotSize) {
      CharUnits Offset = SlotSize - TySize;
      BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
    }

    RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
  }

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);

  Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Again, stack arguments may need realignment. In this case both integer
  // and floating-point ones might be affected.
  if (!IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);

    OnStackPtr = CGF.Builder.CreateAdd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
        "align_stack");
    OnStackPtr = CGF.Builder.CreateAnd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
        "align_stack");

    OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
  }
  Address OnStackAddr(OnStackPtr,
                      std::max(CharUnits::fromQuantity(8), TyAlign));

  // All stack slots are multiples of 8 bytes.
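  // e.g. va_arg(ap, char) still advances __stack by one 8-byte slot, while
  // a 12-byte struct passed directly advances it by 16.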
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack =
      CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg.
  CGF.Builder.CreateStore(NewStack, stack_p);

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }

  OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tidy up
  //=======================================
  CGF.EmitBlock(ContBlock);

  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
                                 OnStackAddr, OnStackBlock, "vaargs.addr");

  if (IsIndirect)
    return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
                   TyAlign);

  return ResAddr;
}

Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                        CodeGenFunction &CGF) const {
  // The backend's lowering doesn't support va_arg for aggregates or
  // illegal vector types. Lower VAArg here for these cases and use
  // the LLVM va_arg instruction for everything else.
  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
    return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());

  uint64_t PointerSize = getTarget().getPointerWidth(0) / 8;
  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  // The size of the actual thing passed, which might end up just
  // being a pointer for indirect types.
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous
  // aggregates should be passed indirectly.
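  // e.g. a 24-byte struct { long a, b, c; } is passed as a pointer to a
  // caller-made copy, while a 24-byte HFA such as struct { double v[3]; }
  // is still passed directly.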
  bool IsIndirect = false;
  if (TyInfo.first.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          TyInfo, SlotSize, /*AllowHigherAlign*/ true);
}

Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}

//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class ARMABIInfo : public SwiftABIInfo {
public:
  enum ABIKind {
    APCS = 0,
    AAPCS = 1,
    AAPCS_VFP = 2,
    AAPCS16_VFP = 3,
  };

private:
  ABIKind Kind;
  bool IsFloatABISoftFP;

public:
  ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
      : SwiftABIInfo(CGT), Kind(_Kind) {
    setCCs();
    IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
                       CGT.getCodeGenOpts().FloatABI == ""; // default
  }

  bool isEABI() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::EABI:
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABI:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }

  bool isEABIHF() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }

  ABIKind getABIKind() const { return Kind; }

  bool allowBFloatArgsAndRet() const override {
    return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
  }

private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
                                unsigned functionCallConv) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
                                  unsigned functionCallConv) const;
  ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
                                          uint64_t Members) const;
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isIllegalVectorType(QualType Ty) const;
  bool containsAnyFP16Vectors(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  llvm::CallingConv::ID getLLVMDefaultCC() const;
  llvm::CallingConv::ID getABIDefaultCC() const;
  void setCCs();

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
  bool isSwiftErrorInRegister() const override {
    return true;
  }
  bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
                                 llvm::Type *eltTy,
                                 unsigned elts) const override;
};

class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
      : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {}

  const ARMABIInfo &getABIInfo() const {
    return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 13;
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo().isEABI()) return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = ""; break;
    case ARMInterruptAttr::IRQ:     Kind = "IRQ"; break;
    case ARMInterruptAttr::FIQ:     Kind = "FIQ"; break;
    case ARMInterruptAttr::SWI:     Kind = "SWI"; break;
    case ARMInterruptAttr::ABORT:   Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF:   Kind = "UNDEF"; break;
    }

    llvm::Function *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);

    ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
    if (ABI == ARMABIInfo::APCS)
      return;

    // AAPCS guarantees that sp will be 8-byte aligned on any public
    // interface; however, this is not necessarily true when taking an
    // interrupt. Instruct the backend to perform a realignment as part of
    // the function prologue.
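    // For example (illustrative), a handler declared as
    //   __attribute__((interrupt("IRQ"))) void irq_handler(void);
    // receives the "interrupt"="IRQ" function attribute above plus the
    // 8-byte stack realignment requested here.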
6315 llvm::AttrBuilder B; 6316 B.addStackAlignmentAttr(8); 6317 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); 6318 } 6319 }; 6320 6321 class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo { 6322 public: 6323 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 6324 : ARMTargetCodeGenInfo(CGT, K) {} 6325 6326 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6327 CodeGen::CodeGenModule &CGM) const override; 6328 6329 void getDependentLibraryOption(llvm::StringRef Lib, 6330 llvm::SmallString<24> &Opt) const override { 6331 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib); 6332 } 6333 6334 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, 6335 llvm::SmallString<32> &Opt) const override { 6336 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 6337 } 6338 }; 6339 6340 void WindowsARMTargetCodeGenInfo::setTargetAttributes( 6341 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 6342 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 6343 if (GV->isDeclaration()) 6344 return; 6345 addStackProbeTargetAttributes(D, GV, CGM); 6346 } 6347 } 6348 6349 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 6350 if (!::classifyReturnType(getCXXABI(), FI, *this)) 6351 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(), 6352 FI.getCallingConvention()); 6353 6354 for (auto &I : FI.arguments()) 6355 I.info = classifyArgumentType(I.type, FI.isVariadic(), 6356 FI.getCallingConvention()); 6357 6358 6359 // Always honor user-specified calling convention. 6360 if (FI.getCallingConvention() != llvm::CallingConv::C) 6361 return; 6362 6363 llvm::CallingConv::ID cc = getRuntimeCC(); 6364 if (cc != llvm::CallingConv::C) 6365 FI.setEffectiveCallingConvention(cc); 6366 } 6367 6368 /// Return the default calling convention that LLVM will use. 6369 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const { 6370 // The default calling convention that LLVM will infer. 6371 if (isEABIHF() || getTarget().getTriple().isWatchABI()) 6372 return llvm::CallingConv::ARM_AAPCS_VFP; 6373 else if (isEABI()) 6374 return llvm::CallingConv::ARM_AAPCS; 6375 else 6376 return llvm::CallingConv::ARM_APCS; 6377 } 6378 6379 /// Return the calling convention that our ABI would like us to use 6380 /// as the C calling convention. 6381 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const { 6382 switch (getABIKind()) { 6383 case APCS: return llvm::CallingConv::ARM_APCS; 6384 case AAPCS: return llvm::CallingConv::ARM_AAPCS; 6385 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 6386 case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 6387 } 6388 llvm_unreachable("bad ABI kind"); 6389 } 6390 6391 void ARMABIInfo::setCCs() { 6392 assert(getRuntimeCC() == llvm::CallingConv::C); 6393 6394 // Don't muddy up the IR with a ton of explicit annotations if 6395 // they'd just match what LLVM will infer from the triple. 
6396 llvm::CallingConv::ID abiCC = getABIDefaultCC(); 6397 if (abiCC != getLLVMDefaultCC()) 6398 RuntimeCC = abiCC; 6399 } 6400 6401 ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const { 6402 uint64_t Size = getContext().getTypeSize(Ty); 6403 if (Size <= 32) { 6404 llvm::Type *ResType = 6405 llvm::Type::getInt32Ty(getVMContext()); 6406 return ABIArgInfo::getDirect(ResType); 6407 } 6408 if (Size == 64 || Size == 128) { 6409 auto *ResType = llvm::FixedVectorType::get( 6410 llvm::Type::getInt32Ty(getVMContext()), Size / 32); 6411 return ABIArgInfo::getDirect(ResType); 6412 } 6413 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 6414 } 6415 6416 ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty, 6417 const Type *Base, 6418 uint64_t Members) const { 6419 assert(Base && "Base class should be set for homogeneous aggregate"); 6420 // Base can be a floating-point or a vector. 6421 if (const VectorType *VT = Base->getAs<VectorType>()) { 6422 // FP16 vectors should be converted to integer vectors 6423 if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) { 6424 uint64_t Size = getContext().getTypeSize(VT); 6425 auto *NewVecTy = llvm::FixedVectorType::get( 6426 llvm::Type::getInt32Ty(getVMContext()), Size / 32); 6427 llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members); 6428 return ABIArgInfo::getDirect(Ty, 0, nullptr, false); 6429 } 6430 } 6431 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); 6432 } 6433 6434 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic, 6435 unsigned functionCallConv) const { 6436 // 6.1.2.1 The following argument types are VFP CPRCs: 6437 // A single-precision floating-point type (including promoted 6438 // half-precision types); A double-precision floating-point type; 6439 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate 6440 // with a Base Type of a single- or double-precision floating-point type, 6441 // 64-bit containerized vectors or 128-bit containerized vectors with one 6442 // to four Elements. 6443 // Variadic functions should always marshal to the base standard. 6444 bool IsAAPCS_VFP = 6445 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false); 6446 6447 Ty = useFirstFieldIfTransparentUnion(Ty); 6448 6449 // Handle illegal vector types here. 6450 if (isIllegalVectorType(Ty)) 6451 return coerceIllegalVector(Ty); 6452 6453 if (!isAggregateTypeForABI(Ty)) { 6454 // Treat an enum type as its underlying type. 6455 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { 6456 Ty = EnumTy->getDecl()->getIntegerType(); 6457 } 6458 6459 if (const auto *EIT = Ty->getAs<ExtIntType>()) 6460 if (EIT->getNumBits() > 64) 6461 return getNaturalAlignIndirect(Ty, /*ByVal=*/true); 6462 6463 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) 6464 : ABIArgInfo::getDirect()); 6465 } 6466 6467 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 6468 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 6469 } 6470 6471 // Ignore empty records. 6472 if (isEmptyRecord(getContext(), Ty, true)) 6473 return ABIArgInfo::getIgnore(); 6474 6475 if (IsAAPCS_VFP) { 6476 // Homogeneous Aggregates need to be expanded when we can fit the aggregate 6477 // into VFP registers. 
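    // For example, a struct such as (illustrative)
    //   struct HFA { float x, y, z; };   // Base = float, Members = 3
    // is a homogeneous aggregate and can be passed entirely in VFP
    // registers (s0-s2).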
6478 const Type *Base = nullptr; 6479 uint64_t Members = 0; 6480 if (isHomogeneousAggregate(Ty, Base, Members)) 6481 return classifyHomogeneousAggregate(Ty, Base, Members); 6482 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) { 6483 // WatchOS does have homogeneous aggregates. Note that we intentionally use 6484 // this convention even for a variadic function: the backend will use GPRs 6485 // if needed. 6486 const Type *Base = nullptr; 6487 uint64_t Members = 0; 6488 if (isHomogeneousAggregate(Ty, Base, Members)) { 6489 assert(Base && Members <= 4 && "unexpected homogeneous aggregate"); 6490 llvm::Type *Ty = 6491 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members); 6492 return ABIArgInfo::getDirect(Ty, 0, nullptr, false); 6493 } 6494 } 6495 6496 if (getABIKind() == ARMABIInfo::AAPCS16_VFP && 6497 getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) { 6498 // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're 6499 // bigger than 128-bits, they get placed in space allocated by the caller, 6500 // and a pointer is passed. 6501 return ABIArgInfo::getIndirect( 6502 CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false); 6503 } 6504 6505 // Support byval for ARM. 6506 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at 6507 // most 8-byte. We realign the indirect argument if type alignment is bigger 6508 // than ABI alignment. 6509 uint64_t ABIAlign = 4; 6510 uint64_t TyAlign; 6511 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 6512 getABIKind() == ARMABIInfo::AAPCS) { 6513 TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity(); 6514 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 6515 } else { 6516 TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity(); 6517 } 6518 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) { 6519 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval"); 6520 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), 6521 /*ByVal=*/true, 6522 /*Realign=*/TyAlign > ABIAlign); 6523 } 6524 6525 // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of 6526 // same size and alignment. 6527 if (getTarget().isRenderScriptTarget()) { 6528 return coerceToIntArray(Ty, getContext(), getVMContext()); 6529 } 6530 6531 // Otherwise, pass by coercing to a structure of the appropriate size. 6532 llvm::Type* ElemTy; 6533 unsigned SizeRegs; 6534 // FIXME: Try to match the types of the arguments more accurately where 6535 // we can. 6536 if (TyAlign <= 4) { 6537 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 6538 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 6539 } else { 6540 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 6541 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 6542 } 6543 6544 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs)); 6545 } 6546 6547 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 6548 llvm::LLVMContext &VMContext) { 6549 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 6550 // is called integer-like if its size is less than or equal to one word, and 6551 // the offset of each of its addressable sub-fields is zero. 6552 6553 uint64_t Size = Context.getTypeSize(Ty); 6554 6555 // Check that the type fits in a word. 6556 if (Size > 32) 6557 return false; 6558 6559 // FIXME: Handle vector types! 6560 if (Ty->isVectorType()) 6561 return false; 6562 6563 // Float types are never treated as "integer like". 
6564 if (Ty->isRealFloatingType()) 6565 return false; 6566 6567 // If this is a builtin or pointer type then it is ok. 6568 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 6569 return true; 6570 6571 // Small complex integer types are "integer like". 6572 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 6573 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 6574 6575 // Single element and zero sized arrays should be allowed, by the definition 6576 // above, but they are not. 6577 6578 // Otherwise, it must be a record type. 6579 const RecordType *RT = Ty->getAs<RecordType>(); 6580 if (!RT) return false; 6581 6582 // Ignore records with flexible arrays. 6583 const RecordDecl *RD = RT->getDecl(); 6584 if (RD->hasFlexibleArrayMember()) 6585 return false; 6586 6587 // Check that all sub-fields are at offset 0, and are themselves "integer 6588 // like". 6589 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 6590 6591 bool HadField = false; 6592 unsigned idx = 0; 6593 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 6594 i != e; ++i, ++idx) { 6595 const FieldDecl *FD = *i; 6596 6597 // Bit-fields are not addressable, we only need to verify they are "integer 6598 // like". We still have to disallow a subsequent non-bitfield, for example: 6599 // struct { int : 0; int x } 6600 // is non-integer like according to gcc. 6601 if (FD->isBitField()) { 6602 if (!RD->isUnion()) 6603 HadField = true; 6604 6605 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 6606 return false; 6607 6608 continue; 6609 } 6610 6611 // Check if this field is at offset 0. 6612 if (Layout.getFieldOffset(idx) != 0) 6613 return false; 6614 6615 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 6616 return false; 6617 6618 // Only allow at most one field in a structure. This doesn't match the 6619 // wording above, but follows gcc in situations with a field following an 6620 // empty structure. 6621 if (!RD->isUnion()) { 6622 if (HadField) 6623 return false; 6624 6625 HadField = true; 6626 } 6627 } 6628 6629 return true; 6630 } 6631 6632 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic, 6633 unsigned functionCallConv) const { 6634 6635 // Variadic functions should always marshal to the base standard. 6636 bool IsAAPCS_VFP = 6637 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true); 6638 6639 if (RetTy->isVoidType()) 6640 return ABIArgInfo::getIgnore(); 6641 6642 if (const VectorType *VT = RetTy->getAs<VectorType>()) { 6643 // Large vector types should be returned via memory. 6644 if (getContext().getTypeSize(RetTy) > 128) 6645 return getNaturalAlignIndirect(RetTy); 6646 // TODO: FP16/BF16 vectors should be converted to integer vectors 6647 // This check is similar to isIllegalVectorType - refactor? 6648 if ((!getTarget().hasLegalHalfType() && 6649 (VT->getElementType()->isFloat16Type() || 6650 VT->getElementType()->isHalfType())) || 6651 (IsFloatABISoftFP && 6652 VT->getElementType()->isBFloat16Type())) 6653 return coerceIllegalVector(RetTy); 6654 } 6655 6656 if (!isAggregateTypeForABI(RetTy)) { 6657 // Treat an enum type as its underlying type. 6658 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 6659 RetTy = EnumTy->getDecl()->getIntegerType(); 6660 6661 if (const auto *EIT = RetTy->getAs<ExtIntType>()) 6662 if (EIT->getNumBits() > 64) 6663 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false); 6664 6665 return isPromotableIntegerTypeForABI(RetTy) ? 
               ABIArgInfo::getExtend(RetTy)
                   : ABIArgInfo::getDirect();
  }

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer-like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return getNaturalAlignIndirect(RetTy);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(RetTy, Base, Members))
      return classifyHomogeneousAggregate(RetTy, Base, Members);
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // On RenderScript, coerce aggregates <= 4 bytes to an integer array of
    // the same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
    llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
    llvm::Type *CoerceTy =
        llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
    return ABIArgInfo::getDirect(CoerceTy);
  }

  return getNaturalAlignIndirect(RetTy);
}

/// isIllegalVectorType - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On targets that don't support half, fp16 or bfloat, they are expanded
    // into float, and we don't want the ABI to depend on whether or not they
    // are supported in hardware. Thus return true to coerce vectors of these
    // types into integer vectors.
    // We do not depend on hasLegalHalfType for bfloat as it is a
    // separate IR type.
    if ((!getTarget().hasLegalHalfType() &&
         (VT->getElementType()->isFloat16Type() ||
          VT->getElementType()->isHalfType())) ||
        (IsFloatABISoftFP &&
         VT->getElementType()->isBFloat16Type()))
      return true;
    if (isAndroid()) {
      // Android shipped using Clang 3.1, which supported a slightly different
      // vector ABI. The primary differences were that 3-element vector types
      // were legal, and so were sub-32-bit vectors (e.g. <2 x i8>). This path
      // accepts that legacy behavior for Android only.
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      // NumElements should be a power of 2 or equal to 3.
      if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
        return true;
    } else {
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      uint64_t Size = getContext().getTypeSize(VT);
      // NumElements should be a power of 2.
      if (!llvm::isPowerOf2_32(NumElements))
        return true;
      // A legal vector must be larger than 32 bits; anything of 32 bits or
      // fewer is illegal and gets coerced.
      return Size <= 32;
    }
  }
  return false;
}

/// Return true if a type contains any 16-bit floating-point vectors.
bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getSize().getZExtValue();
    if (NElements == 0)
      return false;
    return containsAnyFP16Vectors(AT->getElementType());
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
            return containsAnyFP16Vectors(B.getType());
          }))
        return true;

    if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
          return FD && containsAnyFP16Vectors(FD->getType());
        }))
      return true;

    return false;
  } else {
    if (const VectorType *VT = Ty->getAs<VectorType>())
      return (VT->getElementType()->isFloat16Type() ||
              VT->getElementType()->isBFloat16Type() ||
              VT->getElementType()->isHalfType());
    return false;
  }
}

bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                           llvm::Type *eltTy,
                                           unsigned numElts) const {
  if (!llvm::isPowerOf2_32(numElts))
    return false;
  unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy);
  if (size > 64)
    return false;
  if (vectorSize.getQuantity() != 8 &&
      (vectorSize.getQuantity() != 16 || numElts == 1))
    return false;
  return true;
}

bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, or 64-bit or 128-bit vectors.
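  // For example, float, double, and a 64-bit <2 x float> vector all qualify
  // as base types, while a 32-bit <2 x i16> vector does not.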
6827 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 6828 if (BT->getKind() == BuiltinType::Float || 6829 BT->getKind() == BuiltinType::Double || 6830 BT->getKind() == BuiltinType::LongDouble) 6831 return true; 6832 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 6833 unsigned VecSize = getContext().getTypeSize(VT); 6834 if (VecSize == 64 || VecSize == 128) 6835 return true; 6836 } 6837 return false; 6838 } 6839 6840 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 6841 uint64_t Members) const { 6842 return Members <= 4; 6843 } 6844 6845 bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention, 6846 bool acceptHalf) const { 6847 // Give precedence to user-specified calling conventions. 6848 if (callConvention != llvm::CallingConv::C) 6849 return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP); 6850 else 6851 return (getABIKind() == AAPCS_VFP) || 6852 (acceptHalf && (getABIKind() == AAPCS16_VFP)); 6853 } 6854 6855 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6856 QualType Ty) const { 6857 CharUnits SlotSize = CharUnits::fromQuantity(4); 6858 6859 // Empty records are ignored for parameter passing purposes. 6860 if (isEmptyRecord(getContext(), Ty, true)) { 6861 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize); 6862 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); 6863 return Addr; 6864 } 6865 6866 CharUnits TySize = getContext().getTypeSizeInChars(Ty); 6867 CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty); 6868 6869 // Use indirect if size of the illegal vector is bigger than 16 bytes. 6870 bool IsIndirect = false; 6871 const Type *Base = nullptr; 6872 uint64_t Members = 0; 6873 if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) { 6874 IsIndirect = true; 6875 6876 // ARMv7k passes structs bigger than 16 bytes indirectly, in space 6877 // allocated by the caller. 6878 } else if (TySize > CharUnits::fromQuantity(16) && 6879 getABIKind() == ARMABIInfo::AAPCS16_VFP && 6880 !isHomogeneousAggregate(Ty, Base, Members)) { 6881 IsIndirect = true; 6882 6883 // Otherwise, bound the type's ABI alignment. 6884 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for 6885 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte. 6886 // Our callers should be prepared to handle an under-aligned address. 6887 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP || 6888 getABIKind() == ARMABIInfo::AAPCS) { 6889 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4)); 6890 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8)); 6891 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) { 6892 // ARMv7k allows type alignment up to 16 bytes. 
6893 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4)); 6894 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16)); 6895 } else { 6896 TyAlignForABI = CharUnits::fromQuantity(4); 6897 } 6898 6899 std::pair<CharUnits, CharUnits> TyInfo = { TySize, TyAlignForABI }; 6900 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, 6901 SlotSize, /*AllowHigherAlign*/ true); 6902 } 6903 6904 //===----------------------------------------------------------------------===// 6905 // NVPTX ABI Implementation 6906 //===----------------------------------------------------------------------===// 6907 6908 namespace { 6909 6910 class NVPTXTargetCodeGenInfo; 6911 6912 class NVPTXABIInfo : public ABIInfo { 6913 NVPTXTargetCodeGenInfo &CGInfo; 6914 6915 public: 6916 NVPTXABIInfo(CodeGenTypes &CGT, NVPTXTargetCodeGenInfo &Info) 6917 : ABIInfo(CGT), CGInfo(Info) {} 6918 6919 ABIArgInfo classifyReturnType(QualType RetTy) const; 6920 ABIArgInfo classifyArgumentType(QualType Ty) const; 6921 6922 void computeInfo(CGFunctionInfo &FI) const override; 6923 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6924 QualType Ty) const override; 6925 bool isUnsupportedType(QualType T) const; 6926 ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, unsigned MaxSize) const; 6927 }; 6928 6929 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { 6930 public: 6931 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT) 6932 : TargetCodeGenInfo(std::make_unique<NVPTXABIInfo>(CGT, *this)) {} 6933 6934 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6935 CodeGen::CodeGenModule &M) const override; 6936 bool shouldEmitStaticExternCAliases() const override; 6937 6938 llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override { 6939 // On the device side, surface reference is represented as an object handle 6940 // in 64-bit integer. 6941 return llvm::Type::getInt64Ty(getABIInfo().getVMContext()); 6942 } 6943 6944 llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const override { 6945 // On the device side, texture reference is represented as an object handle 6946 // in 64-bit integer. 6947 return llvm::Type::getInt64Ty(getABIInfo().getVMContext()); 6948 } 6949 6950 bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF, LValue Dst, 6951 LValue Src) const override { 6952 emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src); 6953 return true; 6954 } 6955 6956 bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF, LValue Dst, 6957 LValue Src) const override { 6958 emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src); 6959 return true; 6960 } 6961 6962 private: 6963 // Adds a NamedMDNode with GV, Name, and Operand as operands, and adds the 6964 // resulting MDNode to the nvvm.annotations MDNode. 6965 static void addNVVMMetadata(llvm::GlobalValue *GV, StringRef Name, 6966 int Operand); 6967 6968 static void emitBuiltinSurfTexDeviceCopy(CodeGenFunction &CGF, LValue Dst, 6969 LValue Src) { 6970 llvm::Value *Handle = nullptr; 6971 llvm::Constant *C = 6972 llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).getPointer()); 6973 // Lookup `addrspacecast` through the constant pointer if any. 6974 if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C)) 6975 C = llvm::cast<llvm::Constant>(ASC->getPointerOperand()); 6976 if (auto *GV = llvm::dyn_cast_or_null<llvm::GlobalVariable>(C)) { 6977 // Load the handle from the specific global variable using 6978 // `nvvm.texsurf.handle.internal` intrinsic. 
      Handle = CGF.EmitRuntimeCall(
          CGF.CGM.getIntrinsic(llvm::Intrinsic::nvvm_texsurf_handle_internal,
                               {GV->getType()}),
          {GV}, "texsurf_handle");
    } else
      Handle = CGF.EmitLoadOfScalar(Src, SourceLocation());
    CGF.EmitStoreOfScalar(Handle, Dst);
  }
};

/// Checks if the type is not supported directly by the current target.
bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
  ASTContext &Context = getContext();
  if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
    return true;
  if (!Context.getTargetInfo().hasFloat128Type() &&
      (T->isFloat128Type() ||
       (T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
    return true;
  if (const auto *EIT = T->getAs<ExtIntType>())
    return EIT->getNumBits() >
           (Context.getTargetInfo().hasInt128Type() ? 128U : 64U);
  if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
      Context.getTypeSize(T) > 64U)
    return true;
  if (const auto *AT = T->getAsArrayTypeUnsafe())
    return isUnsupportedType(AT->getElementType());
  const auto *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const CXXBaseSpecifier &I : CXXRD->bases())
      if (isUnsupportedType(I.getType()))
        return true;

  for (const FieldDecl *I : RD->fields())
    if (isUnsupportedType(I->getType()))
      return true;
  return false;
}

/// Coerce the given type into an array with maximum allowed size of elements.
ABIArgInfo NVPTXABIInfo::coerceToIntArrayWithLimit(QualType Ty,
                                                   unsigned MaxSize) const {
  // Alignment and Size are measured in bits.
  const uint64_t Size = getContext().getTypeSize(Ty);
  const uint64_t Alignment = getContext().getTypeAlign(Ty);
  const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
  llvm::Type *IntType = llvm::Type::getIntNTy(getVMContext(), Div);
  const uint64_t NumElements = (Size + Div - 1) / Div;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}

ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (getContext().getLangOpts().OpenMP &&
      getContext().getLangOpts().OpenMPIsDevice && isUnsupportedType(RetTy))
    return coerceToIntArrayWithLimit(RetTy, 64);

  // Note: this is different from the default ABI.
  if (!RetTy->isScalarType())
    return ABIArgInfo::getDirect();

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}

ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Pass aggregate types indirectly, by value.
  if (isAggregateTypeForABI(Ty)) {
    // Under CUDA device compilation, tex/surf builtin types are replaced with
    // object types and passed directly.
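    // For example (illustrative), a device-function parameter of the builtin
    //   texture<float, 2>
    // reference type is lowered to the 64-bit handle type returned by
    // getCUDADeviceBuiltinTextureDeviceType() rather than being passed byval.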
7064 if (getContext().getLangOpts().CUDAIsDevice) { 7065 if (Ty->isCUDADeviceBuiltinSurfaceType()) 7066 return ABIArgInfo::getDirect( 7067 CGInfo.getCUDADeviceBuiltinSurfaceDeviceType()); 7068 if (Ty->isCUDADeviceBuiltinTextureType()) 7069 return ABIArgInfo::getDirect( 7070 CGInfo.getCUDADeviceBuiltinTextureDeviceType()); 7071 } 7072 return getNaturalAlignIndirect(Ty, /* byval */ true); 7073 } 7074 7075 if (const auto *EIT = Ty->getAs<ExtIntType>()) { 7076 if ((EIT->getNumBits() > 128) || 7077 (!getContext().getTargetInfo().hasInt128Type() && 7078 EIT->getNumBits() > 64)) 7079 return getNaturalAlignIndirect(Ty, /* byval */ true); 7080 } 7081 7082 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) 7083 : ABIArgInfo::getDirect()); 7084 } 7085 7086 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const { 7087 if (!getCXXABI().classifyReturnType(FI)) 7088 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 7089 for (auto &I : FI.arguments()) 7090 I.info = classifyArgumentType(I.type); 7091 7092 // Always honor user-specified calling convention. 7093 if (FI.getCallingConvention() != llvm::CallingConv::C) 7094 return; 7095 7096 FI.setEffectiveCallingConvention(getRuntimeCC()); 7097 } 7098 7099 Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7100 QualType Ty) const { 7101 llvm_unreachable("NVPTX does not support varargs"); 7102 } 7103 7104 void NVPTXTargetCodeGenInfo::setTargetAttributes( 7105 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { 7106 if (GV->isDeclaration()) 7107 return; 7108 const VarDecl *VD = dyn_cast_or_null<VarDecl>(D); 7109 if (VD) { 7110 if (M.getLangOpts().CUDA) { 7111 if (VD->getType()->isCUDADeviceBuiltinSurfaceType()) 7112 addNVVMMetadata(GV, "surface", 1); 7113 else if (VD->getType()->isCUDADeviceBuiltinTextureType()) 7114 addNVVMMetadata(GV, "texture", 1); 7115 return; 7116 } 7117 } 7118 7119 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 7120 if (!FD) return; 7121 7122 llvm::Function *F = cast<llvm::Function>(GV); 7123 7124 // Perform special handling in OpenCL mode 7125 if (M.getLangOpts().OpenCL) { 7126 // Use OpenCL function attributes to check for kernel functions 7127 // By default, all functions are device functions 7128 if (FD->hasAttr<OpenCLKernelAttr>()) { 7129 // OpenCL __kernel functions get kernel metadata 7130 // Create !{<func-ref>, metadata !"kernel", i32 1} node 7131 addNVVMMetadata(F, "kernel", 1); 7132 // And kernel functions are not subject to inlining 7133 F->addFnAttr(llvm::Attribute::NoInline); 7134 } 7135 } 7136 7137 // Perform special handling in CUDA mode. 7138 if (M.getLangOpts().CUDA) { 7139 // CUDA __global__ functions get a kernel metadata entry. Since 7140 // __global__ functions cannot be called from the device, we do not 7141 // need to set the noinline attribute. 7142 if (FD->hasAttr<CUDAGlobalAttr>()) { 7143 // Create !{<func-ref>, metadata !"kernel", i32 1} node 7144 addNVVMMetadata(F, "kernel", 1); 7145 } 7146 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) { 7147 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node 7148 llvm::APSInt MaxThreads(32); 7149 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext()); 7150 if (MaxThreads > 0) 7151 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue()); 7152 7153 // min blocks is an optional argument for CUDALaunchBoundsAttr. 
If it was 7154 // not specified in __launch_bounds__ or if the user specified a 0 value, 7155 // we don't have to add a PTX directive. 7156 if (Attr->getMinBlocks()) { 7157 llvm::APSInt MinBlocks(32); 7158 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext()); 7159 if (MinBlocks > 0) 7160 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node 7161 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue()); 7162 } 7163 } 7164 } 7165 } 7166 7167 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV, 7168 StringRef Name, int Operand) { 7169 llvm::Module *M = GV->getParent(); 7170 llvm::LLVMContext &Ctx = M->getContext(); 7171 7172 // Get "nvvm.annotations" metadata node 7173 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations"); 7174 7175 llvm::Metadata *MDVals[] = { 7176 llvm::ConstantAsMetadata::get(GV), llvm::MDString::get(Ctx, Name), 7177 llvm::ConstantAsMetadata::get( 7178 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))}; 7179 // Append metadata to nvvm.annotations 7180 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 7181 } 7182 7183 bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const { 7184 return false; 7185 } 7186 } 7187 7188 //===----------------------------------------------------------------------===// 7189 // SystemZ ABI Implementation 7190 //===----------------------------------------------------------------------===// 7191 7192 namespace { 7193 7194 class SystemZABIInfo : public SwiftABIInfo { 7195 bool HasVector; 7196 bool IsSoftFloatABI; 7197 7198 public: 7199 SystemZABIInfo(CodeGenTypes &CGT, bool HV, bool SF) 7200 : SwiftABIInfo(CGT), HasVector(HV), IsSoftFloatABI(SF) {} 7201 7202 bool isPromotableIntegerTypeForABI(QualType Ty) const; 7203 bool isCompoundType(QualType Ty) const; 7204 bool isVectorArgumentType(QualType Ty) const; 7205 bool isFPArgumentType(QualType Ty) const; 7206 QualType GetSingleElementType(QualType Ty) const; 7207 7208 ABIArgInfo classifyReturnType(QualType RetTy) const; 7209 ABIArgInfo classifyArgumentType(QualType ArgTy) const; 7210 7211 void computeInfo(CGFunctionInfo &FI) const override { 7212 if (!getCXXABI().classifyReturnType(FI)) 7213 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 7214 for (auto &I : FI.arguments()) 7215 I.info = classifyArgumentType(I.type); 7216 } 7217 7218 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7219 QualType Ty) const override; 7220 7221 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, 7222 bool asReturnValue) const override { 7223 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 7224 } 7225 bool isSwiftErrorInRegister() const override { 7226 return false; 7227 } 7228 }; 7229 7230 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo { 7231 public: 7232 SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector, bool SoftFloatABI) 7233 : TargetCodeGenInfo( 7234 std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)) {} 7235 }; 7236 7237 } 7238 7239 bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const { 7240 // Treat an enum type as its underlying type. 7241 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 7242 Ty = EnumTy->getDecl()->getIntegerType(); 7243 7244 // Promotable integer types are required to be promoted by the ABI. 7245 if (ABIInfo::isPromotableIntegerTypeForABI(Ty)) 7246 return true; 7247 7248 if (const auto *EIT = Ty->getAs<ExtIntType>()) 7249 if (EIT->getNumBits() < 64) 7250 return true; 7251 7252 // 32-bit values must also be promoted. 
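  // For example, a plain 'int' or 'unsigned int' is extended to a full
  // 64-bit register here, while 'long' and 'unsigned long' already occupy
  // a full register.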
7253 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 7254 switch (BT->getKind()) { 7255 case BuiltinType::Int: 7256 case BuiltinType::UInt: 7257 return true; 7258 default: 7259 return false; 7260 } 7261 return false; 7262 } 7263 7264 bool SystemZABIInfo::isCompoundType(QualType Ty) const { 7265 return (Ty->isAnyComplexType() || 7266 Ty->isVectorType() || 7267 isAggregateTypeForABI(Ty)); 7268 } 7269 7270 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const { 7271 return (HasVector && 7272 Ty->isVectorType() && 7273 getContext().getTypeSize(Ty) <= 128); 7274 } 7275 7276 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const { 7277 if (IsSoftFloatABI) 7278 return false; 7279 7280 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 7281 switch (BT->getKind()) { 7282 case BuiltinType::Float: 7283 case BuiltinType::Double: 7284 return true; 7285 default: 7286 return false; 7287 } 7288 7289 return false; 7290 } 7291 7292 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const { 7293 const RecordType *RT = Ty->getAs<RecordType>(); 7294 7295 if (RT && RT->isStructureOrClassType()) { 7296 const RecordDecl *RD = RT->getDecl(); 7297 QualType Found; 7298 7299 // If this is a C++ record, check the bases first. 7300 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 7301 for (const auto &I : CXXRD->bases()) { 7302 QualType Base = I.getType(); 7303 7304 // Empty bases don't affect things either way. 7305 if (isEmptyRecord(getContext(), Base, true)) 7306 continue; 7307 7308 if (!Found.isNull()) 7309 return Ty; 7310 Found = GetSingleElementType(Base); 7311 } 7312 7313 // Check the fields. 7314 for (const auto *FD : RD->fields()) { 7315 // For compatibility with GCC, ignore empty bitfields in C++ mode. 7316 // Unlike isSingleElementStruct(), empty structure and array fields 7317 // do count. So do anonymous bitfields that aren't zero-sized. 7318 if (getContext().getLangOpts().CPlusPlus && 7319 FD->isZeroLengthBitField(getContext())) 7320 continue; 7321 // Like isSingleElementStruct(), ignore C++20 empty data members. 7322 if (FD->hasAttr<NoUniqueAddressAttr>() && 7323 isEmptyRecord(getContext(), FD->getType(), true)) 7324 continue; 7325 7326 // Unlike isSingleElementStruct(), arrays do not count. 7327 // Nested structures still do though. 7328 if (!Found.isNull()) 7329 return Ty; 7330 Found = GetSingleElementType(FD->getType()); 7331 } 7332 7333 // Unlike isSingleElementStruct(), trailing padding is allowed. 7334 // An 8-byte aligned struct s { float f; } is passed as a double. 7335 if (!Found.isNull()) 7336 return Found; 7337 } 7338 7339 return Ty; 7340 } 7341 7342 Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7343 QualType Ty) const { 7344 // Assume that va_list type is correct; should be pointer to LLVM type: 7345 // struct { 7346 // i64 __gpr; 7347 // i64 __fpr; 7348 // i8 *__overflow_arg_area; 7349 // i8 *__reg_save_area; 7350 // }; 7351 7352 // Every non-vector argument occupies 8 bytes and is passed by preference 7353 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are 7354 // always passed on the stack. 
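  // At most 5 GPR arguments (r2-r6) and 4 FPR arguments (f0, f2, f4, f6)
  // live in the register save area; __gpr and __fpr record how many of each
  // this va_list has consumed so far (see MaxRegs/RegSaveIndex below).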
7355 Ty = getContext().getCanonicalType(Ty); 7356 auto TyInfo = getContext().getTypeInfoInChars(Ty); 7357 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty); 7358 llvm::Type *DirectTy = ArgTy; 7359 ABIArgInfo AI = classifyArgumentType(Ty); 7360 bool IsIndirect = AI.isIndirect(); 7361 bool InFPRs = false; 7362 bool IsVector = false; 7363 CharUnits UnpaddedSize; 7364 CharUnits DirectAlign; 7365 if (IsIndirect) { 7366 DirectTy = llvm::PointerType::getUnqual(DirectTy); 7367 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8); 7368 } else { 7369 if (AI.getCoerceToType()) 7370 ArgTy = AI.getCoerceToType(); 7371 InFPRs = (!IsSoftFloatABI && (ArgTy->isFloatTy() || ArgTy->isDoubleTy())); 7372 IsVector = ArgTy->isVectorTy(); 7373 UnpaddedSize = TyInfo.first; 7374 DirectAlign = TyInfo.second; 7375 } 7376 CharUnits PaddedSize = CharUnits::fromQuantity(8); 7377 if (IsVector && UnpaddedSize > PaddedSize) 7378 PaddedSize = CharUnits::fromQuantity(16); 7379 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size."); 7380 7381 CharUnits Padding = (PaddedSize - UnpaddedSize); 7382 7383 llvm::Type *IndexTy = CGF.Int64Ty; 7384 llvm::Value *PaddedSizeV = 7385 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity()); 7386 7387 if (IsVector) { 7388 // Work out the address of a vector argument on the stack. 7389 // Vector arguments are always passed in the high bits of a 7390 // single (8 byte) or double (16 byte) stack slot. 7391 Address OverflowArgAreaPtr = 7392 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr"); 7393 Address OverflowArgArea = 7394 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), 7395 TyInfo.second); 7396 Address MemAddr = 7397 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr"); 7398 7399 // Update overflow_arg_area_ptr pointer 7400 llvm::Value *NewOverflowArgArea = 7401 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV, 7402 "overflow_arg_area"); 7403 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 7404 7405 return MemAddr; 7406 } 7407 7408 assert(PaddedSize.getQuantity() == 8); 7409 7410 unsigned MaxRegs, RegCountField, RegSaveIndex; 7411 CharUnits RegPadding; 7412 if (InFPRs) { 7413 MaxRegs = 4; // Maximum of 4 FPR arguments 7414 RegCountField = 1; // __fpr 7415 RegSaveIndex = 16; // save offset for f0 7416 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR 7417 } else { 7418 MaxRegs = 5; // Maximum of 5 GPR arguments 7419 RegCountField = 0; // __gpr 7420 RegSaveIndex = 2; // save offset for r2 7421 RegPadding = Padding; // values are passed in the low bits of a GPR 7422 } 7423 7424 Address RegCountPtr = 7425 CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr"); 7426 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count"); 7427 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs); 7428 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV, 7429 "fits_in_regs"); 7430 7431 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 7432 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 7433 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 7434 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 7435 7436 // Emit code to load the value if it was passed in registers. 7437 CGF.EmitBlock(InRegBlock); 7438 7439 // Work out the address of an argument register. 
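  // The address computed below is effectively:
  //   reg_save_area + (RegSaveIndex * 8 + RegPadding) + reg_count * 8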
7440 llvm::Value *ScaledRegCount = 7441 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count"); 7442 llvm::Value *RegBase = 7443 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity() 7444 + RegPadding.getQuantity()); 7445 llvm::Value *RegOffset = 7446 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset"); 7447 Address RegSaveAreaPtr = 7448 CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr"); 7449 llvm::Value *RegSaveArea = 7450 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area"); 7451 Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset, 7452 "raw_reg_addr"), 7453 PaddedSize); 7454 Address RegAddr = 7455 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr"); 7456 7457 // Update the register count 7458 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1); 7459 llvm::Value *NewRegCount = 7460 CGF.Builder.CreateAdd(RegCount, One, "reg_count"); 7461 CGF.Builder.CreateStore(NewRegCount, RegCountPtr); 7462 CGF.EmitBranch(ContBlock); 7463 7464 // Emit code to load the value if it was passed in memory. 7465 CGF.EmitBlock(InMemBlock); 7466 7467 // Work out the address of a stack argument. 7468 Address OverflowArgAreaPtr = 7469 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr"); 7470 Address OverflowArgArea = 7471 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), 7472 PaddedSize); 7473 Address RawMemAddr = 7474 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr"); 7475 Address MemAddr = 7476 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr"); 7477 7478 // Update overflow_arg_area_ptr pointer 7479 llvm::Value *NewOverflowArgArea = 7480 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV, 7481 "overflow_arg_area"); 7482 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 7483 CGF.EmitBranch(ContBlock); 7484 7485 // Return the appropriate result. 7486 CGF.EmitBlock(ContBlock); 7487 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, 7488 MemAddr, InMemBlock, "va_arg.addr"); 7489 7490 if (IsIndirect) 7491 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"), 7492 TyInfo.second); 7493 7494 return ResAddr; 7495 } 7496 7497 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const { 7498 if (RetTy->isVoidType()) 7499 return ABIArgInfo::getIgnore(); 7500 if (isVectorArgumentType(RetTy)) 7501 return ABIArgInfo::getDirect(); 7502 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64) 7503 return getNaturalAlignIndirect(RetTy); 7504 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 7505 : ABIArgInfo::getDirect()); 7506 } 7507 7508 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const { 7509 // Handle the generic C++ ABI. 7510 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 7511 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 7512 7513 // Integers and enums are extended to full register width. 7514 if (isPromotableIntegerTypeForABI(Ty)) 7515 return ABIArgInfo::getExtend(Ty); 7516 7517 // Handle vector types and vector-like structure types. Note that 7518 // as opposed to float-like structure types, we do not allow any 7519 // padding for vector-like structures, so verify the sizes match. 
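  // For example (illustrative, assuming z/Vector support), a wrapper such as
  //   struct V { __vector int v; };   // 16 bytes, same size as the vector
  // is passed like the vector itself, whereas a struct whose size exceeds
  // that of the contained vector fails the size check and falls through.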
7520 uint64_t Size = getContext().getTypeSize(Ty); 7521 QualType SingleElementTy = GetSingleElementType(Ty); 7522 if (isVectorArgumentType(SingleElementTy) && 7523 getContext().getTypeSize(SingleElementTy) == Size) 7524 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy)); 7525 7526 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly. 7527 if (Size != 8 && Size != 16 && Size != 32 && Size != 64) 7528 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 7529 7530 // Handle small structures. 7531 if (const RecordType *RT = Ty->getAs<RecordType>()) { 7532 // Structures with flexible arrays have variable length, so really 7533 // fail the size test above. 7534 const RecordDecl *RD = RT->getDecl(); 7535 if (RD->hasFlexibleArrayMember()) 7536 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 7537 7538 // The structure is passed as an unextended integer, a float, or a double. 7539 llvm::Type *PassTy; 7540 if (isFPArgumentType(SingleElementTy)) { 7541 assert(Size == 32 || Size == 64); 7542 if (Size == 32) 7543 PassTy = llvm::Type::getFloatTy(getVMContext()); 7544 else 7545 PassTy = llvm::Type::getDoubleTy(getVMContext()); 7546 } else 7547 PassTy = llvm::IntegerType::get(getVMContext(), Size); 7548 return ABIArgInfo::getDirect(PassTy); 7549 } 7550 7551 // Non-structure compounds are passed indirectly. 7552 if (isCompoundType(Ty)) 7553 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 7554 7555 return ABIArgInfo::getDirect(nullptr); 7556 } 7557 7558 //===----------------------------------------------------------------------===// 7559 // MSP430 ABI Implementation 7560 //===----------------------------------------------------------------------===// 7561 7562 namespace { 7563 7564 class MSP430ABIInfo : public DefaultABIInfo { 7565 static ABIArgInfo complexArgInfo() { 7566 ABIArgInfo Info = ABIArgInfo::getDirect(); 7567 Info.setCanBeFlattened(false); 7568 return Info; 7569 } 7570 7571 public: 7572 MSP430ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 7573 7574 ABIArgInfo classifyReturnType(QualType RetTy) const { 7575 if (RetTy->isAnyComplexType()) 7576 return complexArgInfo(); 7577 7578 return DefaultABIInfo::classifyReturnType(RetTy); 7579 } 7580 7581 ABIArgInfo classifyArgumentType(QualType RetTy) const { 7582 if (RetTy->isAnyComplexType()) 7583 return complexArgInfo(); 7584 7585 return DefaultABIInfo::classifyArgumentType(RetTy); 7586 } 7587 7588 // Just copy the original implementations because 7589 // DefaultABIInfo::classify{Return,Argument}Type() are not virtual 7590 void computeInfo(CGFunctionInfo &FI) const override { 7591 if (!getCXXABI().classifyReturnType(FI)) 7592 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 7593 for (auto &I : FI.arguments()) 7594 I.info = classifyArgumentType(I.type); 7595 } 7596 7597 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7598 QualType Ty) const override { 7599 return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty)); 7600 } 7601 }; 7602 7603 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 7604 public: 7605 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 7606 : TargetCodeGenInfo(std::make_unique<MSP430ABIInfo>(CGT)) {} 7607 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 7608 CodeGen::CodeGenModule &M) const override; 7609 }; 7610 7611 } 7612 7613 void MSP430TargetCodeGenInfo::setTargetAttributes( 7614 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { 7615 if (GV->isDeclaration()) 7616 return; 7617 if (const FunctionDecl *FD = 
          dyn_cast_or_null<FunctionDecl>(D)) {
    const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
    if (!InterruptAttr)
      return;

    // Handle the 'interrupt' attribute:
    llvm::Function *F = cast<llvm::Function>(GV);

    // Step 1: Set the ISR calling convention.
    F->setCallingConv(llvm::CallingConv::MSP430_INTR);

    // Step 2: Add the required function attributes.
    F->addFnAttr(llvm::Attribute::NoInline);
    F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber()));
  }
}

//===----------------------------------------------------------------------===//
// MIPS ABI Implementation. This works for both little-endian and
// big-endian variants.
//===----------------------------------------------------------------------===//

namespace {
class MipsABIInfo : public ABIInfo {
  bool IsO32;
  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
  void CoerceToIntArgs(uint64_t TySize,
                       SmallVectorImpl<llvm::Type *> &ArgList) const;
  llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
  llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
  llvm::Type* getPaddingType(uint64_t OrigOffset, uint64_t Offset) const;
public:
  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
    ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
    StackAlignInBytes(IsO32 ? 8 : 16) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty, uint64_t &Offset) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
  ABIArgInfo extendType(QualType Ty) const;
};

class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
  unsigned SizeOfUnwindException;
public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
      : TargetCodeGenInfo(std::make_unique<MipsABIInfo>(CGT, IsO32)),
        SizeOfUnwindException(IsO32 ? 24 : 32) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 29;
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;
    llvm::Function *Fn = cast<llvm::Function>(GV);

    if (FD->hasAttr<MipsLongCallAttr>())
      Fn->addFnAttr("long-call");
    else if (FD->hasAttr<MipsShortCallAttr>())
      Fn->addFnAttr("short-call");

    // Other attributes do not have a meaning for declarations.
7684 if (GV->isDeclaration()) 7685 return; 7686 7687 if (FD->hasAttr<Mips16Attr>()) { 7688 Fn->addFnAttr("mips16"); 7689 } 7690 else if (FD->hasAttr<NoMips16Attr>()) { 7691 Fn->addFnAttr("nomips16"); 7692 } 7693 7694 if (FD->hasAttr<MicroMipsAttr>()) 7695 Fn->addFnAttr("micromips"); 7696 else if (FD->hasAttr<NoMicroMipsAttr>()) 7697 Fn->addFnAttr("nomicromips"); 7698 7699 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>(); 7700 if (!Attr) 7701 return; 7702 7703 const char *Kind; 7704 switch (Attr->getInterrupt()) { 7705 case MipsInterruptAttr::eic: Kind = "eic"; break; 7706 case MipsInterruptAttr::sw0: Kind = "sw0"; break; 7707 case MipsInterruptAttr::sw1: Kind = "sw1"; break; 7708 case MipsInterruptAttr::hw0: Kind = "hw0"; break; 7709 case MipsInterruptAttr::hw1: Kind = "hw1"; break; 7710 case MipsInterruptAttr::hw2: Kind = "hw2"; break; 7711 case MipsInterruptAttr::hw3: Kind = "hw3"; break; 7712 case MipsInterruptAttr::hw4: Kind = "hw4"; break; 7713 case MipsInterruptAttr::hw5: Kind = "hw5"; break; 7714 } 7715 7716 Fn->addFnAttr("interrupt", Kind); 7717 7718 } 7719 7720 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 7721 llvm::Value *Address) const override; 7722 7723 unsigned getSizeOfUnwindException() const override { 7724 return SizeOfUnwindException; 7725 } 7726 }; 7727 } 7728 7729 void MipsABIInfo::CoerceToIntArgs( 7730 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const { 7731 llvm::IntegerType *IntTy = 7732 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 7733 7734 // Add (TySize / MinABIStackAlignInBytes) args of IntTy. 7735 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) 7736 ArgList.push_back(IntTy); 7737 7738 // If necessary, add one more integer type to ArgList. 7739 unsigned R = TySize % (MinABIStackAlignInBytes * 8); 7740 7741 if (R) 7742 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 7743 } 7744 7745 // In N32/64, an aligned double precision floating point field is passed in 7746 // a register. 7747 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { 7748 SmallVector<llvm::Type*, 8> ArgList, IntArgList; 7749 7750 if (IsO32) { 7751 CoerceToIntArgs(TySize, ArgList); 7752 return llvm::StructType::get(getVMContext(), ArgList); 7753 } 7754 7755 if (Ty->isComplexType()) 7756 return CGT.ConvertType(Ty); 7757 7758 const RecordType *RT = Ty->getAs<RecordType>(); 7759 7760 // Unions/vectors are passed in integer registers. 7761 if (!RT || !RT->isStructureOrClassType()) { 7762 CoerceToIntArgs(TySize, ArgList); 7763 return llvm::StructType::get(getVMContext(), ArgList); 7764 } 7765 7766 const RecordDecl *RD = RT->getDecl(); 7767 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 7768 assert(!(TySize % 8) && "Size of structure must be multiple of 8."); 7769 7770 uint64_t LastOffset = 0; 7771 unsigned idx = 0; 7772 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 7773 7774 // Iterate over fields in the struct/class and check if there are any aligned 7775 // double fields. 7776 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 7777 i != e; ++i, ++idx) { 7778 const QualType Ty = i->getType(); 7779 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 7780 7781 if (!BT || BT->getKind() != BuiltinType::Double) 7782 continue; 7783 7784 uint64_t Offset = Layout.getFieldOffset(idx); 7785 if (Offset % 64) // Ignore doubles that are not aligned. 
7786 continue; 7787 7788 // Add ((Offset - LastOffset) / 64) args of type i64. 7789 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 7790 ArgList.push_back(I64); 7791 7792 // Add double type. 7793 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 7794 LastOffset = Offset + 64; 7795 } 7796 7797 CoerceToIntArgs(TySize - LastOffset, IntArgList); 7798 ArgList.append(IntArgList.begin(), IntArgList.end()); 7799 7800 return llvm::StructType::get(getVMContext(), ArgList); 7801 } 7802 7803 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset, 7804 uint64_t Offset) const { 7805 if (OrigOffset + MinABIStackAlignInBytes > Offset) 7806 return nullptr; 7807 7808 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8); 7809 } 7810 7811 ABIArgInfo 7812 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 7813 Ty = useFirstFieldIfTransparentUnion(Ty); 7814 7815 uint64_t OrigOffset = Offset; 7816 uint64_t TySize = getContext().getTypeSize(Ty); 7817 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 7818 7819 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), 7820 (uint64_t)StackAlignInBytes); 7821 unsigned CurrOffset = llvm::alignTo(Offset, Align); 7822 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8; 7823 7824 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { 7825 // Ignore empty aggregates. 7826 if (TySize == 0) 7827 return ABIArgInfo::getIgnore(); 7828 7829 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 7830 Offset = OrigOffset + MinABIStackAlignInBytes; 7831 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 7832 } 7833 7834 // If we have reached here, aggregates are passed directly by coercing to 7835 // another structure type. Padding is inserted if the offset of the 7836 // aggregate is unaligned. 7837 ABIArgInfo ArgInfo = 7838 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, 7839 getPaddingType(OrigOffset, CurrOffset)); 7840 ArgInfo.setInReg(true); 7841 return ArgInfo; 7842 } 7843 7844 // Treat an enum type as its underlying type. 7845 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 7846 Ty = EnumTy->getDecl()->getIntegerType(); 7847 7848 // Make sure we pass indirectly things that are too large. 7849 if (const auto *EIT = Ty->getAs<ExtIntType>()) 7850 if (EIT->getNumBits() > 128 || 7851 (EIT->getNumBits() > 64 && 7852 !getContext().getTargetInfo().hasInt128Type())) 7853 return getNaturalAlignIndirect(Ty); 7854 7855 // All integral types are promoted to the GPR width. 7856 if (Ty->isIntegralOrEnumerationType()) 7857 return extendType(Ty); 7858 7859 return ABIArgInfo::getDirect( 7860 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset)); 7861 } 7862 7863 llvm::Type* 7864 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 7865 const RecordType *RT = RetTy->getAs<RecordType>(); 7866 SmallVector<llvm::Type*, 8> RTList; 7867 7868 if (RT && RT->isStructureOrClassType()) { 7869 const RecordDecl *RD = RT->getDecl(); 7870 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 7871 unsigned FieldCnt = Layout.getFieldCount(); 7872 7873 // N32/64 returns struct/classes in floating point registers if the 7874 // following conditions are met: 7875 // 1. The size of the struct/class is no larger than 128-bit. 7876 // 2. The struct/class has one or two fields all of which are floating 7877 // point types. 7878 // 3. The offset of the first field is zero (this follows what gcc does). 
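    //    For example (an illustrative case, not taken from the ABI text): a
    //    'struct { float f; double d; };' meets all three conditions and is
    //    returned as { float, double } in floating point registers, while
    //    adding an 'int' member would send the result to integer registers.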
7879 // 7880 // Any other composite results are returned in integer registers. 7881 // 7882 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 7883 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 7884 for (; b != e; ++b) { 7885 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 7886 7887 if (!BT || !BT->isFloatingPoint()) 7888 break; 7889 7890 RTList.push_back(CGT.ConvertType(b->getType())); 7891 } 7892 7893 if (b == e) 7894 return llvm::StructType::get(getVMContext(), RTList, 7895 RD->hasAttr<PackedAttr>()); 7896 7897 RTList.clear(); 7898 } 7899 } 7900 7901 CoerceToIntArgs(Size, RTList); 7902 return llvm::StructType::get(getVMContext(), RTList); 7903 } 7904 7905 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 7906 uint64_t Size = getContext().getTypeSize(RetTy); 7907 7908 if (RetTy->isVoidType()) 7909 return ABIArgInfo::getIgnore(); 7910 7911 // O32 doesn't treat zero-sized structs differently from other structs. 7912 // However, N32/N64 ignores zero sized return values. 7913 if (!IsO32 && Size == 0) 7914 return ABIArgInfo::getIgnore(); 7915 7916 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 7917 if (Size <= 128) { 7918 if (RetTy->isAnyComplexType()) 7919 return ABIArgInfo::getDirect(); 7920 7921 // O32 returns integer vectors in registers and N32/N64 returns all small 7922 // aggregates in registers. 7923 if (!IsO32 || 7924 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) { 7925 ABIArgInfo ArgInfo = 7926 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 7927 ArgInfo.setInReg(true); 7928 return ArgInfo; 7929 } 7930 } 7931 7932 return getNaturalAlignIndirect(RetTy); 7933 } 7934 7935 // Treat an enum type as its underlying type. 7936 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 7937 RetTy = EnumTy->getDecl()->getIntegerType(); 7938 7939 // Make sure we pass indirectly things that are too large. 7940 if (const auto *EIT = RetTy->getAs<ExtIntType>()) 7941 if (EIT->getNumBits() > 128 || 7942 (EIT->getNumBits() > 64 && 7943 !getContext().getTargetInfo().hasInt128Type())) 7944 return getNaturalAlignIndirect(RetTy); 7945 7946 if (isPromotableIntegerTypeForABI(RetTy)) 7947 return ABIArgInfo::getExtend(RetTy); 7948 7949 if ((RetTy->isUnsignedIntegerOrEnumerationType() || 7950 RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32) 7951 return ABIArgInfo::getSignExtend(RetTy); 7952 7953 return ABIArgInfo::getDirect(); 7954 } 7955 7956 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 7957 ABIArgInfo &RetInfo = FI.getReturnInfo(); 7958 if (!getCXXABI().classifyReturnType(FI)) 7959 RetInfo = classifyReturnType(FI.getReturnType()); 7960 7961 // Check if a pointer to an aggregate is passed as a hidden argument. 7962 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0; 7963 7964 for (auto &I : FI.arguments()) 7965 I.info = classifyArgumentType(I.type, Offset); 7966 } 7967 7968 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7969 QualType OrigTy) const { 7970 QualType Ty = OrigTy; 7971 7972 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64. 7973 // Pointers are also promoted in the same way but this only matters for N32. 7974 unsigned SlotSizeInBits = IsO32 ? 
32 : 64;
7975   unsigned PtrWidth = getTarget().getPointerWidth(0);
7976   bool DidPromote = false;
7977   if ((Ty->isIntegerType() &&
7978        getContext().getIntWidth(Ty) < SlotSizeInBits) ||
7979       (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
7980     DidPromote = true;
7981     Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
7982                                             Ty->isSignedIntegerType());
7983   }
7984
7985   auto TyInfo = getContext().getTypeInfoInChars(Ty);
7986
7987   // The alignment of things in the argument area is never larger than
7988   // StackAlignInBytes.
7989   TyInfo.second =
7990       std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));
7991
7992   // MinABIStackAlignInBytes is the size of argument slots on the stack.
7993   CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
7994
7995   Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
7996                                   TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
7997
7998
7999   // If there was a promotion, "unpromote" into a temporary.
8000   // TODO: can we just use a pointer into a subset of the original slot?
8001   if (DidPromote) {
8002     Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
8003     llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
8004
8005     // Truncate down to the right width.
8006     llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
8007                                                  : CGF.IntPtrTy);
8008     llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
8009     if (OrigTy->isPointerType())
8010       V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
8011
8012     CGF.Builder.CreateStore(V, Temp);
8013     Addr = Temp;
8014   }
8015
8016   return Addr;
8017 }
8018
8019 ABIArgInfo MipsABIInfo::extendType(QualType Ty) const {
8020   int TySize = getContext().getTypeSize(Ty);
8021
8022   // The MIPS64 ABI requires unsigned 32-bit integers to be sign extended.
8023   if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
8024     return ABIArgInfo::getSignExtend(Ty);
8025
8026   return ABIArgInfo::getExtend(Ty);
8027 }
8028
8029 bool
8030 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
8031                                                llvm::Value *Address) const {
8032   // This information comes from gcc's implementation, which seems to be
8033   // as canonical as it gets.
8034
8035   // Everything on MIPS is 4 bytes. Double-precision FP registers
8036   // are aliased to pairs of single-precision FP registers.
8037   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
8038
8039   // 0-31 are the general purpose registers, $0 - $31.
8040   // 32-63 are the floating-point registers, $f0 - $f31.
8041   // 64 and 65 are the multiply/divide registers, $hi and $lo.
8042   // 66 is the (notional, I think) register for signal-handler return.
8043   AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
8044
8045   // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
8046   // They are one bit wide and ignored here.
8047
8048   // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
8049   // (coprocessor 1 is the FP unit)
8050   // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
8051   // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
8052   // 176-181 are the DSP accumulator registers.
8053   AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
8054   return false;
8055 }
8056
8057 //===----------------------------------------------------------------------===//
8058 // AVR ABI Implementation.
8059 //===----------------------------------------------------------------------===// 8060 8061 namespace { 8062 class AVRTargetCodeGenInfo : public TargetCodeGenInfo { 8063 public: 8064 AVRTargetCodeGenInfo(CodeGenTypes &CGT) 8065 : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {} 8066 8067 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 8068 CodeGen::CodeGenModule &CGM) const override { 8069 if (GV->isDeclaration()) 8070 return; 8071 const auto *FD = dyn_cast_or_null<FunctionDecl>(D); 8072 if (!FD) return; 8073 auto *Fn = cast<llvm::Function>(GV); 8074 8075 if (FD->getAttr<AVRInterruptAttr>()) 8076 Fn->addFnAttr("interrupt"); 8077 8078 if (FD->getAttr<AVRSignalAttr>()) 8079 Fn->addFnAttr("signal"); 8080 } 8081 }; 8082 } 8083 8084 //===----------------------------------------------------------------------===// 8085 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults. 8086 // Currently subclassed only to implement custom OpenCL C function attribute 8087 // handling. 8088 //===----------------------------------------------------------------------===// 8089 8090 namespace { 8091 8092 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { 8093 public: 8094 TCETargetCodeGenInfo(CodeGenTypes &CGT) 8095 : DefaultTargetCodeGenInfo(CGT) {} 8096 8097 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 8098 CodeGen::CodeGenModule &M) const override; 8099 }; 8100 8101 void TCETargetCodeGenInfo::setTargetAttributes( 8102 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { 8103 if (GV->isDeclaration()) 8104 return; 8105 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 8106 if (!FD) return; 8107 8108 llvm::Function *F = cast<llvm::Function>(GV); 8109 8110 if (M.getLangOpts().OpenCL) { 8111 if (FD->hasAttr<OpenCLKernelAttr>()) { 8112 // OpenCL C Kernel functions are not subject to inlining 8113 F->addFnAttr(llvm::Attribute::NoInline); 8114 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>(); 8115 if (Attr) { 8116 // Convert the reqd_work_group_size() attributes to metadata. 8117 llvm::LLVMContext &Context = F->getContext(); 8118 llvm::NamedMDNode *OpenCLMetadata = 8119 M.getModule().getOrInsertNamedMetadata( 8120 "opencl.kernel_wg_size_info"); 8121 8122 SmallVector<llvm::Metadata *, 5> Operands; 8123 Operands.push_back(llvm::ConstantAsMetadata::get(F)); 8124 8125 Operands.push_back( 8126 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 8127 M.Int32Ty, llvm::APInt(32, Attr->getXDim())))); 8128 Operands.push_back( 8129 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 8130 M.Int32Ty, llvm::APInt(32, Attr->getYDim())))); 8131 Operands.push_back( 8132 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 8133 M.Int32Ty, llvm::APInt(32, Attr->getZDim())))); 8134 8135 // Add a boolean constant operand for "required" (true) or "hint" 8136 // (false) for implementing the work_group_size_hint attr later. 8137 // Currently always true as the hint is not yet implemented. 
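        // For illustration (kernel name hypothetical), the resulting module
        // metadata for reqd_work_group_size(4, 4, 1) would look roughly like:
        //   !opencl.kernel_wg_size_info = !{!0}
        //   !0 = !{void ()* @kern, i32 4, i32 4, i32 1, i1 true}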
8138       Operands.push_back(
8139           llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
8140       OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
8141       }
8142     }
8143   }
8144 }
8145
8146 }
8147
8148 //===----------------------------------------------------------------------===//
8149 // Hexagon ABI Implementation
8150 //===----------------------------------------------------------------------===//
8151
8152 namespace {
8153
8154 class HexagonABIInfo : public DefaultABIInfo {
8155 public:
8156   HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
8157
8158 private:
8159   ABIArgInfo classifyReturnType(QualType RetTy) const;
8160   ABIArgInfo classifyArgumentType(QualType Ty) const;
8161   ABIArgInfo classifyArgumentType(QualType Ty, unsigned *RegsLeft) const;
8162
8163   void computeInfo(CGFunctionInfo &FI) const override;
8164
8165   Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8166                     QualType Ty) const override;
8167   Address EmitVAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr,
8168                               QualType Ty) const;
8169   Address EmitVAArgForHexagon(CodeGenFunction &CGF, Address VAListAddr,
8170                               QualType Ty) const;
8171   Address EmitVAArgForHexagonLinux(CodeGenFunction &CGF, Address VAListAddr,
8172                                    QualType Ty) const;
8173 };
8174
8175 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
8176 public:
8177   HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
8178       : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {}
8179
8180   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
8181     return 29;
8182   }
8183
8184   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
8185                            CodeGen::CodeGenModule &CGM) const override {
8186     if (GV->isDeclaration())
8187       return;
8188     const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
8189     if (!FD)
8190       return;
8191   }
8192 };
8193
8194 } // namespace
8195
8196 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
8197   unsigned RegsLeft = 6;
8198   if (!getCXXABI().classifyReturnType(FI))
8199     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
8200   for (auto &I : FI.arguments())
8201     I.info = classifyArgumentType(I.type, &RegsLeft);
8202 }
8203
8204 static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) {
8205   assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits"
8206                        " through registers");
8207
8208   if (*RegsLeft == 0)
8209     return false;
8210
8211   if (Size <= 32) {
8212     (*RegsLeft)--;
8213     return true;
8214   }
8215
8216   if (2 <= (*RegsLeft & (~1U))) {
8217     *RegsLeft = (*RegsLeft & (~1U)) - 2;
8218     return true;
8219   }
8220
8221   // The next available register was r5, but the candidate was larger than 32
8222   // bits, so it has to go on the stack. However, we still consume r5.
8223   if (*RegsLeft == 1)
8224     *RegsLeft = 0;
8225
8226   return false;
8227 }
8228
8229 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
8230                                                 unsigned *RegsLeft) const {
8231   if (!isAggregateTypeForABI(Ty)) {
8232     // Treat an enum type as its underlying type.
8233     if (const EnumType *EnumTy = Ty->getAs<EnumType>())
8234       Ty = EnumTy->getDecl()->getIntegerType();
8235
8236     uint64_t Size = getContext().getTypeSize(Ty);
8237     if (Size <= 64)
8238       HexagonAdjustRegsLeft(Size, RegsLeft);
8239
8240     if (Size > 64 && Ty->isExtIntType())
8241       return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
8242
8243     return isPromotableIntegerTypeForABI(Ty) ?
ABIArgInfo::getExtend(Ty) 8244 : ABIArgInfo::getDirect(); 8245 } 8246 8247 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 8248 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 8249 8250 // Ignore empty records. 8251 if (isEmptyRecord(getContext(), Ty, true)) 8252 return ABIArgInfo::getIgnore(); 8253 8254 uint64_t Size = getContext().getTypeSize(Ty); 8255 unsigned Align = getContext().getTypeAlign(Ty); 8256 8257 if (Size > 64) 8258 return getNaturalAlignIndirect(Ty, /*ByVal=*/true); 8259 8260 if (HexagonAdjustRegsLeft(Size, RegsLeft)) 8261 Align = Size <= 32 ? 32 : 64; 8262 if (Size <= Align) { 8263 // Pass in the smallest viable integer type. 8264 if (!llvm::isPowerOf2_64(Size)) 8265 Size = llvm::NextPowerOf2(Size); 8266 return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size)); 8267 } 8268 return DefaultABIInfo::classifyArgumentType(Ty); 8269 } 8270 8271 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { 8272 if (RetTy->isVoidType()) 8273 return ABIArgInfo::getIgnore(); 8274 8275 const TargetInfo &T = CGT.getTarget(); 8276 uint64_t Size = getContext().getTypeSize(RetTy); 8277 8278 if (RetTy->getAs<VectorType>()) { 8279 // HVX vectors are returned in vector registers or register pairs. 8280 if (T.hasFeature("hvx")) { 8281 assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b")); 8282 uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8; 8283 if (Size == VecSize || Size == 2*VecSize) 8284 return ABIArgInfo::getDirectInReg(); 8285 } 8286 // Large vector types should be returned via memory. 8287 if (Size > 64) 8288 return getNaturalAlignIndirect(RetTy); 8289 } 8290 8291 if (!isAggregateTypeForABI(RetTy)) { 8292 // Treat an enum type as its underlying type. 8293 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 8294 RetTy = EnumTy->getDecl()->getIntegerType(); 8295 8296 if (Size > 64 && RetTy->isExtIntType()) 8297 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false); 8298 8299 return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 8300 : ABIArgInfo::getDirect(); 8301 } 8302 8303 if (isEmptyRecord(getContext(), RetTy, true)) 8304 return ABIArgInfo::getIgnore(); 8305 8306 // Aggregates <= 8 bytes are returned in registers, other aggregates 8307 // are returned indirectly. 8308 if (Size <= 64) { 8309 // Return in the smallest viable integer type. 8310 if (!llvm::isPowerOf2_64(Size)) 8311 Size = llvm::NextPowerOf2(Size); 8312 return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size)); 8313 } 8314 return getNaturalAlignIndirect(RetTy, /*ByVal=*/true); 8315 } 8316 8317 Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF, 8318 Address VAListAddr, 8319 QualType Ty) const { 8320 // Load the overflow area pointer. 8321 Address __overflow_area_pointer_p = 8322 CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p"); 8323 llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad( 8324 __overflow_area_pointer_p, "__overflow_area_pointer"); 8325 8326 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; 8327 if (Align > 4) { 8328 // Alignment should be a power of 2. 8329 assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!"); 8330 8331 // overflow_arg_area = (overflow_arg_area + align - 1) & -align; 8332 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); 8333 8334 // Add offset to the current pointer to access the argument. 
8335     __overflow_area_pointer =
8336         CGF.Builder.CreateGEP(__overflow_area_pointer, Offset);
8337     llvm::Value *AsInt =
8338         CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
8339
8340     // Create a mask which should be "AND"ed
8341     // with (overflow_arg_area + align - 1).
8342     llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align);
8343     __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
8344         CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(),
8345         "__overflow_area_pointer.align");
8346   }
8347
8348   // Get the type of the argument from memory and bitcast
8349   // the overflow area pointer to the argument type.
8350   llvm::Type *PTy = CGF.ConvertTypeForMem(Ty);
8351   Address AddrTyped = CGF.Builder.CreateBitCast(
8352       Address(__overflow_area_pointer, CharUnits::fromQuantity(Align)),
8353       llvm::PointerType::getUnqual(PTy));
8354
8355   // Round up to the minimum stack alignment for varargs, which is 4 bytes.
8356   uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
8357
8358   __overflow_area_pointer = CGF.Builder.CreateGEP(
8359       __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
8360       "__overflow_area_pointer.next");
8361   CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);
8362
8363   return AddrTyped;
8364 }
8365
8366 Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
8367                                             Address VAListAddr,
8368                                             QualType Ty) const {
8369   // FIXME: Need to handle alignment.
8370   llvm::Type *BP = CGF.Int8PtrTy;
8371   llvm::Type *BPP = CGF.Int8PtrPtrTy;
8372   CGBuilderTy &Builder = CGF.Builder;
8373   Address VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
8374   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
8375   // Handle address alignment for type alignments larger than 32 bits.
8376   uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
8377   if (TyAlign > 4) {
8378     assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!");
8379     llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
8380     AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
8381     AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
8382     Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
8383   }
8384   llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
8385   Address AddrTyped = Builder.CreateBitCast(
8386       Address(Addr, CharUnits::fromQuantity(TyAlign)), PTy);
8387
8388   uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
8389   llvm::Value *NextAddr = Builder.CreateGEP(
8390       Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
8391   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
8392
8393   return AddrTyped;
8394 }
8395
8396 Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
8397                                                  Address VAListAddr,
8398                                                  QualType Ty) const {
8399   int ArgSize = CGF.getContext().getTypeSize(Ty) / 8;
8400
8401   if (ArgSize > 8)
8402     return EmitVAArgFromMemory(CGF, VAListAddr, Ty);
8403
8404   // Here we have to check whether the argument is in the register area or
8405   // in the overflow area.
8406   // If the saved register area pointer + argsize rounded up to the alignment
8407   // exceeds the saved register area end pointer, the argument is in the overflow area.
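  // A worked illustration (hypothetical values): if the register save area
  // ends 24 bytes past its start and the current pointer is 20 bytes in, an
  // 8-byte argument is first aligned up to offset 24, and 24 + 8 = 32 > 24,
  // so the argument must be loaded from the overflow area instead.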
8408   unsigned RegsLeft = 6;
8409   Ty = CGF.getContext().getCanonicalType(Ty);
8410   (void)classifyArgumentType(Ty, &RegsLeft);
8411
8412   llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
8413   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
8414   llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
8415   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
8416
8417   // Get the rounded size of the argument. GCC does not allow varargs of
8418   // size < 4 bytes; we follow the same logic here.
8419   ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
8420   int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
8421
8422   // The argument may be in the saved register area.
8423   CGF.EmitBlock(MaybeRegBlock);
8424
8425   // Load the current saved register area pointer.
8426   Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP(
8427       VAListAddr, 0, "__current_saved_reg_area_pointer_p");
8428   llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad(
8429       __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer");
8430
8431   // Load the saved register area end pointer.
8432   Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP(
8433       VAListAddr, 1, "__saved_reg_area_end_pointer_p");
8434   llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad(
8435       __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer");
8436
8437   // If the size of the argument is > 4 bytes, check if the stack
8438   // location is aligned to 8 bytes.
8439   if (ArgAlign > 4) {
8440
8441     llvm::Value *__current_saved_reg_area_pointer_int =
8442         CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer,
8443                                    CGF.Int32Ty);
8444
8445     __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd(
8446         __current_saved_reg_area_pointer_int,
8447         llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)),
8448         "align_current_saved_reg_area_pointer");
8449
8450     __current_saved_reg_area_pointer_int =
8451         CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int,
8452                               llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
8453                               "align_current_saved_reg_area_pointer");
8454
8455     __current_saved_reg_area_pointer =
8456         CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int,
8457                                    __current_saved_reg_area_pointer->getType(),
8458                                    "align_current_saved_reg_area_pointer");
8459   }
8460
8461   llvm::Value *__new_saved_reg_area_pointer =
8462       CGF.Builder.CreateGEP(__current_saved_reg_area_pointer,
8463                             llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
8464                             "__new_saved_reg_area_pointer");
8465
8466   llvm::Value *UsingStack =
8467       CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer,
8468                                 __saved_reg_area_end_pointer);
8469
8470   CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock);
8471
8472   // The argument is in the saved register area.
8473   // Emit the block that loads it from there.
8474   CGF.EmitBlock(InRegBlock);
8475
8476   llvm::Type *PTy = CGF.ConvertType(Ty);
8477   llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast(
8478       __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy));
8479
8480   CGF.Builder.CreateStore(__new_saved_reg_area_pointer,
8481                           __current_saved_reg_area_pointer_p);
8482
8483   CGF.EmitBranch(ContBlock);
8484
8485   // The argument is in the overflow area.
8486   // Emit the block that loads it from there.
8487   CGF.EmitBlock(OnStackBlock);
8488
8489   // Load the overflow area pointer.
8490   Address __overflow_area_pointer_p =
8491       CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
8492   llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
8493       __overflow_area_pointer_p, "__overflow_area_pointer");
8494
8495   // Align the overflow area pointer to the alignment of the argument.
8496   if (ArgAlign > 4) {
8497     llvm::Value *__overflow_area_pointer_int =
8498         CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
8499
8500     __overflow_area_pointer_int =
8501         CGF.Builder.CreateAdd(__overflow_area_pointer_int,
8502                               llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1),
8503                               "align_overflow_area_pointer");
8504
8505     __overflow_area_pointer_int =
8506         CGF.Builder.CreateAnd(__overflow_area_pointer_int,
8507                               llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
8508                               "align_overflow_area_pointer");
8509
8510     __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
8511         __overflow_area_pointer_int, __overflow_area_pointer->getType(),
8512         "align_overflow_area_pointer");
8513   }
8514
8515   // Compute the pointer to the next argument in the overflow area and store
8516   // it back to the overflow area pointer.
8517   llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
8518       __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
8519       "__overflow_area_pointer.next");
8520
8521   CGF.Builder.CreateStore(__new_overflow_area_pointer,
8522                           __overflow_area_pointer_p);
8523
8524   CGF.Builder.CreateStore(__new_overflow_area_pointer,
8525                           __current_saved_reg_area_pointer_p);
8526
8527   // Bitcast the overflow area pointer to the type of the argument.
8528   llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty);
8529   llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast(
8530       __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy));
8531
8532   CGF.EmitBranch(ContBlock);
8533
8534   // In the continuation block, pick the correct pointer from which to load
8535   // the variable argument.
8536   CGF.EmitBlock(ContBlock);
8537
8538   llvm::Type *MemPTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
8539   llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
8540   ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
8541   ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);
8542
8543   return Address(ArgAddr, CharUnits::fromQuantity(ArgAlign));
8544 }
8545
8546 Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8547                                   QualType Ty) const {
8548
8549   if (getTarget().getTriple().isMusl())
8550     return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty);
8551
8552   return EmitVAArgForHexagon(CGF, VAListAddr, Ty);
8553 }
8554
8555 //===----------------------------------------------------------------------===//
8556 // Lanai ABI Implementation
8557 //===----------------------------------------------------------------------===//
8558
8559 namespace {
8560 class LanaiABIInfo : public DefaultABIInfo {
8561 public:
8562   LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
8563
8564   bool shouldUseInReg(QualType Ty, CCState &State) const;
8565
8566   void computeInfo(CGFunctionInfo &FI) const override {
8567     CCState State(FI);
8568     // Lanai uses 4 registers to pass arguments unless the function has the
8569     // regparm attribute set.
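    // E.g., with __attribute__((regparm(2))) on the function, FreeRegs below
    // would start at 2 instead of 4 (an illustrative reading of this code,
    // not a statement from the Lanai ABI documents).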
8570 if (FI.getHasRegParm()) { 8571 State.FreeRegs = FI.getRegParm(); 8572 } else { 8573 State.FreeRegs = 4; 8574 } 8575 8576 if (!getCXXABI().classifyReturnType(FI)) 8577 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 8578 for (auto &I : FI.arguments()) 8579 I.info = classifyArgumentType(I.type, State); 8580 } 8581 8582 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const; 8583 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const; 8584 }; 8585 } // end anonymous namespace 8586 8587 bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const { 8588 unsigned Size = getContext().getTypeSize(Ty); 8589 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U; 8590 8591 if (SizeInRegs == 0) 8592 return false; 8593 8594 if (SizeInRegs > State.FreeRegs) { 8595 State.FreeRegs = 0; 8596 return false; 8597 } 8598 8599 State.FreeRegs -= SizeInRegs; 8600 8601 return true; 8602 } 8603 8604 ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal, 8605 CCState &State) const { 8606 if (!ByVal) { 8607 if (State.FreeRegs) { 8608 --State.FreeRegs; // Non-byval indirects just use one pointer. 8609 return getNaturalAlignIndirectInReg(Ty); 8610 } 8611 return getNaturalAlignIndirect(Ty, false); 8612 } 8613 8614 // Compute the byval alignment. 8615 const unsigned MinABIStackAlignInBytes = 4; 8616 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 8617 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true, 8618 /*Realign=*/TypeAlign > 8619 MinABIStackAlignInBytes); 8620 } 8621 8622 ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty, 8623 CCState &State) const { 8624 // Check with the C++ ABI first. 8625 const RecordType *RT = Ty->getAs<RecordType>(); 8626 if (RT) { 8627 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); 8628 if (RAA == CGCXXABI::RAA_Indirect) { 8629 return getIndirectResult(Ty, /*ByVal=*/false, State); 8630 } else if (RAA == CGCXXABI::RAA_DirectInMemory) { 8631 return getNaturalAlignIndirect(Ty, /*ByVal=*/true); 8632 } 8633 } 8634 8635 if (isAggregateTypeForABI(Ty)) { 8636 // Structures with flexible arrays are always indirect. 8637 if (RT && RT->getDecl()->hasFlexibleArrayMember()) 8638 return getIndirectResult(Ty, /*ByVal=*/true, State); 8639 8640 // Ignore empty structs/unions. 8641 if (isEmptyRecord(getContext(), Ty, true)) 8642 return ABIArgInfo::getIgnore(); 8643 8644 llvm::LLVMContext &LLVMContext = getVMContext(); 8645 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; 8646 if (SizeInRegs <= State.FreeRegs) { 8647 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 8648 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32); 8649 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 8650 State.FreeRegs -= SizeInRegs; 8651 return ABIArgInfo::getDirectInReg(Result); 8652 } else { 8653 State.FreeRegs = 0; 8654 } 8655 return getIndirectResult(Ty, true, State); 8656 } 8657 8658 // Treat an enum type as its underlying type. 8659 if (const auto *EnumTy = Ty->getAs<EnumType>()) 8660 Ty = EnumTy->getDecl()->getIntegerType(); 8661 8662 bool InReg = shouldUseInReg(Ty, State); 8663 8664 // Don't pass >64 bit integers in registers. 
8665 if (const auto *EIT = Ty->getAs<ExtIntType>()) 8666 if (EIT->getNumBits() > 64) 8667 return getIndirectResult(Ty, /*ByVal=*/true, State); 8668 8669 if (isPromotableIntegerTypeForABI(Ty)) { 8670 if (InReg) 8671 return ABIArgInfo::getDirectInReg(); 8672 return ABIArgInfo::getExtend(Ty); 8673 } 8674 if (InReg) 8675 return ABIArgInfo::getDirectInReg(); 8676 return ABIArgInfo::getDirect(); 8677 } 8678 8679 namespace { 8680 class LanaiTargetCodeGenInfo : public TargetCodeGenInfo { 8681 public: 8682 LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 8683 : TargetCodeGenInfo(std::make_unique<LanaiABIInfo>(CGT)) {} 8684 }; 8685 } 8686 8687 //===----------------------------------------------------------------------===// 8688 // AMDGPU ABI Implementation 8689 //===----------------------------------------------------------------------===// 8690 8691 namespace { 8692 8693 class AMDGPUABIInfo final : public DefaultABIInfo { 8694 private: 8695 static const unsigned MaxNumRegsForArgsRet = 16; 8696 8697 unsigned numRegsForType(QualType Ty) const; 8698 8699 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 8700 bool isHomogeneousAggregateSmallEnough(const Type *Base, 8701 uint64_t Members) const override; 8702 8703 // Coerce HIP pointer arguments from generic pointers to global ones. 8704 llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS, 8705 unsigned ToAS) const { 8706 // Structure types. 8707 if (auto STy = dyn_cast<llvm::StructType>(Ty)) { 8708 SmallVector<llvm::Type *, 8> EltTys; 8709 bool Changed = false; 8710 for (auto T : STy->elements()) { 8711 auto NT = coerceKernelArgumentType(T, FromAS, ToAS); 8712 EltTys.push_back(NT); 8713 Changed |= (NT != T); 8714 } 8715 // Skip if there is no change in element types. 8716 if (!Changed) 8717 return STy; 8718 if (STy->hasName()) 8719 return llvm::StructType::create( 8720 EltTys, (STy->getName() + ".coerce").str(), STy->isPacked()); 8721 return llvm::StructType::get(getVMContext(), EltTys, STy->isPacked()); 8722 } 8723 // Array types. 8724 if (auto ATy = dyn_cast<llvm::ArrayType>(Ty)) { 8725 auto T = ATy->getElementType(); 8726 auto NT = coerceKernelArgumentType(T, FromAS, ToAS); 8727 // Skip if there is no change in that element type. 8728 if (NT == T) 8729 return ATy; 8730 return llvm::ArrayType::get(NT, ATy->getNumElements()); 8731 } 8732 // Single value types. 8733 if (Ty->isPointerTy() && Ty->getPointerAddressSpace() == FromAS) 8734 return llvm::PointerType::get( 8735 cast<llvm::PointerType>(Ty)->getElementType(), ToAS); 8736 return Ty; 8737 } 8738 8739 public: 8740 explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) : 8741 DefaultABIInfo(CGT) {} 8742 8743 ABIArgInfo classifyReturnType(QualType RetTy) const; 8744 ABIArgInfo classifyKernelArgumentType(QualType Ty) const; 8745 ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const; 8746 8747 void computeInfo(CGFunctionInfo &FI) const override; 8748 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 8749 QualType Ty) const override; 8750 }; 8751 8752 bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 8753 return true; 8754 } 8755 8756 bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough( 8757 const Type *Base, uint64_t Members) const { 8758 uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32; 8759 8760 // Homogeneous Aggregates may occupy at most 16 registers. 8761 return Members * NumRegs <= MaxNumRegsForArgsRet; 8762 } 8763 8764 /// Estimate number of registers the type will use when passed in registers. 
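/// For example, by the rules below a <4 x half> vector packs two 16-bit
/// elements per register and counts as 2, while a <3 x float> counts as 3,
/// one 32-bit register per element (illustrative cases, not ABI text).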
8765 unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
8766   unsigned NumRegs = 0;
8767
8768   if (const VectorType *VT = Ty->getAs<VectorType>()) {
8769     // Compute from the number of elements. The reported size is based on the
8770     // in-memory size, which includes the padding 4th element for 3-vectors.
8771     QualType EltTy = VT->getElementType();
8772     unsigned EltSize = getContext().getTypeSize(EltTy);
8773
8774     // 16-bit element vectors should be passed as packed.
8775     if (EltSize == 16)
8776       return (VT->getNumElements() + 1) / 2;
8777
8778     unsigned EltNumRegs = (EltSize + 31) / 32;
8779     return EltNumRegs * VT->getNumElements();
8780   }
8781
8782   if (const RecordType *RT = Ty->getAs<RecordType>()) {
8783     const RecordDecl *RD = RT->getDecl();
8784     assert(!RD->hasFlexibleArrayMember());
8785
8786     for (const FieldDecl *Field : RD->fields()) {
8787       QualType FieldTy = Field->getType();
8788       NumRegs += numRegsForType(FieldTy);
8789     }
8790
8791     return NumRegs;
8792   }
8793
8794   return (getContext().getTypeSize(Ty) + 31) / 32;
8795 }
8796
8797 void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
8798   llvm::CallingConv::ID CC = FI.getCallingConvention();
8799
8800   if (!getCXXABI().classifyReturnType(FI))
8801     FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
8802
8803   unsigned NumRegsLeft = MaxNumRegsForArgsRet;
8804   for (auto &Arg : FI.arguments()) {
8805     if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
8806       Arg.info = classifyKernelArgumentType(Arg.type);
8807     } else {
8808       Arg.info = classifyArgumentType(Arg.type, NumRegsLeft);
8809     }
8810   }
8811 }
8812
8813 Address AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8814                                  QualType Ty) const {
8815   llvm_unreachable("AMDGPU does not support varargs");
8816 }
8817
8818 ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
8819   if (isAggregateTypeForABI(RetTy)) {
8820     // Records with non-trivial destructors/copy-constructors should not be
8821     // returned by value.
8822     if (!getRecordArgABI(RetTy, getCXXABI())) {
8823       // Ignore empty structs/unions.
8824       if (isEmptyRecord(getContext(), RetTy, true))
8825         return ABIArgInfo::getIgnore();
8826
8827       // Lower single-element structs to just return a regular value.
8828       if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
8829         return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
8830
8831       if (const RecordType *RT = RetTy->getAs<RecordType>()) {
8832         const RecordDecl *RD = RT->getDecl();
8833         if (RD->hasFlexibleArrayMember())
8834           return DefaultABIInfo::classifyReturnType(RetTy);
8835       }
8836
8837       // Pack aggregates <= 8 bytes into a single VGPR or a pair.
8838       uint64_t Size = getContext().getTypeSize(RetTy);
8839       if (Size <= 16)
8840         return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
8841
8842       if (Size <= 32)
8843         return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
8844
8845       if (Size <= 64) {
8846         llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
8847         return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
8848       }
8849
8850       if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
8851         return ABIArgInfo::getDirect();
8852     }
8853   }
8854
8855   // Otherwise just do the default thing.
8856   return DefaultABIInfo::classifyReturnType(RetTy);
8857 }
8858
8859 /// For kernels, all parameters are really passed in a special buffer. It
8860 /// doesn't make sense to pass anything byval, so everything must be direct.
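/// Illustratively, under HIP a kernel parameter of type
/// 'struct S { int *p; };' (a hypothetical example) has its generic pointer
/// member rewritten to a global-address-space pointer by
/// coerceKernelArgumentType() above.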
8861 ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
8862   Ty = useFirstFieldIfTransparentUnion(Ty);
8863
8864   // TODO: Can we omit empty structs?
8865
8866   if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
8867     Ty = QualType(SeltTy, 0);
8868
8869   llvm::Type *OrigLTy = CGT.ConvertType(Ty);
8870   llvm::Type *LTy = OrigLTy;
8871   if (getContext().getLangOpts().HIP) {
8872     LTy = coerceKernelArgumentType(
8873         OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
8874         /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
8875   }
8876
8877   // FIXME: Should also use this for OpenCL, but it requires addressing the
8878   // problem of kernels being called.
8879   //
8880   // FIXME: This doesn't apply the optimization of coercing pointers in structs
8881   // to global address space when using byref. This would require implementing
8882   // a new kind of coercion of the in-memory type for indirect arguments.
8883   if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy &&
8884       isAggregateTypeForABI(Ty)) {
8885     return ABIArgInfo::getIndirectAliased(
8886         getContext().getTypeAlignInChars(Ty),
8887         getContext().getTargetAddressSpace(LangAS::opencl_constant),
8888         false /*Realign*/, nullptr /*Padding*/);
8889   }
8890
8891   // If we set CanBeFlattened to true, CodeGen will expand the struct to its
8892   // individual elements, which confuses the Clover OpenCL backend; therefore
8893   // we have to set it to false here. Other args of getDirect() are defaults.
8894   return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
8895 }
8896
8897 ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
8898                                                unsigned &NumRegsLeft) const {
8899   assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");
8900
8901   Ty = useFirstFieldIfTransparentUnion(Ty);
8902
8903   if (isAggregateTypeForABI(Ty)) {
8904     // Records with non-trivial destructors/copy-constructors should not be
8905     // passed by value.
8906     if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
8907       return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
8908
8909     // Ignore empty structs/unions.
8910     if (isEmptyRecord(getContext(), Ty, true))
8911       return ABIArgInfo::getIgnore();
8912
8913     // Lower single-element structs to just pass a regular value. TODO: We
8914     // could do reasonable-size multiple-element structs too, using getExpand(),
8915     // though watch out for things like bitfields.
8916     if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
8917       return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
8918
8919     if (const RecordType *RT = Ty->getAs<RecordType>()) {
8920       const RecordDecl *RD = RT->getDecl();
8921       if (RD->hasFlexibleArrayMember())
8922         return DefaultABIInfo::classifyArgumentType(Ty);
8923     }
8924
8925     // Pack aggregates <= 8 bytes into a single VGPR or a pair.
8926     uint64_t Size = getContext().getTypeSize(Ty);
8927     if (Size <= 64) {
8928       unsigned NumRegs = (Size + 31) / 32;
8929       NumRegsLeft -= std::min(NumRegsLeft, NumRegs);
8930
8931       if (Size <= 16)
8932         return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
8933
8934       if (Size <= 32)
8935         return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
8936
8937       // XXX: Should this be i64 instead, and should the limit increase?
8938 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext()); 8939 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2)); 8940 } 8941 8942 if (NumRegsLeft > 0) { 8943 unsigned NumRegs = numRegsForType(Ty); 8944 if (NumRegsLeft >= NumRegs) { 8945 NumRegsLeft -= NumRegs; 8946 return ABIArgInfo::getDirect(); 8947 } 8948 } 8949 } 8950 8951 // Otherwise just do the default thing. 8952 ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty); 8953 if (!ArgInfo.isIndirect()) { 8954 unsigned NumRegs = numRegsForType(Ty); 8955 NumRegsLeft -= std::min(NumRegs, NumRegsLeft); 8956 } 8957 8958 return ArgInfo; 8959 } 8960 8961 class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo { 8962 public: 8963 AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT) 8964 : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {} 8965 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 8966 CodeGen::CodeGenModule &M) const override; 8967 unsigned getOpenCLKernelCallingConv() const override; 8968 8969 llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM, 8970 llvm::PointerType *T, QualType QT) const override; 8971 8972 LangAS getASTAllocaAddressSpace() const override { 8973 return getLangASFromTargetAS( 8974 getABIInfo().getDataLayout().getAllocaAddrSpace()); 8975 } 8976 LangAS getGlobalVarAddressSpace(CodeGenModule &CGM, 8977 const VarDecl *D) const override; 8978 llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts, 8979 SyncScope Scope, 8980 llvm::AtomicOrdering Ordering, 8981 llvm::LLVMContext &Ctx) const override; 8982 llvm::Function * 8983 createEnqueuedBlockKernel(CodeGenFunction &CGF, 8984 llvm::Function *BlockInvokeFunc, 8985 llvm::Value *BlockLiteral) const override; 8986 bool shouldEmitStaticExternCAliases() const override; 8987 void setCUDAKernelCallingConvention(const FunctionType *&FT) const override; 8988 }; 8989 } 8990 8991 static bool requiresAMDGPUProtectedVisibility(const Decl *D, 8992 llvm::GlobalValue *GV) { 8993 if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility) 8994 return false; 8995 8996 return D->hasAttr<OpenCLKernelAttr>() || 8997 (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) || 8998 (isa<VarDecl>(D) && 8999 (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() || 9000 cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() || 9001 cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType())); 9002 } 9003 9004 void AMDGPUTargetCodeGenInfo::setTargetAttributes( 9005 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { 9006 if (requiresAMDGPUProtectedVisibility(D, GV)) { 9007 GV->setVisibility(llvm::GlobalValue::ProtectedVisibility); 9008 GV->setDSOLocal(true); 9009 } 9010 9011 if (GV->isDeclaration()) 9012 return; 9013 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 9014 if (!FD) 9015 return; 9016 9017 llvm::Function *F = cast<llvm::Function>(GV); 9018 9019 const auto *ReqdWGS = M.getLangOpts().OpenCL ? 
9020       FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
9021
9022
9023   const bool IsOpenCLKernel = M.getLangOpts().OpenCL &&
9024                               FD->hasAttr<OpenCLKernelAttr>();
9025   const bool IsHIPKernel = M.getLangOpts().HIP &&
9026                            FD->hasAttr<CUDAGlobalAttr>();
9027   if ((IsOpenCLKernel || IsHIPKernel) &&
9028       (M.getTriple().getOS() == llvm::Triple::AMDHSA))
9029     F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");
9030
9031   if (IsHIPKernel)
9032     F->addFnAttr("uniform-work-group-size", "true");
9033
9034
9035   const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
9036   if (ReqdWGS || FlatWGS) {
9037     unsigned Min = 0;
9038     unsigned Max = 0;
9039     if (FlatWGS) {
9040       Min = FlatWGS->getMin()
9041                 ->EvaluateKnownConstInt(M.getContext())
9042                 .getExtValue();
9043       Max = FlatWGS->getMax()
9044                 ->EvaluateKnownConstInt(M.getContext())
9045                 .getExtValue();
9046     }
9047     if (ReqdWGS && Min == 0 && Max == 0)
9048       Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
9049
9050     if (Min != 0) {
9051       assert(Min <= Max && "Min must be less than or equal to Max");
9052
9053       std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
9054       F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
9055     } else
9056       assert(Max == 0 && "Max must be zero");
9057   } else if (IsOpenCLKernel || IsHIPKernel) {
9058     // By default, restrict the maximum size to a value specified by
9059     // --gpu-max-threads-per-block=n or its default value for HIP.
9060     const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
9061     const unsigned DefaultMaxWorkGroupSize =
9062         IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
9063                        : M.getLangOpts().GPUMaxThreadsPerBlock;
9064     std::string AttrVal =
9065         std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
9066     F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
9067   }
9068
9069   if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
9070     unsigned Min =
9071         Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue();
9072     unsigned Max = Attr->getMax() ? Attr->getMax()
9073                                         ->EvaluateKnownConstInt(M.getContext())
9074                                         .getExtValue()
9075                                   : 0;
9076
9077     if (Min != 0) {
9078       assert((Max == 0 || Min <= Max) && "Min must be less than or equal to Max");
9079
9080       std::string AttrVal = llvm::utostr(Min);
9081       if (Max != 0)
9082         AttrVal = AttrVal + "," + llvm::utostr(Max);
9083       F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
9084     } else
9085       assert(Max == 0 && "Max must be zero");
9086   }
9087
9088   if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
9089     unsigned NumSGPR = Attr->getNumSGPR();
9090
9091     if (NumSGPR != 0)
9092       F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
9093   }
9094
9095   if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
9096     uint32_t NumVGPR = Attr->getNumVGPR();
9097
9098     if (NumVGPR != 0)
9099       F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
9100   }
9101 }
9102
9103 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
9104   return llvm::CallingConv::AMDGPU_KERNEL;
9105 }
9106
9107 // Currently LLVM assumes null pointers always have value 0,
9108 // which results in incorrectly transformed IR. Therefore, instead of
9109 // emitting null pointers in private and local address spaces, a null
9110 // pointer in generic address space is emitted and then cast to a
9111 // pointer in the local or private address space.
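// For instance, a null pointer in the private address space would be emitted
// roughly as (assuming the usual amdgcn convention that AS 5 is private):
//   addrspacecast (i8* null to i8 addrspace(5)*)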
9112 llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer( 9113 const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT, 9114 QualType QT) const { 9115 if (CGM.getContext().getTargetNullPointerValue(QT) == 0) 9116 return llvm::ConstantPointerNull::get(PT); 9117 9118 auto &Ctx = CGM.getContext(); 9119 auto NPT = llvm::PointerType::get(PT->getElementType(), 9120 Ctx.getTargetAddressSpace(LangAS::opencl_generic)); 9121 return llvm::ConstantExpr::getAddrSpaceCast( 9122 llvm::ConstantPointerNull::get(NPT), PT); 9123 } 9124 9125 LangAS 9126 AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM, 9127 const VarDecl *D) const { 9128 assert(!CGM.getLangOpts().OpenCL && 9129 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) && 9130 "Address space agnostic languages only"); 9131 LangAS DefaultGlobalAS = getLangASFromTargetAS( 9132 CGM.getContext().getTargetAddressSpace(LangAS::opencl_global)); 9133 if (!D) 9134 return DefaultGlobalAS; 9135 9136 LangAS AddrSpace = D->getType().getAddressSpace(); 9137 assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace)); 9138 if (AddrSpace != LangAS::Default) 9139 return AddrSpace; 9140 9141 if (CGM.isTypeConstant(D->getType(), false)) { 9142 if (auto ConstAS = CGM.getTarget().getConstantAddressSpace()) 9143 return ConstAS.getValue(); 9144 } 9145 return DefaultGlobalAS; 9146 } 9147 9148 llvm::SyncScope::ID 9149 AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts, 9150 SyncScope Scope, 9151 llvm::AtomicOrdering Ordering, 9152 llvm::LLVMContext &Ctx) const { 9153 std::string Name; 9154 switch (Scope) { 9155 case SyncScope::OpenCLWorkGroup: 9156 Name = "workgroup"; 9157 break; 9158 case SyncScope::OpenCLDevice: 9159 Name = "agent"; 9160 break; 9161 case SyncScope::OpenCLAllSVMDevices: 9162 Name = ""; 9163 break; 9164 case SyncScope::OpenCLSubGroup: 9165 Name = "wavefront"; 9166 } 9167 9168 if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) { 9169 if (!Name.empty()) 9170 Name = Twine(Twine(Name) + Twine("-")).str(); 9171 9172 Name = Twine(Twine(Name) + Twine("one-as")).str(); 9173 } 9174 9175 return Ctx.getOrInsertSyncScopeID(Name); 9176 } 9177 9178 bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const { 9179 return false; 9180 } 9181 9182 void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention( 9183 const FunctionType *&FT) const { 9184 FT = getABIInfo().getContext().adjustFunctionType( 9185 FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel)); 9186 } 9187 9188 //===----------------------------------------------------------------------===// 9189 // SPARC v8 ABI Implementation. 9190 // Based on the SPARC Compliance Definition version 2.4.1. 9191 // 9192 // Ensures that complex values are passed in registers. 
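// E.g., a function returning '_Complex float' is classified getDirect() here,
// where the default ABI would return it indirectly via an sret pointer
// (an illustrative consequence of classifyReturnType() below).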
9193 //
9194 namespace {
9195 class SparcV8ABIInfo : public DefaultABIInfo {
9196 public:
9197   SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
9198
9199 private:
9200   ABIArgInfo classifyReturnType(QualType RetTy) const;
9201   void computeInfo(CGFunctionInfo &FI) const override;
9202 };
9203 } // end anonymous namespace
9204
9205
9206 ABIArgInfo
9207 SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
9208   if (Ty->isAnyComplexType()) {
9209     return ABIArgInfo::getDirect();
9210   }
9211   else {
9212     return DefaultABIInfo::classifyReturnType(Ty);
9213   }
9214 }
9215
9216 void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
9217
9218   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
9219   for (auto &Arg : FI.arguments())
9220     Arg.info = classifyArgumentType(Arg.type);
9221 }
9222
9223 namespace {
9224 class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
9225 public:
9226   SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
9227       : TargetCodeGenInfo(std::make_unique<SparcV8ABIInfo>(CGT)) {}
9228 };
9229 } // end anonymous namespace
9230
9231 //===----------------------------------------------------------------------===//
9232 // SPARC v9 ABI Implementation.
9233 // Based on the SPARC Compliance Definition version 2.4.1.
9234 //
9235 // Function arguments are mapped to a nominal "parameter array" and promoted to
9236 // registers depending on their type. Each argument occupies 8 or 16 bytes in
9237 // the array; structs larger than 16 bytes are passed indirectly.
9238 //
9239 // One case requires special care:
9240 //
9241 //   struct mixed {
9242 //     int i;
9243 //     float f;
9244 //   };
9245 //
9246 // When a struct mixed is passed by value, it only occupies 8 bytes in the
9247 // parameter array, but the int is passed in an integer register, and the float
9248 // is passed in a floating point register. This is represented as two arguments
9249 // with the LLVM IR inreg attribute:
9250 //
9251 //   declare void @f(i32 inreg %i, float inreg %f)
9252 //
9253 // The code generator will only allocate 4 bytes from the parameter array for
9254 // the inreg arguments. All other arguments are allocated a multiple of 8
9255 // bytes.
9256 //
9257 namespace {
9258 class SparcV9ABIInfo : public ABIInfo {
9259 public:
9260   SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
9261
9262 private:
9263   ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
9264   void computeInfo(CGFunctionInfo &FI) const override;
9265   Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
9266                     QualType Ty) const override;
9267
9268   // Coercion type builder for structs passed in registers. The coercion type
9269   // serves two purposes:
9270   //
9271   // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
9272   //    in registers.
9273   // 2. Expose aligned floating point elements as first-level elements, so the
9274   //    code generator knows to pass them in floating point registers.
9275   //
9276   // We also compute the InReg flag which indicates that the struct contains
9277   // aligned 32-bit floats.
9278   //
9279   struct CoerceBuilder {
9280     llvm::LLVMContext &Context;
9281     const llvm::DataLayout &DL;
9282     SmallVector<llvm::Type*, 8> Elems;
9283     uint64_t Size;
9284     bool InReg;
9285
9286     CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
9287         : Context(c), DL(dl), Size(0), InReg(false) {}
9288
9289     // Pad Elems with integers until Size is ToSize.
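    // E.g., with Size == 32 and ToSize == 128 this appends an i32 to finish
    // the current 64-bit word and then a single i64 (a worked example of the
    // three steps below).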
9290 void pad(uint64_t ToSize) { 9291 assert(ToSize >= Size && "Cannot remove elements"); 9292 if (ToSize == Size) 9293 return; 9294 9295 // Finish the current 64-bit word. 9296 uint64_t Aligned = llvm::alignTo(Size, 64); 9297 if (Aligned > Size && Aligned <= ToSize) { 9298 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size)); 9299 Size = Aligned; 9300 } 9301 9302 // Add whole 64-bit words. 9303 while (Size + 64 <= ToSize) { 9304 Elems.push_back(llvm::Type::getInt64Ty(Context)); 9305 Size += 64; 9306 } 9307 9308 // Final in-word padding. 9309 if (Size < ToSize) { 9310 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size)); 9311 Size = ToSize; 9312 } 9313 } 9314 9315 // Add a floating point element at Offset. 9316 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) { 9317 // Unaligned floats are treated as integers. 9318 if (Offset % Bits) 9319 return; 9320 // The InReg flag is only required if there are any floats < 64 bits. 9321 if (Bits < 64) 9322 InReg = true; 9323 pad(Offset); 9324 Elems.push_back(Ty); 9325 Size = Offset + Bits; 9326 } 9327 9328 // Add a struct type to the coercion type, starting at Offset (in bits). 9329 void addStruct(uint64_t Offset, llvm::StructType *StrTy) { 9330 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy); 9331 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) { 9332 llvm::Type *ElemTy = StrTy->getElementType(i); 9333 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i); 9334 switch (ElemTy->getTypeID()) { 9335 case llvm::Type::StructTyID: 9336 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy)); 9337 break; 9338 case llvm::Type::FloatTyID: 9339 addFloat(ElemOffset, ElemTy, 32); 9340 break; 9341 case llvm::Type::DoubleTyID: 9342 addFloat(ElemOffset, ElemTy, 64); 9343 break; 9344 case llvm::Type::FP128TyID: 9345 addFloat(ElemOffset, ElemTy, 128); 9346 break; 9347 case llvm::Type::PointerTyID: 9348 if (ElemOffset % 64 == 0) { 9349 pad(ElemOffset); 9350 Elems.push_back(ElemTy); 9351 Size += 64; 9352 } 9353 break; 9354 default: 9355 break; 9356 } 9357 } 9358 } 9359 9360 // Check if Ty is a usable substitute for the coercion type. 9361 bool isUsableType(llvm::StructType *Ty) const { 9362 return llvm::makeArrayRef(Elems) == Ty->elements(); 9363 } 9364 9365 // Get the coercion type as a literal struct type. 9366 llvm::Type *getType() const { 9367 if (Elems.size() == 1) 9368 return Elems.front(); 9369 else 9370 return llvm::StructType::get(Context, Elems); 9371 } 9372 }; 9373 }; 9374 } // end anonymous namespace 9375 9376 ABIArgInfo 9377 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const { 9378 if (Ty->isVoidType()) 9379 return ABIArgInfo::getIgnore(); 9380 9381 uint64_t Size = getContext().getTypeSize(Ty); 9382 9383 // Anything too big to fit in registers is passed with an explicit indirect 9384 // pointer / sret pointer. 9385 if (Size > SizeLimit) 9386 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 9387 9388 // Treat an enum type as its underlying type. 9389 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 9390 Ty = EnumTy->getDecl()->getIntegerType(); 9391 9392 // Integer types smaller than a register are extended. 9393 if (Size < 64 && Ty->isIntegerType()) 9394 return ABIArgInfo::getExtend(Ty); 9395 9396 if (const auto *EIT = Ty->getAs<ExtIntType>()) 9397 if (EIT->getNumBits() < 64) 9398 return ABIArgInfo::getExtend(Ty); 9399 9400 // Other non-aggregates go in registers. 
9401 if (!isAggregateTypeForABI(Ty)) 9402 return ABIArgInfo::getDirect(); 9403 9404 // If a C++ object has either a non-trivial copy constructor or a non-trivial 9405 // destructor, it is passed with an explicit indirect pointer / sret pointer. 9406 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 9407 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 9408 9409 // This is a small aggregate type that should be passed in registers. 9410 // Build a coercion type from the LLVM struct type. 9411 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty)); 9412 if (!StrTy) 9413 return ABIArgInfo::getDirect(); 9414 9415 CoerceBuilder CB(getVMContext(), getDataLayout()); 9416 CB.addStruct(0, StrTy); 9417 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64)); 9418 9419 // Try to use the original type for coercion. 9420 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType(); 9421 9422 if (CB.InReg) 9423 return ABIArgInfo::getDirectInReg(CoerceTy); 9424 else 9425 return ABIArgInfo::getDirect(CoerceTy); 9426 } 9427 9428 Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 9429 QualType Ty) const { 9430 ABIArgInfo AI = classifyType(Ty, 16 * 8); 9431 llvm::Type *ArgTy = CGT.ConvertType(Ty); 9432 if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) 9433 AI.setCoerceToType(ArgTy); 9434 9435 CharUnits SlotSize = CharUnits::fromQuantity(8); 9436 9437 CGBuilderTy &Builder = CGF.Builder; 9438 Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize); 9439 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); 9440 9441 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 9442 9443 Address ArgAddr = Address::invalid(); 9444 CharUnits Stride; 9445 switch (AI.getKind()) { 9446 case ABIArgInfo::Expand: 9447 case ABIArgInfo::CoerceAndExpand: 9448 case ABIArgInfo::InAlloca: 9449 llvm_unreachable("Unsupported ABI kind for va_arg"); 9450 9451 case ABIArgInfo::Extend: { 9452 Stride = SlotSize; 9453 CharUnits Offset = SlotSize - TypeInfo.first; 9454 ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend"); 9455 break; 9456 } 9457 9458 case ABIArgInfo::Direct: { 9459 auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType()); 9460 Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize); 9461 ArgAddr = Addr; 9462 break; 9463 } 9464 9465 case ABIArgInfo::Indirect: 9466 case ABIArgInfo::IndirectAliased: 9467 Stride = SlotSize; 9468 ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect"); 9469 ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"), 9470 TypeInfo.second); 9471 break; 9472 9473 case ABIArgInfo::Ignore: 9474 return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second); 9475 } 9476 9477 // Update VAList. 
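  // ap.next = ap.cur + Stride; Stride was computed above and is always a
  // multiple of the 8-byte argument slot.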
9478 Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next"); 9479 Builder.CreateStore(NextPtr.getPointer(), VAListAddr); 9480 9481 return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr"); 9482 } 9483 9484 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const { 9485 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8); 9486 for (auto &I : FI.arguments()) 9487 I.info = classifyType(I.type, 16 * 8); 9488 } 9489 9490 namespace { 9491 class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo { 9492 public: 9493 SparcV9TargetCodeGenInfo(CodeGenTypes &CGT) 9494 : TargetCodeGenInfo(std::make_unique<SparcV9ABIInfo>(CGT)) {} 9495 9496 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 9497 return 14; 9498 } 9499 9500 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 9501 llvm::Value *Address) const override; 9502 }; 9503 } // end anonymous namespace 9504 9505 bool 9506 SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 9507 llvm::Value *Address) const { 9508 // This is calculated from the LLVM and GCC tables and verified 9509 // against gcc output. AFAIK all ABIs use the same encoding. 9510 9511 CodeGen::CGBuilderTy &Builder = CGF.Builder; 9512 9513 llvm::IntegerType *i8 = CGF.Int8Ty; 9514 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 9515 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 9516 9517 // 0-31: the 8-byte general-purpose registers 9518 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 9519 9520 // 32-63: f0-31, the 4-byte floating-point registers 9521 AssignToArrayRange(Builder, Address, Four8, 32, 63); 9522 9523 // Y = 64 9524 // PSR = 65 9525 // WIM = 66 9526 // TBR = 67 9527 // PC = 68 9528 // NPC = 69 9529 // FSR = 70 9530 // CSR = 71 9531 AssignToArrayRange(Builder, Address, Eight8, 64, 71); 9532 9533 // 72-87: d0-15, the 8-byte floating-point registers 9534 AssignToArrayRange(Builder, Address, Eight8, 72, 87); 9535 9536 return false; 9537 } 9538 9539 // ARC ABI implementation. 9540 namespace { 9541 9542 class ARCABIInfo : public DefaultABIInfo { 9543 public: 9544 using DefaultABIInfo::DefaultABIInfo; 9545 9546 private: 9547 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 9548 QualType Ty) const override; 9549 9550 void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const { 9551 if (!State.FreeRegs) 9552 return; 9553 if (Info.isIndirect() && Info.getInReg()) 9554 State.FreeRegs--; 9555 else if (Info.isDirect() && Info.getInReg()) { 9556 unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32; 9557 if (sz < State.FreeRegs) 9558 State.FreeRegs -= sz; 9559 else 9560 State.FreeRegs = 0; 9561 } 9562 } 9563 9564 void computeInfo(CGFunctionInfo &FI) const override { 9565 CCState State(FI); 9566 // ARC uses 8 registers to pass arguments. 
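    // As an illustrative example: two int arguments followed by a 12-byte
    // struct consume 1 + 1 + 3 of those registers, leaving 3 free.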
9567 State.FreeRegs = 8; 9568 9569 if (!getCXXABI().classifyReturnType(FI)) 9570 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 9571 updateState(FI.getReturnInfo(), FI.getReturnType(), State); 9572 for (auto &I : FI.arguments()) { 9573 I.info = classifyArgumentType(I.type, State.FreeRegs); 9574 updateState(I.info, I.type, State); 9575 } 9576 } 9577 9578 ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const; 9579 ABIArgInfo getIndirectByValue(QualType Ty) const; 9580 ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const; 9581 ABIArgInfo classifyReturnType(QualType RetTy) const; 9582 }; 9583 9584 class ARCTargetCodeGenInfo : public TargetCodeGenInfo { 9585 public: 9586 ARCTargetCodeGenInfo(CodeGenTypes &CGT) 9587 : TargetCodeGenInfo(std::make_unique<ARCABIInfo>(CGT)) {} 9588 }; 9589 9590 9591 ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const { 9592 return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) : 9593 getNaturalAlignIndirect(Ty, false); 9594 } 9595 9596 ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const { 9597 // Compute the byval alignment. 9598 const unsigned MinABIStackAlignInBytes = 4; 9599 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 9600 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true, 9601 TypeAlign > MinABIStackAlignInBytes); 9602 } 9603 9604 Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 9605 QualType Ty) const { 9606 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 9607 getContext().getTypeInfoInChars(Ty), 9608 CharUnits::fromQuantity(4), true); 9609 } 9610 9611 ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty, 9612 uint8_t FreeRegs) const { 9613 // Handle the generic C++ ABI. 9614 const RecordType *RT = Ty->getAs<RecordType>(); 9615 if (RT) { 9616 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); 9617 if (RAA == CGCXXABI::RAA_Indirect) 9618 return getIndirectByRef(Ty, FreeRegs > 0); 9619 9620 if (RAA == CGCXXABI::RAA_DirectInMemory) 9621 return getIndirectByValue(Ty); 9622 } 9623 9624 // Treat an enum type as its underlying type. 9625 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 9626 Ty = EnumTy->getDecl()->getIntegerType(); 9627 9628 auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32; 9629 9630 if (isAggregateTypeForABI(Ty)) { 9631 // Structures with flexible arrays are always indirect. 9632 if (RT && RT->getDecl()->hasFlexibleArrayMember()) 9633 return getIndirectByValue(Ty); 9634 9635 // Ignore empty structs/unions. 9636 if (isEmptyRecord(getContext(), Ty, true)) 9637 return ABIArgInfo::getIgnore(); 9638 9639 llvm::LLVMContext &LLVMContext = getVMContext(); 9640 9641 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 9642 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32); 9643 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 9644 9645 return FreeRegs >= SizeInRegs ? 9646 ABIArgInfo::getDirectInReg(Result) : 9647 ABIArgInfo::getDirect(Result, 0, nullptr, false); 9648 } 9649 9650 if (const auto *EIT = Ty->getAs<ExtIntType>()) 9651 if (EIT->getNumBits() > 64) 9652 return getIndirectByValue(Ty); 9653 9654 return isPromotableIntegerTypeForABI(Ty) 9655 ? (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty) 9656 : ABIArgInfo::getExtend(Ty)) 9657 : (FreeRegs >= SizeInRegs ? 
ABIArgInfo::getDirectInReg()
                                       : ABIArgInfo::getDirect());
}

ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirectInReg();

  // Return values needing more than 4 registers are returned indirectly.
  auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32;
  if (RetSize > 4)
    return getIndirectByRef(RetTy, /*HasFreeRegs*/ true);

  return DefaultABIInfo::classifyReturnType(RetTy);
}

} // End anonymous namespace.

//===----------------------------------------------------------------------===//
// XCore ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

/// A SmallStringEnc instance is used to build up the TypeString by passing
/// it by reference between functions that append to it.
typedef llvm::SmallString<128> SmallStringEnc;

/// TypeStringCache caches the meta encodings of Types.
///
/// The reason for caching TypeStrings is twofold:
/// 1. To cache a type's encoding for later uses;
/// 2. As a means to break recursive member type inclusion.
///
/// A cache Entry can have a Status of:
///   NonRecursive:   The type encoding is not recursive;
///   Recursive:      The type encoding is recursive;
///   Incomplete:     An incomplete TypeString;
///   IncompleteUsed: An incomplete TypeString that has been used in a
///                   Recursive type encoding.
///
/// A NonRecursive entry will have all of its sub-members expanded as fully
/// as possible. Whilst it may contain types which are recursive, the type
/// itself is not recursive and thus its encoding may be safely used whenever
/// the type is encountered.
///
/// A Recursive entry will have all of its sub-members expanded as fully as
/// possible. The type itself is recursive and it may contain other types which
/// are recursive. The Recursive encoding must not be used during the expansion
/// of a recursive type's recursive branch. For simplicity the code uses
/// IncompleteCount to reject all usage of Recursive encodings for member types.
///
/// An Incomplete entry is always a RecordType and only encodes its
/// identifier, e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
/// are placed into the cache during type expansion as a means to identify and
/// handle recursive inclusion of types as sub-members. If there is recursion
/// the entry becomes IncompleteUsed.
///
/// During the expansion of a RecordType's members:
///
///   If the cache contains a NonRecursive encoding for the member type, the
///   cached encoding is used;
///
///   If the cache contains a Recursive encoding for the member type, the
///   cached encoding is 'Swapped' out, as it may be incorrect, and...
///
///   If the member is a RecordType, an Incomplete encoding is placed into the
///   cache to break potential recursive inclusion of itself as a sub-member;
///
///   Once a member RecordType has been expanded, its temporary incomplete
///   entry is removed from the cache. If a Recursive encoding was swapped out
///   it is swapped back in;
///
///   If an incomplete entry is used to expand a sub-member, the incomplete
///   entry is marked as IncompleteUsed.
/// The cache keeps count of how many IncompleteUsed entries it currently
/// contains in IncompleteUsedCount;
///
/// If a member's encoding is found to be NonRecursive or Recursive (viz:
/// IncompleteUsedCount == 0), the member's encoding is added to the cache.
/// Else the member is part of a recursive type and thus the recursion has
/// been exited too soon for the encoding to be correct for the member.
///
class TypeStringCache {
  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary placeholder for a Recursive encoding
                         // during the expansion of RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
  bool removeIncomplete(const IdentifierInfo *ID);
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  StringRef lookupStr(const IdentifierInfo *ID);
};

/// TypeString encodings for enum & union fields must be ordered.
/// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
  bool HasName;
  std::string Enc;
public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc; }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};

class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    const CodeGen::CodeGenModule &M) const;

public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<XCoreABIInfo>(CGT)) {}
  void emitTargetMetadata(CodeGen::CodeGenModule &CGM,
                          const llvm::MapVector<GlobalDecl, StringRef>
                              &MangledDeclNames) const override;
};

} // End anonymous namespace.

// TODO: this implementation is likely now redundant with the default
// EmitVAArg.
Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  CharUnits SlotSize = CharUnits::fromQuantity(4);
  Address AP(Builder.CreateLoad(VAListAddr), SlotSize);

  // Handle the argument.
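  // Each va_list slot is 4 bytes wide. Indirect arguments occupy a single
  // slot holding a pointer, while direct arguments take one or more whole
  // slots (see the cases below).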
  ABIArgInfo AI = classifyArgumentType(Ty);
  CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  Address Val = Address::invalid();
  CharUnits ArgSize = CharUnits::Zero();
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
    ArgSize = CharUnits::Zero();
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreateBitCast(AP, ArgPtrTy);
    ArgSize = CharUnits::fromQuantity(
        getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
    ArgSize = ArgSize.alignTo(SlotSize);
    break;
  case ABIArgInfo::Indirect:
  case ABIArgInfo::IndirectAliased:
    Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
    Val = Address(Builder.CreateLoad(Val), TypeAlign);
    ArgSize = SlotSize;
    break;
  }

  // Increment the VAList.
  if (!ArgSize.isZero()) {
    Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
    Builder.CreateStore(APN.getPointer(), VAListAddr);
  }

  return Val;
}

/// During the expansion of a RecordType, an incomplete TypeString is placed
/// into the cache as a means to identify and break recursion.
/// If there is a Recursive encoding in the cache, it is swapped out and will
/// be reinserted by removeIncomplete().
/// All other types of encoding should have been used rather than arriving here.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert((E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // swap out the Recursive
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}

/// Once the RecordType has been expanded, the temporary incomplete TypeString
/// must be removed from the cache.
/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
/// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert((E.State == Incomplete ||
          E.State == IncompleteUsed) &&
         "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive back.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}

/// Add the encoded TypeString to the cache only if it is NonRecursive or
/// Recursive (viz: all sub-members were expanded as fully as possible).
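/// (A non-zero IncompleteUsedCount means we are still inside the expansion of
/// a recursive branch, where the candidate encoding may be wrong for
/// standalone use, so it is discarded instead.)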
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key or it is an incomplete sub-type so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State == Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry, but we assumed the worst when we
    // started (viz: IncompleteCount != 0).
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive ? Recursive : NonRecursive;
}

/// Return a cached TypeString encoding for the ID. If there isn't one, or we
/// are recursively expanding a type (IncompleteCount != 0) and the cached
/// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef(); // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef(); // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef(); // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str;
}

/// The XCore ABI includes a type information section that communicates symbol
/// type information to the linker. The linker uses this information to verify
/// safety/correctness of things such as array bounds and pointers.
/// The ABI only requires C (and XC) language modules to emit TypeStrings.
/// This type information (TypeString) is emitted into metadata for all global
/// symbols: definitions, declarations, functions & variables.
///
/// The TypeString carries type, qualifier, name, size & value details.
/// Please see 'Tools Development Guide' section 2.16.2 for format details:
/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
/// The output is tested by test/CodeGen/xcore-stringtype.c.
///
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          const CodeGen::CodeGenModule &CGM,
                          TypeStringCache &TSC);

/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
void XCoreTargetCodeGenInfo::emitTargetMD(
    const Decl *D, llvm::GlobalValue *GV,
    const CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
    llvm::LLVMContext &Ctx = CGM.getModule().getContext();
    llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
                                llvm::MDString::get(Ctx, Enc.str())};
    llvm::NamedMDNode *MD =
        CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}

void XCoreTargetCodeGenInfo::emitTargetMetadata(
    CodeGen::CodeGenModule &CGM,
    const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {
  // Warning, new MangledDeclNames may be appended within this loop.
  // We rely on MapVector insertions adding new elements to the end
  // of the container.
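  // Iterating by index rather than by iterator keeps the loop valid if the
  // underlying storage reallocates when new names are appended.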
9976 for (unsigned I = 0; I != MangledDeclNames.size(); ++I) { 9977 auto Val = *(MangledDeclNames.begin() + I); 9978 llvm::GlobalValue *GV = CGM.GetGlobalValue(Val.second); 9979 if (GV) { 9980 const Decl *D = Val.first.getDecl()->getMostRecentDecl(); 9981 emitTargetMD(D, GV, CGM); 9982 } 9983 } 9984 } 9985 //===----------------------------------------------------------------------===// 9986 // SPIR ABI Implementation 9987 //===----------------------------------------------------------------------===// 9988 9989 namespace { 9990 class SPIRTargetCodeGenInfo : public TargetCodeGenInfo { 9991 public: 9992 SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 9993 : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {} 9994 unsigned getOpenCLKernelCallingConv() const override; 9995 }; 9996 9997 } // End anonymous namespace. 9998 9999 namespace clang { 10000 namespace CodeGen { 10001 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) { 10002 DefaultABIInfo SPIRABI(CGM.getTypes()); 10003 SPIRABI.computeInfo(FI); 10004 } 10005 } 10006 } 10007 10008 unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const { 10009 return llvm::CallingConv::SPIR_KERNEL; 10010 } 10011 10012 static bool appendType(SmallStringEnc &Enc, QualType QType, 10013 const CodeGen::CodeGenModule &CGM, 10014 TypeStringCache &TSC); 10015 10016 /// Helper function for appendRecordType(). 10017 /// Builds a SmallVector containing the encoded field types in declaration 10018 /// order. 10019 static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE, 10020 const RecordDecl *RD, 10021 const CodeGen::CodeGenModule &CGM, 10022 TypeStringCache &TSC) { 10023 for (const auto *Field : RD->fields()) { 10024 SmallStringEnc Enc; 10025 Enc += "m("; 10026 Enc += Field->getName(); 10027 Enc += "){"; 10028 if (Field->isBitField()) { 10029 Enc += "b("; 10030 llvm::raw_svector_ostream OS(Enc); 10031 OS << Field->getBitWidthValue(CGM.getContext()); 10032 Enc += ':'; 10033 } 10034 if (!appendType(Enc, Field->getType(), CGM, TSC)) 10035 return false; 10036 if (Field->isBitField()) 10037 Enc += ')'; 10038 Enc += '}'; 10039 FE.emplace_back(!Field->getName().empty(), Enc); 10040 } 10041 return true; 10042 } 10043 10044 /// Appends structure and union types to Enc and adds encoding to cache. 10045 /// Recursively calls appendType (via extractFieldType) for each field. 10046 /// Union types have their fields ordered according to the ABI. 10047 static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, 10048 const CodeGen::CodeGenModule &CGM, 10049 TypeStringCache &TSC, const IdentifierInfo *ID) { 10050 // Append the cached TypeString if we have one. 10051 StringRef TypeString = TSC.lookupStr(ID); 10052 if (!TypeString.empty()) { 10053 Enc += TypeString; 10054 return true; 10055 } 10056 10057 // Start to emit an incomplete TypeString. 10058 size_t Start = Enc.size(); 10059 Enc += (RT->isUnionType()? 'u' : 's'); 10060 Enc += '('; 10061 if (ID) 10062 Enc += ID->getName(); 10063 Enc += "){"; 10064 10065 // We collect all encoded fields and order as necessary. 10066 bool IsRecursive = false; 10067 const RecordDecl *RD = RT->getDecl()->getDefinition(); 10068 if (RD && !RD->field_empty()) { 10069 // An incomplete TypeString stub is placed in the cache for this RecordType 10070 // so that recursive calls to this RecordType will use it whilst building a 10071 // complete TypeString for this RecordType. 
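    // As an illustrative example: for 'struct S { struct S *p; };' the stub
    // "s(S){}" is cached first, the recursive member then encodes as
    // "p(s(S){})", and the completed TypeString is "s(S){m(p){p(s(S){})}}".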
10072 SmallVector<FieldEncoding, 16> FE; 10073 std::string StubEnc(Enc.substr(Start).str()); 10074 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString. 10075 TSC.addIncomplete(ID, std::move(StubEnc)); 10076 if (!extractFieldType(FE, RD, CGM, TSC)) { 10077 (void) TSC.removeIncomplete(ID); 10078 return false; 10079 } 10080 IsRecursive = TSC.removeIncomplete(ID); 10081 // The ABI requires unions to be sorted but not structures. 10082 // See FieldEncoding::operator< for sort algorithm. 10083 if (RT->isUnionType()) 10084 llvm::sort(FE); 10085 // We can now complete the TypeString. 10086 unsigned E = FE.size(); 10087 for (unsigned I = 0; I != E; ++I) { 10088 if (I) 10089 Enc += ','; 10090 Enc += FE[I].str(); 10091 } 10092 } 10093 Enc += '}'; 10094 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive); 10095 return true; 10096 } 10097 10098 /// Appends enum types to Enc and adds the encoding to the cache. 10099 static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, 10100 TypeStringCache &TSC, 10101 const IdentifierInfo *ID) { 10102 // Append the cached TypeString if we have one. 10103 StringRef TypeString = TSC.lookupStr(ID); 10104 if (!TypeString.empty()) { 10105 Enc += TypeString; 10106 return true; 10107 } 10108 10109 size_t Start = Enc.size(); 10110 Enc += "e("; 10111 if (ID) 10112 Enc += ID->getName(); 10113 Enc += "){"; 10114 10115 // We collect all encoded enumerations and order them alphanumerically. 10116 if (const EnumDecl *ED = ET->getDecl()->getDefinition()) { 10117 SmallVector<FieldEncoding, 16> FE; 10118 for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E; 10119 ++I) { 10120 SmallStringEnc EnumEnc; 10121 EnumEnc += "m("; 10122 EnumEnc += I->getName(); 10123 EnumEnc += "){"; 10124 I->getInitVal().toString(EnumEnc); 10125 EnumEnc += '}'; 10126 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc)); 10127 } 10128 llvm::sort(FE); 10129 unsigned E = FE.size(); 10130 for (unsigned I = 0; I != E; ++I) { 10131 if (I) 10132 Enc += ','; 10133 Enc += FE[I].str(); 10134 } 10135 } 10136 Enc += '}'; 10137 TSC.addIfComplete(ID, Enc.substr(Start), false); 10138 return true; 10139 } 10140 10141 /// Appends type's qualifier to Enc. 10142 /// This is done prior to appending the type's encoding. 10143 static void appendQualifier(SmallStringEnc &Enc, QualType QT) { 10144 // Qualifiers are emitted in alphabetical order. 10145 static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"}; 10146 int Lookup = 0; 10147 if (QT.isConstQualified()) 10148 Lookup += 1<<0; 10149 if (QT.isRestrictQualified()) 10150 Lookup += 1<<1; 10151 if (QT.isVolatileQualified()) 10152 Lookup += 1<<2; 10153 Enc += Table[Lookup]; 10154 } 10155 10156 /// Appends built-in types to Enc. 
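/// For example, 'unsigned int' encodes as "ui" and 'double' as "d". Builtin
/// kinds without a table entry hit the default case and make the whole
/// TypeString attempt fail by returning false.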
10157 static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) { 10158 const char *EncType; 10159 switch (BT->getKind()) { 10160 case BuiltinType::Void: 10161 EncType = "0"; 10162 break; 10163 case BuiltinType::Bool: 10164 EncType = "b"; 10165 break; 10166 case BuiltinType::Char_U: 10167 EncType = "uc"; 10168 break; 10169 case BuiltinType::UChar: 10170 EncType = "uc"; 10171 break; 10172 case BuiltinType::SChar: 10173 EncType = "sc"; 10174 break; 10175 case BuiltinType::UShort: 10176 EncType = "us"; 10177 break; 10178 case BuiltinType::Short: 10179 EncType = "ss"; 10180 break; 10181 case BuiltinType::UInt: 10182 EncType = "ui"; 10183 break; 10184 case BuiltinType::Int: 10185 EncType = "si"; 10186 break; 10187 case BuiltinType::ULong: 10188 EncType = "ul"; 10189 break; 10190 case BuiltinType::Long: 10191 EncType = "sl"; 10192 break; 10193 case BuiltinType::ULongLong: 10194 EncType = "ull"; 10195 break; 10196 case BuiltinType::LongLong: 10197 EncType = "sll"; 10198 break; 10199 case BuiltinType::Float: 10200 EncType = "ft"; 10201 break; 10202 case BuiltinType::Double: 10203 EncType = "d"; 10204 break; 10205 case BuiltinType::LongDouble: 10206 EncType = "ld"; 10207 break; 10208 default: 10209 return false; 10210 } 10211 Enc += EncType; 10212 return true; 10213 } 10214 10215 /// Appends a pointer encoding to Enc before calling appendType for the pointee. 10216 static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, 10217 const CodeGen::CodeGenModule &CGM, 10218 TypeStringCache &TSC) { 10219 Enc += "p("; 10220 if (!appendType(Enc, PT->getPointeeType(), CGM, TSC)) 10221 return false; 10222 Enc += ')'; 10223 return true; 10224 } 10225 10226 /// Appends array encoding to Enc before calling appendType for the element. 10227 static bool appendArrayType(SmallStringEnc &Enc, QualType QT, 10228 const ArrayType *AT, 10229 const CodeGen::CodeGenModule &CGM, 10230 TypeStringCache &TSC, StringRef NoSizeEnc) { 10231 if (AT->getSizeModifier() != ArrayType::Normal) 10232 return false; 10233 Enc += "a("; 10234 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) 10235 CAT->getSize().toStringUnsigned(Enc); 10236 else 10237 Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "". 10238 Enc += ':'; 10239 // The Qualifiers should be attached to the type rather than the array. 10240 appendQualifier(Enc, QT); 10241 if (!appendType(Enc, AT->getElementType(), CGM, TSC)) 10242 return false; 10243 Enc += ')'; 10244 return true; 10245 } 10246 10247 /// Appends a function encoding to Enc, calling appendType for the return type 10248 /// and the arguments. 10249 static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, 10250 const CodeGen::CodeGenModule &CGM, 10251 TypeStringCache &TSC) { 10252 Enc += "f{"; 10253 if (!appendType(Enc, FT->getReturnType(), CGM, TSC)) 10254 return false; 10255 Enc += "}("; 10256 if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) { 10257 // N.B. we are only interested in the adjusted param types. 
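    // As an illustrative example: 'int f(unsigned char, ...)' yields
    // "f{si}(uc,va)", while a prototyped 'void g(void)' yields "f{0}(0)".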
10258 auto I = FPT->param_type_begin(); 10259 auto E = FPT->param_type_end(); 10260 if (I != E) { 10261 do { 10262 if (!appendType(Enc, *I, CGM, TSC)) 10263 return false; 10264 ++I; 10265 if (I != E) 10266 Enc += ','; 10267 } while (I != E); 10268 if (FPT->isVariadic()) 10269 Enc += ",va"; 10270 } else { 10271 if (FPT->isVariadic()) 10272 Enc += "va"; 10273 else 10274 Enc += '0'; 10275 } 10276 } 10277 Enc += ')'; 10278 return true; 10279 } 10280 10281 /// Handles the type's qualifier before dispatching a call to handle specific 10282 /// type encodings. 10283 static bool appendType(SmallStringEnc &Enc, QualType QType, 10284 const CodeGen::CodeGenModule &CGM, 10285 TypeStringCache &TSC) { 10286 10287 QualType QT = QType.getCanonicalType(); 10288 10289 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) 10290 // The Qualifiers should be attached to the type rather than the array. 10291 // Thus we don't call appendQualifier() here. 10292 return appendArrayType(Enc, QT, AT, CGM, TSC, ""); 10293 10294 appendQualifier(Enc, QT); 10295 10296 if (const BuiltinType *BT = QT->getAs<BuiltinType>()) 10297 return appendBuiltinType(Enc, BT); 10298 10299 if (const PointerType *PT = QT->getAs<PointerType>()) 10300 return appendPointerType(Enc, PT, CGM, TSC); 10301 10302 if (const EnumType *ET = QT->getAs<EnumType>()) 10303 return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier()); 10304 10305 if (const RecordType *RT = QT->getAsStructureType()) 10306 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); 10307 10308 if (const RecordType *RT = QT->getAsUnionType()) 10309 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); 10310 10311 if (const FunctionType *FT = QT->getAs<FunctionType>()) 10312 return appendFunctionType(Enc, FT, CGM, TSC); 10313 10314 return false; 10315 } 10316 10317 static bool getTypeString(SmallStringEnc &Enc, const Decl *D, 10318 const CodeGen::CodeGenModule &CGM, 10319 TypeStringCache &TSC) { 10320 if (!D) 10321 return false; 10322 10323 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 10324 if (FD->getLanguageLinkage() != CLanguageLinkage) 10325 return false; 10326 return appendType(Enc, FD->getType(), CGM, TSC); 10327 } 10328 10329 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { 10330 if (VD->getLanguageLinkage() != CLanguageLinkage) 10331 return false; 10332 QualType QT = VD->getType().getCanonicalType(); 10333 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) { 10334 // Global ArrayTypes are given a size of '*' if the size is unknown. 10335 // The Qualifiers should be attached to the type rather than the array. 10336 // Thus we don't call appendQualifier() here. 10337 return appendArrayType(Enc, QT, AT, CGM, TSC, "*"); 10338 } 10339 return appendType(Enc, QT, CGM, TSC); 10340 } 10341 return false; 10342 } 10343 10344 //===----------------------------------------------------------------------===// 10345 // RISCV ABI Implementation 10346 //===----------------------------------------------------------------------===// 10347 10348 namespace { 10349 class RISCVABIInfo : public DefaultABIInfo { 10350 private: 10351 // Size of the integer ('x') registers in bits. 10352 unsigned XLen; 10353 // Size of the floating point ('f') registers in bits. Note that the target 10354 // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target 10355 // with soft float ABI has FLen==0). 
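  // (For the single-float 'f' ABIs FLen is 32 and for the double-float 'd'
  // ABIs it is 64; see how ABIFLen is derived from the ABI string in
  // getTargetCodeGenInfo() below.)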
  unsigned FLen;
  static const int NumArgGPRs = 8;
  static const int NumArgFPRs = 8;
  bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                      llvm::Type *&Field1Ty,
                                      CharUnits &Field1Off,
                                      llvm::Type *&Field2Ty,
                                      CharUnits &Field2Off) const;

public:
  RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen)
      : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo is virtual, so we override it.
  void computeInfo(CGFunctionInfo &FI) const override;

  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
                                  int &ArgFPRsLeft) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  ABIArgInfo extendType(QualType Ty) const;

  bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                CharUnits &Field1Off, llvm::Type *&Field2Ty,
                                CharUnits &Field2Off, int &NeededArgGPRs,
                                int &NeededArgFPRs) const;
  ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
                                               CharUnits Field1Off,
                                               llvm::Type *Field2Ty,
                                               CharUnits Field2Off) const;
};
} // end anonymous namespace

void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy);

  // IsRetIndirect is true if classifyArgumentType indicated the value should
  // be passed indirect, or if the type is a scalar larger than 2*XLen and not
  // a complex type with elements <= FLen. e.g. fp128 is passed direct in LLVM
  // IR, relying on the backend lowering code to rewrite the argument list and
  // pass indirectly on RV32.
  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
  if (!IsRetIndirect && RetTy->isScalarType() &&
      getContext().getTypeSize(RetTy) > (2 * XLen)) {
    if (RetTy->isComplexType() && FLen) {
      QualType EltTy = RetTy->getAs<ComplexType>()->getElementType();
      IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
    } else {
      // This is a normal scalar > 2*XLen, such as fp128 on RV32.
      IsRetIndirect = true;
    }
  }

  // We must track the number of GPRs used in order to conform to the RISC-V
  // ABI, as integer scalars passed in registers should have signext/zeroext
  // when promoted, but are anyext if passed on the stack. As GPR usage is
  // different for variadic arguments, we must also track whether we are
  // examining a vararg or not.
  int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
  int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
  int NumFixedArgs = FI.getNumRequiredArgs();

  int ArgNum = 0;
  for (auto &ArgInfo : FI.arguments()) {
    bool IsFixed = ArgNum < NumFixedArgs;
    ArgInfo.info =
        classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
    ArgNum++;
  }
}

// Returns true if the struct is a potential candidate for the floating point
// calling convention. If this function returns true, the caller is
// responsible for checking that if there is only a single field then that
// field is a float.
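// As illustrative examples: struct { float f; int i; } flattens to fp+int and
// struct { double d; double e; } to fp+fp, so both are candidates (the latter
// only when FLen >= 64), whereas struct { int a; int b; } is rejected because
// int+int pairs are not eligible.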
10437 bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff, 10438 llvm::Type *&Field1Ty, 10439 CharUnits &Field1Off, 10440 llvm::Type *&Field2Ty, 10441 CharUnits &Field2Off) const { 10442 bool IsInt = Ty->isIntegralOrEnumerationType(); 10443 bool IsFloat = Ty->isRealFloatingType(); 10444 10445 if (IsInt || IsFloat) { 10446 uint64_t Size = getContext().getTypeSize(Ty); 10447 if (IsInt && Size > XLen) 10448 return false; 10449 // Can't be eligible if larger than the FP registers. Half precision isn't 10450 // currently supported on RISC-V and the ABI hasn't been confirmed, so 10451 // default to the integer ABI in that case. 10452 if (IsFloat && (Size > FLen || Size < 32)) 10453 return false; 10454 // Can't be eligible if an integer type was already found (int+int pairs 10455 // are not eligible). 10456 if (IsInt && Field1Ty && Field1Ty->isIntegerTy()) 10457 return false; 10458 if (!Field1Ty) { 10459 Field1Ty = CGT.ConvertType(Ty); 10460 Field1Off = CurOff; 10461 return true; 10462 } 10463 if (!Field2Ty) { 10464 Field2Ty = CGT.ConvertType(Ty); 10465 Field2Off = CurOff; 10466 return true; 10467 } 10468 return false; 10469 } 10470 10471 if (auto CTy = Ty->getAs<ComplexType>()) { 10472 if (Field1Ty) 10473 return false; 10474 QualType EltTy = CTy->getElementType(); 10475 if (getContext().getTypeSize(EltTy) > FLen) 10476 return false; 10477 Field1Ty = CGT.ConvertType(EltTy); 10478 Field1Off = CurOff; 10479 assert(CurOff.isZero() && "Unexpected offset for first field"); 10480 Field2Ty = Field1Ty; 10481 Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy); 10482 return true; 10483 } 10484 10485 if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) { 10486 uint64_t ArraySize = ATy->getSize().getZExtValue(); 10487 QualType EltTy = ATy->getElementType(); 10488 CharUnits EltSize = getContext().getTypeSizeInChars(EltTy); 10489 for (uint64_t i = 0; i < ArraySize; ++i) { 10490 bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty, 10491 Field1Off, Field2Ty, Field2Off); 10492 if (!Ret) 10493 return false; 10494 CurOff += EltSize; 10495 } 10496 return true; 10497 } 10498 10499 if (const auto *RTy = Ty->getAs<RecordType>()) { 10500 // Structures with either a non-trivial destructor or a non-trivial 10501 // copy constructor are not eligible for the FP calling convention. 10502 if (getRecordArgABI(Ty, CGT.getCXXABI())) 10503 return false; 10504 if (isEmptyRecord(getContext(), Ty, true)) 10505 return true; 10506 const RecordDecl *RD = RTy->getDecl(); 10507 // Unions aren't eligible unless they're empty (which is caught above). 10508 if (RD->isUnion()) 10509 return false; 10510 int ZeroWidthBitFieldCount = 0; 10511 for (const FieldDecl *FD : RD->fields()) { 10512 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 10513 uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex()); 10514 QualType QTy = FD->getType(); 10515 if (FD->isBitField()) { 10516 unsigned BitWidth = FD->getBitWidthValue(getContext()); 10517 // Allow a bitfield with a type greater than XLen as long as the 10518 // bitwidth is XLen or less. 
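        // (e.g., with XLen == 32, a 'long long x : 32' member is treated
        // here as if it were a 32-bit integer field.)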
10519 if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen) 10520 QTy = getContext().getIntTypeForBitwidth(XLen, false); 10521 if (BitWidth == 0) { 10522 ZeroWidthBitFieldCount++; 10523 continue; 10524 } 10525 } 10526 10527 bool Ret = detectFPCCEligibleStructHelper( 10528 QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits), 10529 Field1Ty, Field1Off, Field2Ty, Field2Off); 10530 if (!Ret) 10531 return false; 10532 10533 // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp 10534 // or int+fp structs, but are ignored for a struct with an fp field and 10535 // any number of zero-width bitfields. 10536 if (Field2Ty && ZeroWidthBitFieldCount > 0) 10537 return false; 10538 } 10539 return Field1Ty != nullptr; 10540 } 10541 10542 return false; 10543 } 10544 10545 // Determine if a struct is eligible for passing according to the floating 10546 // point calling convention (i.e., when flattened it contains a single fp 10547 // value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and 10548 // NeededArgGPRs are incremented appropriately. 10549 bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty, 10550 CharUnits &Field1Off, 10551 llvm::Type *&Field2Ty, 10552 CharUnits &Field2Off, 10553 int &NeededArgGPRs, 10554 int &NeededArgFPRs) const { 10555 Field1Ty = nullptr; 10556 Field2Ty = nullptr; 10557 NeededArgGPRs = 0; 10558 NeededArgFPRs = 0; 10559 bool IsCandidate = detectFPCCEligibleStructHelper( 10560 Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off); 10561 // Not really a candidate if we have a single int but no float. 10562 if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy()) 10563 return false; 10564 if (!IsCandidate) 10565 return false; 10566 if (Field1Ty && Field1Ty->isFloatingPointTy()) 10567 NeededArgFPRs++; 10568 else if (Field1Ty) 10569 NeededArgGPRs++; 10570 if (Field2Ty && Field2Ty->isFloatingPointTy()) 10571 NeededArgFPRs++; 10572 else if (Field2Ty) 10573 NeededArgGPRs++; 10574 return IsCandidate; 10575 } 10576 10577 // Call getCoerceAndExpand for the two-element flattened struct described by 10578 // Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an 10579 // appropriate coerceToType and unpaddedCoerceToType. 
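// As an illustrative example: struct { float f; double d; } flattens to
// float@0 and double@8; the natural unpacked layout already places the double
// at offset 8, so both coerce types end up as { float, double } with no
// explicit padding element.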
10580 ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct( 10581 llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty, 10582 CharUnits Field2Off) const { 10583 SmallVector<llvm::Type *, 3> CoerceElts; 10584 SmallVector<llvm::Type *, 2> UnpaddedCoerceElts; 10585 if (!Field1Off.isZero()) 10586 CoerceElts.push_back(llvm::ArrayType::get( 10587 llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity())); 10588 10589 CoerceElts.push_back(Field1Ty); 10590 UnpaddedCoerceElts.push_back(Field1Ty); 10591 10592 if (!Field2Ty) { 10593 return ABIArgInfo::getCoerceAndExpand( 10594 llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()), 10595 UnpaddedCoerceElts[0]); 10596 } 10597 10598 CharUnits Field2Align = 10599 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty)); 10600 CharUnits Field1Size = 10601 CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty)); 10602 CharUnits Field2OffNoPadNoPack = Field1Size.alignTo(Field2Align); 10603 10604 CharUnits Padding = CharUnits::Zero(); 10605 if (Field2Off > Field2OffNoPadNoPack) 10606 Padding = Field2Off - Field2OffNoPadNoPack; 10607 else if (Field2Off != Field2Align && Field2Off > Field1Size) 10608 Padding = Field2Off - Field1Size; 10609 10610 bool IsPacked = !Field2Off.isMultipleOf(Field2Align); 10611 10612 if (!Padding.isZero()) 10613 CoerceElts.push_back(llvm::ArrayType::get( 10614 llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity())); 10615 10616 CoerceElts.push_back(Field2Ty); 10617 UnpaddedCoerceElts.push_back(Field2Ty); 10618 10619 auto CoerceToType = 10620 llvm::StructType::get(getVMContext(), CoerceElts, IsPacked); 10621 auto UnpaddedCoerceToType = 10622 llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked); 10623 10624 return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType); 10625 } 10626 10627 ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed, 10628 int &ArgGPRsLeft, 10629 int &ArgFPRsLeft) const { 10630 assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow"); 10631 Ty = useFirstFieldIfTransparentUnion(Ty); 10632 10633 // Structures with either a non-trivial destructor or a non-trivial 10634 // copy constructor are always passed indirectly. 10635 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 10636 if (ArgGPRsLeft) 10637 ArgGPRsLeft -= 1; 10638 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA == 10639 CGCXXABI::RAA_DirectInMemory); 10640 } 10641 10642 // Ignore empty structs/unions. 10643 if (isEmptyRecord(getContext(), Ty, true)) 10644 return ABIArgInfo::getIgnore(); 10645 10646 uint64_t Size = getContext().getTypeSize(Ty); 10647 10648 // Pass floating point values via FPRs if possible. 10649 if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() && 10650 FLen >= Size && ArgFPRsLeft) { 10651 ArgFPRsLeft--; 10652 return ABIArgInfo::getDirect(); 10653 } 10654 10655 // Complex types for the hard float ABI must be passed direct rather than 10656 // using CoerceAndExpand. 
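  // (e.g., on an lp64d target a fixed 'double _Complex' argument consumes two
  // FPRs and is passed as a direct value.)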
10657 if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) { 10658 QualType EltTy = Ty->castAs<ComplexType>()->getElementType(); 10659 if (getContext().getTypeSize(EltTy) <= FLen) { 10660 ArgFPRsLeft -= 2; 10661 return ABIArgInfo::getDirect(); 10662 } 10663 } 10664 10665 if (IsFixed && FLen && Ty->isStructureOrClassType()) { 10666 llvm::Type *Field1Ty = nullptr; 10667 llvm::Type *Field2Ty = nullptr; 10668 CharUnits Field1Off = CharUnits::Zero(); 10669 CharUnits Field2Off = CharUnits::Zero(); 10670 int NeededArgGPRs; 10671 int NeededArgFPRs; 10672 bool IsCandidate = 10673 detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off, 10674 NeededArgGPRs, NeededArgFPRs); 10675 if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft && 10676 NeededArgFPRs <= ArgFPRsLeft) { 10677 ArgGPRsLeft -= NeededArgGPRs; 10678 ArgFPRsLeft -= NeededArgFPRs; 10679 return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty, 10680 Field2Off); 10681 } 10682 } 10683 10684 uint64_t NeededAlign = getContext().getTypeAlign(Ty); 10685 bool MustUseStack = false; 10686 // Determine the number of GPRs needed to pass the current argument 10687 // according to the ABI. 2*XLen-aligned varargs are passed in "aligned" 10688 // register pairs, so may consume 3 registers. 10689 int NeededArgGPRs = 1; 10690 if (!IsFixed && NeededAlign == 2 * XLen) 10691 NeededArgGPRs = 2 + (ArgGPRsLeft % 2); 10692 else if (Size > XLen && Size <= 2 * XLen) 10693 NeededArgGPRs = 2; 10694 10695 if (NeededArgGPRs > ArgGPRsLeft) { 10696 MustUseStack = true; 10697 NeededArgGPRs = ArgGPRsLeft; 10698 } 10699 10700 ArgGPRsLeft -= NeededArgGPRs; 10701 10702 if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) { 10703 // Treat an enum type as its underlying type. 10704 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 10705 Ty = EnumTy->getDecl()->getIntegerType(); 10706 10707 // All integral types are promoted to XLen width, unless passed on the 10708 // stack. 10709 if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) { 10710 return extendType(Ty); 10711 } 10712 10713 if (const auto *EIT = Ty->getAs<ExtIntType>()) { 10714 if (EIT->getNumBits() < XLen && !MustUseStack) 10715 return extendType(Ty); 10716 if (EIT->getNumBits() > 128 || 10717 (!getContext().getTargetInfo().hasInt128Type() && 10718 EIT->getNumBits() > 64)) 10719 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 10720 } 10721 10722 return ABIArgInfo::getDirect(); 10723 } 10724 10725 // Aggregates which are <= 2*XLen will be passed in registers if possible, 10726 // so coerce to integers. 10727 if (Size <= 2 * XLen) { 10728 unsigned Alignment = getContext().getTypeAlign(Ty); 10729 10730 // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is 10731 // required, and a 2-element XLen array if only XLen alignment is required. 10732 if (Size <= XLen) { 10733 return ABIArgInfo::getDirect( 10734 llvm::IntegerType::get(getVMContext(), XLen)); 10735 } else if (Alignment == 2 * XLen) { 10736 return ABIArgInfo::getDirect( 10737 llvm::IntegerType::get(getVMContext(), 2 * XLen)); 10738 } else { 10739 return ABIArgInfo::getDirect(llvm::ArrayType::get( 10740 llvm::IntegerType::get(getVMContext(), XLen), 2)); 10741 } 10742 } 10743 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 10744 } 10745 10746 ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const { 10747 if (RetTy->isVoidType()) 10748 return ABIArgInfo::getIgnore(); 10749 10750 int ArgGPRsLeft = 2; 10751 int ArgFPRsLeft = FLen ? 
2 : 0;

  // The rules for return and argument types are the same, so defer to
  // classifyArgumentType.
  return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
                              ArgFPRsLeft);
}

Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  std::pair<CharUnits, CharUnits> SizeAndAlign =
      getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 2*XLen bits (i.e. more than two register slots)
  // are passed indirectly.
  bool IsIndirect = SizeAndAlign.first > 2 * SlotSize;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, SizeAndAlign,
                          SlotSize, /*AllowHigherAlign=*/true);
}

ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);
  // RV64 ABI requires unsigned 32-bit integers to be sign extended.
  if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty);
  return ABIArgInfo::getExtend(Ty);
}

namespace {
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
                         unsigned FLen)
      : TargetCodeGenInfo(std::make_unique<RISCVABIInfo>(CGT, XLen, FLen)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;

    const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case RISCVInterruptAttr::user: Kind = "user"; break;
    case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
    case RISCVInterruptAttr::machine: Kind = "machine"; break;
    }

    auto *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// VE ABI Implementation.
//
namespace {
class VEABIInfo : public DefaultABIInfo {
public:
  VEABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;
  void computeInfo(CGFunctionInfo &FI) const override;
};
} // end anonymous namespace

ABIArgInfo VEABIInfo::classifyReturnType(QualType Ty) const {
  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend(Ty);
  return DefaultABIInfo::classifyReturnType(Ty);
}

ABIArgInfo VEABIInfo::classifyArgumentType(QualType Ty) const {
  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend(Ty);
  return DefaultABIInfo::classifyArgumentType(Ty);
}

void VEABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &Arg : FI.arguments())
    Arg.info = classifyArgumentType(Arg.type);
}

namespace {
class VETargetCodeGenInfo : public TargetCodeGenInfo {
public:
  VETargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<VEABIInfo>(CGT)) {}
  // The VE ABI requires that the arguments of variadic and prototype-less
  // functions be passed in both registers and memory.
  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    return true;
  }
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//

bool CodeGenModule::supportsCOMDAT() const {
  return getTriple().supportsCOMDAT();
}

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // Helper to set the unique_ptr while still keeping the return value.
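  // (The chosen TargetCodeGenInfo is cached in TheTargetCodeGenInfo above, so
  // the switch below runs at most once per CodeGenModule.)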
10884 auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & { 10885 this->TheTargetCodeGenInfo.reset(P); 10886 return *P; 10887 }; 10888 10889 const llvm::Triple &Triple = getTarget().getTriple(); 10890 switch (Triple.getArch()) { 10891 default: 10892 return SetCGInfo(new DefaultTargetCodeGenInfo(Types)); 10893 10894 case llvm::Triple::le32: 10895 return SetCGInfo(new PNaClTargetCodeGenInfo(Types)); 10896 case llvm::Triple::mips: 10897 case llvm::Triple::mipsel: 10898 if (Triple.getOS() == llvm::Triple::NaCl) 10899 return SetCGInfo(new PNaClTargetCodeGenInfo(Types)); 10900 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true)); 10901 10902 case llvm::Triple::mips64: 10903 case llvm::Triple::mips64el: 10904 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false)); 10905 10906 case llvm::Triple::avr: 10907 return SetCGInfo(new AVRTargetCodeGenInfo(Types)); 10908 10909 case llvm::Triple::aarch64: 10910 case llvm::Triple::aarch64_32: 10911 case llvm::Triple::aarch64_be: { 10912 AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS; 10913 if (getTarget().getABI() == "darwinpcs") 10914 Kind = AArch64ABIInfo::DarwinPCS; 10915 else if (Triple.isOSWindows()) 10916 return SetCGInfo( 10917 new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64)); 10918 10919 return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind)); 10920 } 10921 10922 case llvm::Triple::wasm32: 10923 case llvm::Triple::wasm64: { 10924 WebAssemblyABIInfo::ABIKind Kind = WebAssemblyABIInfo::MVP; 10925 if (getTarget().getABI() == "experimental-mv") 10926 Kind = WebAssemblyABIInfo::ExperimentalMV; 10927 return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types, Kind)); 10928 } 10929 10930 case llvm::Triple::arm: 10931 case llvm::Triple::armeb: 10932 case llvm::Triple::thumb: 10933 case llvm::Triple::thumbeb: { 10934 if (Triple.getOS() == llvm::Triple::Win32) { 10935 return SetCGInfo( 10936 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP)); 10937 } 10938 10939 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; 10940 StringRef ABIStr = getTarget().getABI(); 10941 if (ABIStr == "apcs-gnu") 10942 Kind = ARMABIInfo::APCS; 10943 else if (ABIStr == "aapcs16") 10944 Kind = ARMABIInfo::AAPCS16_VFP; 10945 else if (CodeGenOpts.FloatABI == "hard" || 10946 (CodeGenOpts.FloatABI != "soft" && 10947 (Triple.getEnvironment() == llvm::Triple::GNUEABIHF || 10948 Triple.getEnvironment() == llvm::Triple::MuslEABIHF || 10949 Triple.getEnvironment() == llvm::Triple::EABIHF))) 10950 Kind = ARMABIInfo::AAPCS_VFP; 10951 10952 return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind)); 10953 } 10954 10955 case llvm::Triple::ppc: { 10956 if (Triple.isOSAIX()) 10957 return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ false)); 10958 10959 bool IsSoftFloat = 10960 CodeGenOpts.FloatABI == "soft" || getTarget().hasFeature("spe"); 10961 bool RetSmallStructInRegABI = 10962 PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts); 10963 return SetCGInfo( 10964 new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI)); 10965 } 10966 case llvm::Triple::ppc64: 10967 if (Triple.isOSAIX()) 10968 return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ true)); 10969 10970 if (Triple.isOSBinFormatELF()) { 10971 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1; 10972 if (getTarget().getABI() == "elfv2") 10973 Kind = PPC64_SVR4_ABIInfo::ELFv2; 10974 bool HasQPX = getTarget().getABI() == "elfv1-qpx"; 10975 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft"; 10976 10977 return SetCGInfo(new 

  case llvm::Triple::ppc: {
    if (Triple.isOSAIX())
      return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ false));

    bool IsSoftFloat =
        CodeGenOpts.FloatABI == "soft" || getTarget().hasFeature("spe");
    bool RetSmallStructInRegABI =
        PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    return SetCGInfo(
        new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI));
  }
  case llvm::Triple::ppc64:
    if (Triple.isOSAIX())
      return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ true));

    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;
      bool HasQPX = getTarget().getABI() == "elfv1-qpx";
      bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

      return SetCGInfo(
          new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX, IsSoftFloat));
    }
    return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;
    bool HasQPX = getTarget().getABI() == "elfv1-qpx";
    bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

    return SetCGInfo(
        new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX, IsSoftFloat));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return SetCGInfo(new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64: {
    StringRef ABIStr = getTarget().getABI();
    unsigned XLen = getTarget().getPointerWidth(0);
    unsigned ABIFLen = 0;
    if (ABIStr.endswith("f"))
      ABIFLen = 32;
    else if (ABIStr.endswith("d"))
      ABIFLen = 64;
    return SetCGInfo(new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen));
  }
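
  // To make the RISC-V mapping above concrete (illustrative values):
  // '-mabi=lp64d' on riscv64 yields XLen == 64 and ABIFLen == 64, while
  // '-mabi=ilp32' on riscv32 yields XLen == 32 and ABIFLen == 0 (no
  // hardware floating-point ABI).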

  case llvm::Triple::systemz: {
    bool SoftFloat = CodeGenOpts.FloatABI == "soft";
    bool HasVector = !SoftFloat && getTarget().getABI() == "vector";
    return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector, SoftFloat));
  }

  case llvm::Triple::tce:
  case llvm::Triple::tcele:
    return SetCGInfo(new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool RetSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(new WinX86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return SetCGInfo(new X86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
          CodeGenOpts.FloatABI == "soft"));
    }
  }

  case llvm::Triple::x86_64: {
    StringRef ABI = getTarget().getABI();
    X86AVXABILevel AVXLevel =
        (ABI == "avx512"
             ? X86AVXABILevel::AVX512
             : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
    default:
      return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }
  case llvm::Triple::hexagon:
    return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::lanai:
    return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
  case llvm::Triple::amdgcn:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparc:
    return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
  case llvm::Triple::arc:
    return SetCGInfo(new ARCTargetCodeGenInfo(Types));
  case llvm::Triple::spir:
  case llvm::Triple::spir64:
    return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
  case llvm::Triple::ve:
    return SetCGInfo(new VETargetCodeGenInfo(Types));
  }
}

/// Create an OpenCL kernel for an enqueued block.
///
/// The kernel has the same function type as the block invoke function. Its
/// name is the name of the block invoke function suffixed with "_kernel".
/// It simply calls the block invoke function and then returns.
llvm::Function *
TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
                                             llvm::Function *Invoke,
                                             llvm::Value *BlockLiteral) const {
  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  for (auto &P : InvokeFT->params())
    ArgTys.push_back(P);
  auto &C = CGF.getLLVMContext();
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  // Build the kernel body at a fresh insertion point, then restore the
  // caller's builder state.
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  auto &Builder = CGF.Builder;
  Builder.SetInsertPoint(BB);
  llvm::SmallVector<llvm::Value *, 2> Args;
  for (auto &A : F->args())
    Args.push_back(&A);
  Builder.CreateCall(Invoke, Args);
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);
  return F;
}
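
// A sketch of the wrapper emitted above (illustrative names and address
// spaces, not verbatim output): for a block invoke function
// @foo_block_invoke taking a single i8* block pointer, the kernel is simply
//
//   define internal void @foo_block_invoke_kernel(i8* %block) {
//   entry:
//     call void @foo_block_invoke(i8* %block)
//     ret void
//   }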

/// Create an OpenCL kernel for an enqueued block.
///
/// The type of the first argument (the block literal) is the struct type
/// of the block literal instead of a pointer type. The first argument
/// (the block literal) is passed directly by value to the kernel. The kernel
/// allocates a struct of the same type on the stack, stores the block literal
/// into it, and passes that struct's address to the block invoke function.
/// The kernel carries the "enqueued-block" function attribute and OpenCL
/// kernel argument metadata.
llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke,
    llvm::Value *BlockLiteral) const {
  auto &Builder = CGF.Builder;
  auto &C = CGF.getLLVMContext();

  auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
  llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgNames;

  // The first argument is the block literal struct itself, passed by value.
  ArgTys.push_back(BlockTy);
  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
  AccessQuals.push_back(llvm::MDString::get(C, "none"));
  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
  // The remaining arguments are __local pointers (OpenCL address space 3)
  // for the block's local-memory parameters.
  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
    ArgTys.push_back(InvokeFT->getParamType(I));
    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
    AccessQuals.push_back(llvm::MDString::get(C, "none"));
    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
    ArgNames.push_back(
        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
  }
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  F->addFnAttr("enqueued-block");
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  Builder.SetInsertPoint(BB);
  // Spill the by-value block literal into a stack slot and pass its address
  // to the invoke function, which expects a pointer.
  const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
  BlockPtr->setAlignment(BlockAlign);
  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
  llvm::SmallVector<llvm::Value *, 2> Args;
  Args.push_back(Cast);
  for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
    Args.push_back(I);
  Builder.CreateCall(Invoke, Args);
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);

  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
  F->setMetadata("kernel_arg_base_type",
                 llvm::MDNode::get(C, ArgBaseTypeNames));
  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
  if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));

  return F;
}
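
// A sketch of the AMDGPU wrapper emitted above (illustrative names, types,
// and address spaces, not verbatim output): the block literal arrives by
// value, is spilled to a stack slot, and the slot's address is forwarded to
// the invoke function:
//
//   define internal void @foo_block_invoke_kernel(%block.literal %lit) #0 {
//   entry:
//     %tmp = alloca %block.literal
//     store %block.literal %lit, %block.literal* %tmp
//     %cast = bitcast %block.literal* %tmp to i8*
//     call void @foo_block_invoke(i8* %cast)
//     ret void
//   }
//
//   attributes #0 = { "enqueued-block" }
//
// together with the kernel_arg_* metadata nodes populated above.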